summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--SECURITY.md24
-rw-r--r--bitbake/SECURITY.md24
-rwxr-xr-xbitbake/bin/bitbake3
-rwxr-xr-xbitbake/bin/bitbake-diffsigs49
-rwxr-xr-xbitbake/bin/bitbake-getvar6
-rwxr-xr-xbitbake/bin/bitbake-hashclient36
-rwxr-xr-xbitbake/bin/bitbake-layers4
-rwxr-xr-xbitbake/bin/bitbake-prserv2
-rwxr-xr-xbitbake/bin/bitbake-server5
-rwxr-xr-xbitbake/bin/bitbake-worker19
-rwxr-xr-xbitbake/bin/git-make-shallow2
-rw-r--r--bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst82
-rw-r--r--bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst55
-rw-r--r--bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst2
-rw-r--r--bitbake/lib/bb/COW.py2
-rw-r--r--bitbake/lib/bb/__init__.py7
-rw-r--r--bitbake/lib/bb/asyncrpc/__init__.py2
-rw-r--r--bitbake/lib/bb/asyncrpc/client.py37
-rw-r--r--bitbake/lib/bb/asyncrpc/serv.py2
-rw-r--r--bitbake/lib/bb/codeparser.py26
-rw-r--r--bitbake/lib/bb/compress/_pipecompress.py2
-rw-r--r--bitbake/lib/bb/compress/lz4.py2
-rw-r--r--bitbake/lib/bb/compress/zstd.py2
-rw-r--r--bitbake/lib/bb/cooker.py5
-rw-r--r--bitbake/lib/bb/cookerdata.py17
-rw-r--r--bitbake/lib/bb/daemonize.py2
-rw-r--r--bitbake/lib/bb/data.py1
-rw-r--r--bitbake/lib/bb/event.py10
-rw-r--r--bitbake/lib/bb/exceptions.py2
-rw-r--r--bitbake/lib/bb/fetch2/__init__.py2
-rw-r--r--bitbake/lib/bb/fetch2/git.py64
-rw-r--r--bitbake/lib/bb/fetch2/gitsm.py5
-rw-r--r--bitbake/lib/bb/fetch2/npm.py2
-rw-r--r--bitbake/lib/bb/fetch2/osc.py2
-rw-r--r--bitbake/lib/bb/monitordisk.py7
-rw-r--r--bitbake/lib/bb/parse/parse_py/BBHandler.py4
-rw-r--r--bitbake/lib/bb/parse/parse_py/ConfHandler.py11
-rw-r--r--bitbake/lib/bb/process.py2
-rw-r--r--bitbake/lib/bb/runqueue.py175
-rw-r--r--bitbake/lib/bb/siggen.py15
-rw-r--r--bitbake/lib/bb/tests/codeparser.py26
-rw-r--r--bitbake/lib/bb/tests/compression.py2
-rw-r--r--bitbake/lib/bb/tests/cooker.py2
-rw-r--r--bitbake/lib/bb/tests/fetch.py139
-rw-r--r--bitbake/lib/bb/tests/parse.py23
-rw-r--r--bitbake/lib/bb/tinfoil.py2
-rw-r--r--bitbake/lib/bb/utils.py90
-rw-r--r--bitbake/lib/bblayers/__init__.py2
-rw-r--r--bitbake/lib/bblayers/action.py2
-rw-r--r--bitbake/lib/bblayers/common.py2
-rw-r--r--bitbake/lib/bblayers/layerindex.py2
-rw-r--r--bitbake/lib/bblayers/query.py2
-rw-r--r--bitbake/lib/bs4/tests/test_tree.py2
-rw-r--r--bitbake/lib/ply/yacc.py7
-rw-r--r--bitbake/lib/prserv/__init__.py2
-rw-r--r--bitbake/lib/prserv/client.py2
-rw-r--r--bitbake/lib/prserv/db.py2
-rw-r--r--bitbake/lib/prserv/serv.py8
-rwxr-xr-xbitbake/lib/toaster/manage.py2
-rw-r--r--bitbake/lib/toaster/orm/fixtures/poky.xml2
-rw-r--r--bitbake/lib/toaster/toastergui/api.py26
-rw-r--r--documentation/.gitignore2
-rw-r--r--documentation/.vale.ini7
-rw-r--r--documentation/Makefile12
-rw-r--r--documentation/README71
-rw-r--r--documentation/brief-yoctoprojectqs/index.rst49
-rw-r--r--documentation/bsp-guide/bsp.rst97
-rw-r--r--documentation/conf.py8
-rw-r--r--documentation/contributor-guide/identify-component.rst31
-rw-r--r--documentation/contributor-guide/index.rst26
-rw-r--r--documentation/contributor-guide/recipe-style-guide.rst411
-rw-r--r--documentation/contributor-guide/report-defect.rst67
-rw-r--r--documentation/contributor-guide/submit-changes.rst786
-rw-r--r--documentation/dev-manual/bmaptool.rst59
-rw-r--r--documentation/dev-manual/build-quality.rst409
-rw-r--r--documentation/dev-manual/building.rst942
-rw-r--r--documentation/dev-manual/common-tasks.rst11784
-rw-r--r--documentation/dev-manual/custom-distribution.rst109
-rw-r--r--documentation/dev-manual/custom-template-configuration-directory.rst72
-rw-r--r--documentation/dev-manual/customizing-images.rst223
-rw-r--r--documentation/dev-manual/debugging.rst1252
-rw-r--r--documentation/dev-manual/development-shell.rst82
-rw-r--r--documentation/dev-manual/device-manager.rst74
-rw-r--r--documentation/dev-manual/disk-space.rst61
-rw-r--r--documentation/dev-manual/efficiently-fetching-sources.rst68
-rw-r--r--documentation/dev-manual/error-reporting-tool.rst84
-rw-r--r--documentation/dev-manual/external-scm.rst67
-rw-r--r--documentation/dev-manual/external-toolchain.rst40
-rw-r--r--documentation/dev-manual/gobject-introspection.rst155
-rw-r--r--documentation/dev-manual/index.rst40
-rw-r--r--documentation/dev-manual/init-manager.rst162
-rw-r--r--documentation/dev-manual/layers.rst853
-rw-r--r--documentation/dev-manual/libraries.rst267
-rw-r--r--documentation/dev-manual/licenses.rst539
-rw-r--r--documentation/dev-manual/new-machine.rst118
-rw-r--r--documentation/dev-manual/new-recipe.rst1640
-rw-r--r--documentation/dev-manual/packages.rst1250
-rw-r--r--documentation/dev-manual/prebuilt-libraries.rst209
-rw-r--r--documentation/dev-manual/python-development-shell.rst50
-rw-r--r--documentation/dev-manual/qemu.rst4
-rw-r--r--documentation/dev-manual/quilt.rst89
-rw-r--r--documentation/dev-manual/read-only-rootfs.rst89
-rw-r--r--documentation/dev-manual/runtime-testing.rst600
-rw-r--r--documentation/dev-manual/sbom.rst81
-rw-r--r--documentation/dev-manual/securing-images.rst156
-rw-r--r--documentation/dev-manual/security-subjects.rst189
-rw-r--r--documentation/dev-manual/speeding-up-build.rst109
-rw-r--r--documentation/dev-manual/start.rst165
-rw-r--r--documentation/dev-manual/temporary-source-code.rst66
-rw-r--r--documentation/dev-manual/upgrading-recipes.rst397
-rw-r--r--documentation/dev-manual/vulnerabilities.rst214
-rw-r--r--documentation/dev-manual/wayland.rst90
-rw-r--r--documentation/dev-manual/wic.rst732
-rw-r--r--documentation/dev-manual/x32-psabi.rst54
-rw-r--r--documentation/index.rst1
-rw-r--r--documentation/kernel-dev/advanced.rst9
-rw-r--r--documentation/kernel-dev/common.rst70
-rw-r--r--documentation/kernel-dev/concepts-appx.rst2
-rw-r--r--documentation/kernel-dev/faq.rst4
-rw-r--r--documentation/kernel-dev/intro.rst2
-rw-r--r--documentation/migration-guides/migration-1.4.rst2
-rw-r--r--documentation/migration-guides/migration-1.5.rst15
-rw-r--r--documentation/migration-guides/migration-1.6.rst32
-rw-r--r--documentation/migration-guides/migration-1.7.rst6
-rw-r--r--documentation/migration-guides/migration-2.1.rst8
-rw-r--r--documentation/migration-guides/migration-2.2.rst22
-rw-r--r--documentation/migration-guides/migration-2.3.rst2
-rw-r--r--documentation/migration-guides/migration-2.4.rst10
-rw-r--r--documentation/migration-guides/migration-2.5.rst10
-rw-r--r--documentation/migration-guides/migration-2.6.rst4
-rw-r--r--documentation/migration-guides/migration-3.0.rst6
-rw-r--r--documentation/migration-guides/migration-3.1.rst4
-rw-r--r--documentation/migration-guides/migration-3.2.rst20
-rw-r--r--documentation/migration-guides/migration-3.3.rst7
-rw-r--r--documentation/migration-guides/migration-3.4.rst10
-rw-r--r--documentation/migration-guides/migration-4.0.rst27
-rw-r--r--documentation/migration-guides/release-3.4.rst2
-rw-r--r--documentation/migration-guides/release-4.0.rst19
-rw-r--r--documentation/migration-guides/release-notes-3.4.3.rst197
-rw-r--r--documentation/migration-guides/release-notes-3.4.4.rst155
-rw-r--r--documentation/migration-guides/release-notes-3.4.rst2
-rw-r--r--documentation/migration-guides/release-notes-4.0.1.rst248
-rw-r--r--documentation/migration-guides/release-notes-4.0.10.rst180
-rw-r--r--documentation/migration-guides/release-notes-4.0.11.rst214
-rw-r--r--documentation/migration-guides/release-notes-4.0.12.rst277
-rw-r--r--documentation/migration-guides/release-notes-4.0.13.rst271
-rw-r--r--documentation/migration-guides/release-notes-4.0.14.rst227
-rw-r--r--documentation/migration-guides/release-notes-4.0.15.rst189
-rw-r--r--documentation/migration-guides/release-notes-4.0.16.rst191
-rw-r--r--documentation/migration-guides/release-notes-4.0.17.rst238
-rw-r--r--documentation/migration-guides/release-notes-4.0.2.rst296
-rw-r--r--documentation/migration-guides/release-notes-4.0.3.rst314
-rw-r--r--documentation/migration-guides/release-notes-4.0.4.rst299
-rw-r--r--documentation/migration-guides/release-notes-4.0.5.rst196
-rw-r--r--documentation/migration-guides/release-notes-4.0.6.rst313
-rw-r--r--documentation/migration-guides/release-notes-4.0.7.rst242
-rw-r--r--documentation/migration-guides/release-notes-4.0.8.rst217
-rw-r--r--documentation/migration-guides/release-notes-4.0.9.rst247
-rw-r--r--documentation/migration-guides/release-notes-4.0.rst21
-rw-r--r--documentation/overview-manual/concepts.rst222
-rw-r--r--documentation/overview-manual/development-environment.rst33
-rw-r--r--documentation/overview-manual/intro.rst2
-rw-r--r--documentation/overview-manual/svg/bitbake_tasks_map.svg4
-rw-r--r--documentation/overview-manual/yp-intro.rst53
-rw-r--r--documentation/poky.yaml.in34
-rw-r--r--documentation/profile-manual/intro.rst40
-rw-r--r--documentation/profile-manual/usage.rst891
-rw-r--r--documentation/ref-manual/classes.rst1858
-rw-r--r--documentation/ref-manual/devtool-reference.rst14
-rw-r--r--documentation/ref-manual/faq.rst16
-rw-r--r--documentation/ref-manual/features.rst10
-rw-r--r--documentation/ref-manual/images.rst22
-rw-r--r--documentation/ref-manual/kickstart.rst2
-rw-r--r--documentation/ref-manual/qa-checks.rst87
-rw-r--r--documentation/ref-manual/release-process.rst92
-rw-r--r--documentation/ref-manual/resources.rst81
-rw-r--r--documentation/ref-manual/structure.rst24
-rw-r--r--documentation/ref-manual/svg/releases.svg1744
-rw-r--r--documentation/ref-manual/system-requirements.rst311
-rw-r--r--documentation/ref-manual/tasks.rst52
-rw-r--r--documentation/ref-manual/terms.rst114
-rw-r--r--documentation/ref-manual/variables.rst1062
-rw-r--r--documentation/ref-manual/varlocality.rst2
-rw-r--r--documentation/sdk-manual/appendix-obtain.rst64
-rw-r--r--documentation/sdk-manual/extensible.rst328
-rw-r--r--documentation/sdk-manual/intro.rst2
-rw-r--r--documentation/sdk-manual/working-projects.rst16
-rw-r--r--documentation/standards.md15
-rw-r--r--documentation/styles/config/vocabularies/OpenSource/accept.txt20
-rw-r--r--documentation/styles/config/vocabularies/Yocto/accept.txt5
-rw-r--r--documentation/template/template.svg2
-rw-r--r--documentation/test-manual/intro.rst125
-rw-r--r--documentation/test-manual/reproducible-builds.rst11
-rw-r--r--documentation/test-manual/test-process.rst44
-rw-r--r--documentation/test-manual/understand-autobuilder.rst66
-rw-r--r--documentation/test-manual/yocto-project-compatible.rst2
-rw-r--r--documentation/toaster-manual/reference.rst2
-rw-r--r--documentation/toaster-manual/setup-and-use.rst4
-rw-r--r--documentation/transitioning-to-a-custom-environment.rst10
-rw-r--r--documentation/what-i-wish-id-known.rst14
-rw-r--r--meta-poky/conf/distro/poky.conf14
-rw-r--r--meta-poky/conf/local.conf.sample2
-rw-r--r--meta-selftest/files/static-group2
-rw-r--r--meta-selftest/recipes-test/devtool/devtool-test-local/file31
-rw-r--r--meta-selftest/recipes-test/devtool/devtool-test-local_6.03.bb3
-rw-r--r--meta-selftest/recipes-test/devtool/devtool-test-localonly.bb3
-rw-r--r--meta-selftest/recipes-test/devtool/devtool-test-localonly/file31
-rw-r--r--meta-selftest/recipes-test/images/oe-selftest-image.bb2
-rw-r--r--meta-selftest/recipes-test/license/incompatible-license-alias.bb2
-rw-r--r--meta-selftest/recipes-test/license/incompatible-license.bb2
-rw-r--r--meta-selftest/recipes-test/license/incompatible-licenses.bb2
-rw-r--r--meta-selftest/recipes-test/license/incompatible-nonspdx-license.bb2
-rw-r--r--meta-skeleton/recipes-skeleton/useradd/useradd-example.bb4
-rw-r--r--meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.10.bbappend16
-rw-r--r--meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.15.bbappend16
-rw-r--r--meta/classes/archiver.bbclass5
-rw-r--r--meta/classes/baremetal-image.bbclass11
-rw-r--r--meta/classes/base.bbclass9
-rw-r--r--meta/classes/bin_package.bbclass3
-rw-r--r--meta/classes/cargo.bbclass1
-rw-r--r--meta/classes/cargo_common.bbclass10
-rw-r--r--meta/classes/cmake.bbclass5
-rw-r--r--meta/classes/cml1.bbclass2
-rw-r--r--meta/classes/core-image.bbclass4
-rw-r--r--meta/classes/create-spdx.bbclass39
-rw-r--r--meta/classes/cve-check.bbclass55
-rw-r--r--meta/classes/devshell.bbclass2
-rw-r--r--meta/classes/externalsrc.bbclass46
-rw-r--r--meta/classes/fontcache.bbclass1
-rw-r--r--meta/classes/fs-uuid.bbclass2
-rw-r--r--meta/classes/gnomebase.bbclass2
-rw-r--r--meta/classes/go.bbclass2
-rw-r--r--meta/classes/goarch.bbclass27
-rw-r--r--meta/classes/gobject-introspection-data.bbclass5
-rw-r--r--meta/classes/gtk-icon-cache.bbclass2
-rw-r--r--meta/classes/image-live.bbclass2
-rw-r--r--meta/classes/image.bbclass7
-rw-r--r--meta/classes/image_types.bbclass8
-rw-r--r--meta/classes/image_types_wic.bbclass4
-rw-r--r--meta/classes/insane.bbclass16
-rw-r--r--meta/classes/kernel-arch.bbclass5
-rw-r--r--meta/classes/kernel-devicetree.bbclass22
-rw-r--r--meta/classes/kernel-fitimage.bbclass31
-rw-r--r--meta/classes/kernel-uboot.bbclass9
-rw-r--r--meta/classes/kernel-uimage.bbclass2
-rw-r--r--meta/classes/kernel-yocto.bbclass18
-rw-r--r--meta/classes/kernel.bbclass96
-rw-r--r--meta/classes/kernelsrc.bbclass1
-rw-r--r--meta/classes/libc-package.bbclass1
-rw-r--r--meta/classes/license.bbclass2
-rw-r--r--meta/classes/license_image.bbclass2
-rw-r--r--meta/classes/linux-kernel-base.bbclass15
-rw-r--r--meta/classes/meson.bbclass1
-rw-r--r--meta/classes/mirrors.bbclass4
-rw-r--r--meta/classes/module-base.bbclass1
-rw-r--r--meta/classes/multilib.bbclass1
-rw-r--r--meta/classes/native.bbclass4
-rw-r--r--meta/classes/nativesdk.bbclass1
-rw-r--r--meta/classes/npm.bbclass63
-rw-r--r--meta/classes/overlayfs-etc.bbclass5
-rw-r--r--meta/classes/overlayfs.bbclass6
-rw-r--r--meta/classes/own-mirrors.bbclass1
-rw-r--r--meta/classes/package.bbclass78
-rw-r--r--meta/classes/package_rpm.bbclass12
-rw-r--r--meta/classes/populate_sdk_base.bbclass4
-rw-r--r--meta/classes/populate_sdk_ext.bbclass9
-rw-r--r--meta/classes/qemuboot.bbclass3
-rw-r--r--meta/classes/recipe_sanity.bbclass2
-rw-r--r--meta/classes/rm_work.bbclass19
-rw-r--r--meta/classes/rootfs-postcommands.bbclass32
-rw-r--r--meta/classes/sanity.bbclass22
-rw-r--r--meta/classes/scons.bbclass8
-rw-r--r--meta/classes/sstate.bbclass2
-rw-r--r--meta/classes/staging.bbclass6
-rw-r--r--meta/classes/systemd.bbclass1
-rw-r--r--meta/classes/testimage.bbclass26
-rw-r--r--meta/classes/toolchain-scripts.bbclass4
-rw-r--r--meta/classes/uboot-config.bbclass4
-rw-r--r--meta/classes/uboot-extlinux-config.bbclass10
-rw-r--r--meta/classes/uboot-sign.bbclass5
-rw-r--r--meta/classes/uninative.bbclass2
-rw-r--r--meta/classes/update-alternatives.bbclass10
-rw-r--r--meta/classes/useradd-staticids.bbclass2
-rw-r--r--meta/conf/bitbake.conf8
-rw-r--r--meta/conf/distro/include/cve-extra-exclusions.inc35
-rw-r--r--meta/conf/distro/include/maintainers.inc40
-rw-r--r--meta/conf/distro/include/ptest-packagelists.inc4
-rw-r--r--meta/conf/distro/include/yocto-uninative.inc10
-rw-r--r--meta/conf/documentation.conf1
-rw-r--r--meta/conf/layer.conf1
-rw-r--r--meta/conf/machine/include/arm/arch-arm64.inc5
-rw-r--r--meta/conf/machine/include/arm/arch-armv9a.inc28
-rw-r--r--meta/conf/machine/include/arm/armv9a/tune-neoversen2.inc10
-rw-r--r--meta/conf/machine/include/microblaze/feature-microblaze-versions.inc2
-rw-r--r--meta/files/common-licenses/LGPL-3.0-with-zeromq-exception181
-rw-r--r--meta/files/overlayfs-etc-preinit.sh.in23
-rw-r--r--meta/lib/oe/cve_check.py67
-rw-r--r--meta/lib/oe/go.py32
-rw-r--r--meta/lib/oe/npm_registry.py169
-rw-r--r--meta/lib/oe/overlayfs.py6
-rw-r--r--meta/lib/oe/package_manager/__init__.py5
-rw-r--r--meta/lib/oe/package_manager/deb/__init__.py8
-rw-r--r--meta/lib/oe/package_manager/ipk/__init__.py25
-rw-r--r--meta/lib/oe/package_manager/ipk/manifest.py2
-rw-r--r--meta/lib/oe/package_manager/rpm/__init__.py33
-rw-r--r--meta/lib/oe/package_manager/rpm/rootfs.py2
-rw-r--r--meta/lib/oe/package_manager/rpm/sdk.py3
-rw-r--r--meta/lib/oe/patch.py8
-rw-r--r--meta/lib/oe/recipeutils.py9
-rw-r--r--meta/lib/oe/reproducible.py4
-rw-r--r--meta/lib/oe/rootfs.py24
-rw-r--r--meta/lib/oe/sbom.py4
-rw-r--r--meta/lib/oe/sdk.py2
-rw-r--r--meta/lib/oe/spdx.py2
-rw-r--r--meta/lib/oe/sstatesig.py25
-rw-r--r--meta/lib/oe/terminal.py4
-rw-r--r--meta/lib/oeqa/core/target/ssh.py50
-rw-r--r--meta/lib/oeqa/core/utils/concurrencytest.py4
-rw-r--r--meta/lib/oeqa/oetest.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/dnf.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/ltp.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/parselogs.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/rpm.py23
-rw-r--r--meta/lib/oeqa/runtime/cases/rt.py17
-rw-r--r--meta/lib/oeqa/runtime/cases/rtc.py8
-rw-r--r--meta/lib/oeqa/runtime/cases/scp.py2
-rw-r--r--meta/lib/oeqa/runtime/context.py4
-rw-r--r--meta/lib/oeqa/sdk/buildtools-cases/sanity.py2
-rw-r--r--meta/lib/oeqa/sdk/cases/buildepoxy.py2
-rw-r--r--meta/lib/oeqa/sdk/cases/python.py11
-rw-r--r--meta/lib/oeqa/sdkext/cases/devtool.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/bblayers.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/bbtests.py10
-rw-r--r--meta/lib/oeqa/selftest/cases/cve_check.py19
-rw-r--r--meta/lib/oeqa/selftest/cases/devtool.py172
-rw-r--r--meta/lib/oeqa/selftest/cases/externalsrc.py44
-rw-r--r--meta/lib/oeqa/selftest/cases/fitimage.py10
-rw-r--r--meta/lib/oeqa/selftest/cases/glibc.py8
-rw-r--r--meta/lib/oeqa/selftest/cases/gotoolchain.py8
-rw-r--r--meta/lib/oeqa/selftest/cases/intercept.py (renamed from meta/lib/oeqa/selftest/cases/git.py)0
-rw-r--r--meta/lib/oeqa/selftest/cases/liboe.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/lic_checksum.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/locales.py45
-rw-r--r--meta/lib/oeqa/selftest/cases/minidebuginfo.py49
-rw-r--r--meta/lib/oeqa/selftest/cases/oelib/buildhistory.py24
-rw-r--r--meta/lib/oeqa/selftest/cases/prservice.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/recipetool.py24
-rw-r--r--meta/lib/oeqa/selftest/cases/reproducible.py18
-rw-r--r--meta/lib/oeqa/selftest/cases/resulttooltests.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/runtime_test.py12
-rw-r--r--meta/lib/oeqa/selftest/cases/tinfoil.py14
-rw-r--r--meta/lib/oeqa/selftest/cases/wic.py2
-rw-r--r--meta/lib/oeqa/utils/dump.py23
-rw-r--r--meta/lib/oeqa/utils/httpserver.py6
-rw-r--r--meta/lib/oeqa/utils/metadata.py6
-rw-r--r--meta/lib/oeqa/utils/nfs.py4
-rw-r--r--meta/lib/oeqa/utils/qemurunner.py26
-rw-r--r--meta/lib/rootfspostcommands.py7
-rw-r--r--meta/recipes-bsp/alsa-state/alsa-state.bb7
-rwxr-xr-xmeta/recipes-bsp/alsa-state/alsa-state/alsa-state-init3
-rw-r--r--meta/recipes-bsp/efibootmgr/efibootmgr_17.bb2
-rw-r--r--meta/recipes-bsp/efivar/efivar/0001-Fix-invalid-free-in-main.patch30
-rw-r--r--meta/recipes-bsp/efivar/efivar/0001-Remove-deprecated-add-needed-linker-flag.patch45
-rw-r--r--meta/recipes-bsp/efivar/efivar/0002-Add-T-workaround-for-GNU-ld-2.36.patch33
-rw-r--r--meta/recipes-bsp/efivar/efivar/0003-Set-LC_ALL-C-to-force-English-output-from-ld.patch33
-rw-r--r--meta/recipes-bsp/efivar/efivar/0004-LLD-fix-detection-and-remove-not-needed-workarounds.patch45
-rw-r--r--meta/recipes-bsp/efivar/efivar/0005-Revamp-efi_well_known_-variable-handling.patch262
-rw-r--r--meta/recipes-bsp/efivar/efivar_38.bb10
-rw-r--r--meta/recipes-bsp/grub/files/0001-font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch115
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3695-video-readers-png-Drop-greyscale-support-to-fix-heap.patch179
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3696-video-readers-png-Avoid-heap-OOB-R-W-inserting-huff.patch50
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3697-video-readers-jpeg-Block-int-underflow-wild-pointer.patch84
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-2601.patch85
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28733-net-ip-Do-IP-fragment-maths-safely.patch63
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Error-out-on-headers-with-LF-without-CR.patch58
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Fix-OOB-write-for-split-http-headers.patch56
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28735-kern-efi-sb-Reject-non-kernel-files-in-the-shim_lock.patch111
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28736-loader-efi-chainloader-Use-grub_loader_set_ex.patch86
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-3775.patch95
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2023-4692.patch97
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2023-4693.patch62
-rw-r--r--meta/recipes-bsp/grub/files/commands-boot-Add-API-to-pass-context-to-loader.patch168
-rw-r--r--meta/recipes-bsp/grub/files/determinism.patch2
-rw-r--r--meta/recipes-bsp/grub/files/loader-efi-chainloader-Simplify-the-loader-state.patch129
-rw-r--r--meta/recipes-bsp/grub/files/video-Remove-trailing-whitespaces.patch693
-rw-r--r--meta/recipes-bsp/grub/files/video-readers-jpeg-Abort-sooner-if-a-read-operation-.patch264
-rw-r--r--meta/recipes-bsp/grub/files/video-readers-jpeg-Refuse-to-handle-multiple-start-o.patch53
-rw-r--r--meta/recipes-bsp/grub/grub2.inc18
-rw-r--r--meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb5
-rw-r--r--meta/recipes-bsp/u-boot/files/0001-fs-squashfs-Use-kcalloc-when-relevant.patch64
-rw-r--r--meta/recipes-bsp/u-boot/files/0001-fs-squashfs-sqfs_read-Prevent-arbitrary-code-executi.patch80
-rw-r--r--meta/recipes-bsp/u-boot/files/0001-i2c-fix-stack-buffer-overflow-vulnerability-in-i2c-m.patch126
-rw-r--r--meta/recipes-bsp/u-boot/files/0001-net-Check-for-the-minimum-IP-fragmented-datagram-siz.patch207
-rw-r--r--meta/recipes-bsp/u-boot/u-boot.inc6
-rw-r--r--meta/recipes-bsp/u-boot/u-boot_2022.01.bb6
-rw-r--r--meta/recipes-bsp/v86d/v86d_0.1.10.bb1
-rw-r--r--meta/recipes-connectivity/avahi/avahi_0.8.bb12
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch58
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch47
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch65
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch59
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch52
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch73
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch52
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch46
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch108
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-avoid-start-failure-with-bind-user.patch (renamed from meta/recipes-connectivity/bind/bind-9.18.2/0001-avoid-start-failure-with-bind-user.patch)0
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-named-lwresd-V-and-start-log-hide-build-options.patch (renamed from meta/recipes-connectivity/bind/bind-9.18.2/0001-named-lwresd-V-and-start-log-hide-build-options.patch)0
-rw-r--r--meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch (renamed from meta/recipes-connectivity/bind/bind-9.18.2/bind-ensure-searching-for-json-headers-searches-sysr.patch)0
-rw-r--r--meta/recipes-connectivity/bind/bind/bind9 (renamed from meta/recipes-connectivity/bind/bind-9.18.2/bind9)0
-rw-r--r--meta/recipes-connectivity/bind/bind/conf.patch (renamed from meta/recipes-connectivity/bind/bind-9.18.2/conf.patch)0
-rw-r--r--meta/recipes-connectivity/bind/bind/generate-rndc-key.sh (renamed from meta/recipes-connectivity/bind/bind-9.18.2/generate-rndc-key.sh)0
-rw-r--r--meta/recipes-connectivity/bind/bind/init.d-add-support-for-read-only-rootfs.patch (renamed from meta/recipes-connectivity/bind/bind-9.18.2/init.d-add-support-for-read-only-rootfs.patch)0
-rw-r--r--meta/recipes-connectivity/bind/bind/make-etc-initd-bind-stop-work.patch (renamed from meta/recipes-connectivity/bind/bind-9.18.2/make-etc-initd-bind-stop-work.patch)0
-rw-r--r--meta/recipes-connectivity/bind/bind/named.service (renamed from meta/recipes-connectivity/bind/bind-9.18.2/named.service)0
-rw-r--r--meta/recipes-connectivity/bind/bind_9.18.24.bb (renamed from meta/recipes-connectivity/bind/bind_9.18.2.bb)19
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5.inc5
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch56
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/fix_service.patch30
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5_5.65.bb (renamed from meta/recipes-connectivity/bluez5/bluez5_5.64.bb)2
-rw-r--r--meta/recipes-connectivity/connman/connman.inc1
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch37
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-32293_p1.patch141
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-32293_p2.patch174
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch60
-rw-r--r--meta/recipes-connectivity/connman/connman_1.41.bb4
-rw-r--r--meta/recipes-connectivity/dhcpcd/dhcpcd_9.4.1.bb12
-rw-r--r--meta/recipes-connectivity/dhcpcd/files/0001-20-resolv.conf-improve-the-sitation-of-working-with-.patch82
-rw-r--r--meta/recipes-connectivity/dhcpcd/files/0001-dhcpcd.8-Fix-conflict-error-when-enable-multilib.patch46
-rw-r--r--meta/recipes-connectivity/dhcpcd/files/0001-privsep-Allow-getrandom-sysctl-for-newer-glibc.patch30
-rw-r--r--meta/recipes-connectivity/dhcpcd/files/0001-privsep-linux-fix-SECCOMP_AUDIT_ARCH-missing-ppc64le.patch34
-rw-r--r--meta/recipes-connectivity/dhcpcd/files/0002-privsep-Allow-newfstatat-syscall-as-well.patch31
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch280
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch254
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch54
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils_2.2.bb3
-rw-r--r--meta/recipes-connectivity/kea/files/fix-multilib-conflict.patch2
-rw-r--r--meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-1.patch56
-rw-r--r--meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-2.patch44
-rw-r--r--meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-3.patch31
-rw-r--r--meta/recipes-connectivity/libuv/libuv_1.44.2.bb (renamed from meta/recipes-connectivity/libuv/libuv_1.44.1.bb)8
-rw-r--r--meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb4
-rw-r--r--meta/recipes-connectivity/openssh/openssh/0001-upstream-include-destination-constraints-for-smartca.patch35
-rw-r--r--meta/recipes-connectivity/openssh/openssh/7280401bdd77ca54be6867a154cc01e0d72612e0.patch984
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0001.patch585
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0002.patch173
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0003.patch36
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0004.patch114
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch476
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-51384.patch171
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch97
-rw-r--r--meta/recipes-connectivity/openssh/openssh/fix-authorized-principals-command.patch30
-rwxr-xr-xmeta/recipes-connectivity/openssh/openssh/run-ptest2
-rw-r--r--meta/recipes-connectivity/openssh/openssh_8.9p1.bb32
-rw-r--r--meta/recipes-connectivity/openssl/files/environment.d-openssl.sh4
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch12
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch20
-rw-r--r--meta/recipes-connectivity/openssl/openssl/770aea88c3888cc5cb3ebc94ffcef706c68bc1d2.patch55
-rw-r--r--meta/recipes-connectivity/openssl/openssl/CVE-2024-2511.patch122
-rw-r--r--meta/recipes-connectivity/openssl/openssl/afalg.patch10
-rw-r--r--meta/recipes-connectivity/openssl/openssl_3.0.13.bb (renamed from meta/recipes-connectivity/openssl/openssl_3.0.3.bb)13
-rw-r--r--meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch48
-rw-r--r--meta/recipes-connectivity/ppp/ppp_2.4.9.bb1
-rw-r--r--meta/recipes-connectivity/resolvconf/resolvconf/0001-avoid-using-m-option-for-readlink.patch37
-rw-r--r--meta/recipes-connectivity/resolvconf/resolvconf_1.91.bb9
-rw-r--r--meta/recipes-connectivity/socat/socat/0001-configure.ac-check-getprotobynumber_r-with-AC_TRY_LI.patch35
-rw-r--r--meta/recipes-connectivity/socat/socat_1.7.4.4.bb (renamed from meta/recipes-connectivity/socat/socat_1.7.4.3.bb)6
-rw-r--r--meta/recipes-core/base-files/base-files/hosts2
-rw-r--r--meta/recipes-core/busybox/busybox.inc26
-rw-r--r--meta/recipes-core/busybox/busybox/0001-depmod-Ignore-.debug-directories.patch2
-rw-r--r--meta/recipes-core/busybox/busybox/0001-devmem-add-128-bit-width.patch128
-rw-r--r--meta/recipes-core/busybox/busybox/CVE-2022-30065.patch29
-rw-r--r--meta/recipes-core/busybox/busybox/CVE-2022-48174.patch80
-rw-r--r--meta/recipes-core/busybox/busybox_1.35.0.bb3
-rw-r--r--meta/recipes-core/coreutils/coreutils_9.0.bb4
-rw-r--r--meta/recipes-core/dbus/dbus_1.14.8.bb (renamed from meta/recipes-core/dbus/dbus_1.14.0.bb)14
-rw-r--r--meta/recipes-core/dropbear/dropbear.inc11
-rw-r--r--meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch145
-rw-r--r--meta/recipes-core/dropbear/dropbear/CVE-2023-36328.patch144
-rw-r--r--meta/recipes-core/dropbear/dropbear/CVE-2023-48795.patch234
-rw-r--r--meta/recipes-core/ell/ell_0.50.bb (renamed from meta/recipes-core/ell/ell_0.49.bb)2
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-001.patch35
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-002.patch72
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-003.patch28
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-004.patch429
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-005.patch34
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-006.patch174
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-007.patch53
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-008.patch37
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-009.patch354
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-010.patch50
-rw-r--r--meta/recipes-core/expat/expat/CVE-2023-52426-011.patch45
-rwxr-xr-xmeta/recipes-core/expat/expat/CVE-2024-28757.patch58
-rw-r--r--meta/recipes-core/expat/expat_2.5.0.bb (renamed from meta/recipes-core/expat/expat_2.4.7.bb)16
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0001-gio-tests-g-file-info-don-t-assume-million-in-one-ev.patch51
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch291
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch97
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch282
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch50
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch155
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch104
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch211
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch418
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch114
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch81
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch397
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch50
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch395
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch98
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch2
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0_2.72.3.bb (renamed from meta/recipes-core/glib-2.0/glib-2.0_2.72.2.bb)17
-rw-r--r--meta/recipes-core/glib-networking/glib-networking_2.72.2.bb (renamed from meta/recipes-core/glib-networking/glib-networking_2.72.0.bb)2
-rw-r--r--meta/recipes-core/glibc/glibc-locale.inc24
-rw-r--r--meta/recipes-core/glibc/glibc-tests_2.35.bb12
-rw-r--r--meta/recipes-core/glibc/glibc-version.inc2
-rw-r--r--meta/recipes-core/glibc/glibc.inc4
-rw-r--r--meta/recipes-core/glibc/glibc/0001-Revert-Linux-Implement-a-useful-version-of-_startup_.patch128
-rw-r--r--meta/recipes-core/glibc/glibc/0002-get_nscd_addresses-Fix-subscript-typos-BZ-29605.patch40
-rw-r--r--meta/recipes-core/glibc/glibc/check-test-wrapper2
-rw-r--r--meta/recipes-core/glibc/glibc/reproducible-paths.patch23
-rw-r--r--meta/recipes-core/glibc/glibc_2.35.bb13
-rw-r--r--meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig-handle-.dynstr-located-in-separate-segment.patch178
-rw-r--r--meta/recipes-core/glibc/ldconfig-native_2.12.1.bb1
-rw-r--r--meta/recipes-core/ifupdown/ifupdown_0.8.39.bb (renamed from meta/recipes-core/ifupdown/ifupdown_0.8.37.bb)2
-rw-r--r--meta/recipes-core/images/build-appliance-image_15.0.0.bb2
-rw-r--r--meta/recipes-core/initrdscripts/files/init-install-efi-testfs.sh2
-rw-r--r--meta/recipes-core/initrdscripts/files/init-install-efi.sh2
-rw-r--r--meta/recipes-core/initrdscripts/files/init-install-testfs.sh2
-rw-r--r--meta/recipes-core/initrdscripts/files/init-install.sh2
-rwxr-xr-xmeta/recipes-core/initrdscripts/initramfs-framework/finish9
-rw-r--r--meta/recipes-core/initscripts/initscripts_1.0.bb2
-rw-r--r--meta/recipes-core/libxcrypt/files/0001-Make-BuildCommon.pm-compatible-with-latest-perl.patch50
-rw-r--r--meta/recipes-core/libxcrypt/files/0002-Remove-smartmatch-usage-from-gen-crypt-h.patch62
-rw-r--r--meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.33.bb (renamed from meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.28.bb)0
-rw-r--r--meta/recipes-core/libxcrypt/libxcrypt.inc15
-rw-r--r--meta/recipes-core/libxcrypt/libxcrypt_4.4.33.bb (renamed from meta/recipes-core/libxcrypt/libxcrypt_4.4.28.bb)0
-rw-r--r--meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch814
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch624
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch106
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch79
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch42
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch37
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch72
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch49
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch79
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch33
-rw-r--r--meta/recipes-core/libxml/libxml2_2.9.14.bb28
-rw-r--r--meta/recipes-core/meta/build-sysroots.bb3
-rw-r--r--meta/recipes-core/meta/buildtools-tarball.bb7
-rw-r--r--meta/recipes-core/meta/cve-update-db-native.bb142
-rw-r--r--meta/recipes-core/meta/cve-update-nvd2-native.bb372
-rw-r--r--meta/recipes-core/meta/wic-tools.bb3
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2023-29491.patch464
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2023-50495.patch81
-rw-r--r--meta/recipes-core/ncurses/ncurses_6.3+20220423.bb2
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch2
-rw-r--r--meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch7
-rw-r--r--meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb1
-rw-r--r--meta/recipes-core/packagegroups/packagegroup-self-hosted.bb5
-rw-r--r--meta/recipes-core/psplash/files/psplash-start.service1
-rw-r--r--meta/recipes-core/psplash/files/psplash-systemd.service1
-rw-r--r--meta/recipes-core/psplash/psplash_git.bb2
-rw-r--r--meta/recipes-core/sysfsutils/sysfsutils_2.1.0.bb10
-rwxr-xr-xmeta/recipes-core/systemd/systemd-systemctl/systemctl8
-rw-r--r--meta/recipes-core/systemd/systemd/00-create-volatile.conf1
-rw-r--r--meta/recipes-core/systemd/systemd/0001-network-remove-only-managed-configs-on-reconfigure-o.patch358
-rw-r--r--meta/recipes-core/systemd/systemd/0001-nspawn-make-sure-host-root-can-write-to-the-uidmappe.patch216
-rw-r--r--meta/recipes-core/systemd/systemd/0001-shared-json-allow-json_variant_dump-to-return-an-err.patch60
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2022-3821.patch45
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2022-4415-1.patch109
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2022-4415-2.patch391
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2022-45873.patch124
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-7008.patch40
-rw-r--r--meta/recipes-core/systemd/systemd/fix-vlan-qos-mapping.patch140
-rw-r--r--meta/recipes-core/systemd/systemd_250.5.bb36
-rw-r--r--meta/recipes-core/sysvinit/sysvinit-inittab/start_getty3
-rw-r--r--meta/recipes-core/udev/udev-extraconf/mount.ignorelist (renamed from meta/recipes-core/udev/udev-extraconf/mount.blacklist)0
-rw-r--r--meta/recipes-core/udev/udev-extraconf/mount.sh92
-rw-r--r--meta/recipes-core/udev/udev-extraconf_1.1.bb27
-rw-r--r--meta/recipes-core/util-linux/util-linux_2.37.4.bb4
-rw-r--r--meta/recipes-core/zlib/zlib/CVE-2022-37434.patch44
-rw-r--r--meta/recipes-core/zlib/zlib/CVE-2023-45853.patch42
-rw-r--r--meta/recipes-core/zlib/zlib_1.2.11.bb5
-rw-r--r--meta/recipes-devtools/apt/apt/0001-add-missing-cstdint-for-uint16_t.patch35
-rw-r--r--meta/recipes-devtools/apt/apt_2.4.5.bb4
-rw-r--r--meta/recipes-devtools/autoconf/autoconf/0001-Port-to-compilers-that-moan-about-K-R-func-decls.patch138
-rw-r--r--meta/recipes-devtools/autoconf/autoconf_2.71.bb1
-rw-r--r--meta/recipes-devtools/automake/automake/buildtest.patch2
-rw-r--r--meta/recipes-devtools/binutils/binutils-2.38.inc41
-rw-r--r--meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch2
-rw-r--r--meta/recipes-devtools/binutils/binutils/0014-CVE-2019-1010204.patch49
-rw-r--r--meta/recipes-devtools/binutils/binutils/0015-CVE-2022-38533.patch36
-rw-r--r--meta/recipes-devtools/binutils/binutils/0016-CVE-2022-38126.patch34
-rw-r--r--meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-1.patch1224
-rw-r--r--meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-2.patch188
-rw-r--r--meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-3.patch211
-rw-r--r--meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-4.patch43
-rw-r--r--meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-1.patch350
-rw-r--r--meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-2.patch436
-rw-r--r--meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-3.patch95
-rw-r--r--meta/recipes-devtools/binutils/binutils/0019-CVE-2022-4285.patch37
-rw-r--r--meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-1.patch506
-rw-r--r--meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-2.patch210
-rw-r--r--meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-3.patch32
-rw-r--r--meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-1.patch459
-rw-r--r--meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-2.patch2127
-rw-r--r--meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-3.patch156
-rw-r--r--meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-4.patch37
-rw-r--r--meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-1.patch56
-rw-r--r--meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-2.patch38
-rw-r--r--meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-3.patch536
-rw-r--r--meta/recipes-devtools/binutils/binutils/0023-CVE-2023-25585.patch54
-rw-r--r--meta/recipes-devtools/binutils/binutils/0025-CVE-2023-25588.patch149
-rw-r--r--meta/recipes-devtools/binutils/binutils/0026-CVE-2023-1972.patch41
-rw-r--r--meta/recipes-devtools/binutils/binutils/0027-CVE-2022-47008.patch67
-rw-r--r--meta/recipes-devtools/binutils/binutils/0028-CVE-2022-47011.patch35
-rw-r--r--meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-1.patch31
-rw-r--r--meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-2.patch115
-rw-r--r--meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-3.patch122
-rw-r--r--meta/recipes-devtools/binutils/binutils/0030-CVE-2022-44840.patch151
-rw-r--r--meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-1.patch147
-rw-r--r--meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-2.patch31
-rw-r--r--meta/recipes-devtools/binutils/binutils/0031-CVE-2022-47695.patch58
-rw-r--r--meta/recipes-devtools/binutils/binutils/0032-CVE-2022-47010.patch38
-rw-r--r--meta/recipes-devtools/binutils/binutils/0033-CVE-2022-47007.patch34
-rw-r--r--meta/recipes-devtools/binutils/binutils/0034-CVE-2022-48064.patch57
-rw-r--r--meta/recipes-devtools/binutils/binutils/0035-CVE-2023-39129.patch50
-rw-r--r--meta/recipes-devtools/binutils/binutils/0036-CVE-2023-39130.patch326
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch48
-rw-r--r--meta/recipes-devtools/bootchart2/bootchart2/0001-bootchart2-support-usrmerge.patch37
-rw-r--r--meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb11
-rw-r--r--meta/recipes-devtools/ccache/ccache/0001-build-Fix-FTBFS-with-not-yet-released-GCC-13.patch92
-rw-r--r--meta/recipes-devtools/ccache/ccache_4.6.bb4
-rw-r--r--meta/recipes-devtools/cmake/cmake-native_3.22.3.bb1
-rw-r--r--meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake9
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1a.patch236
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1b.patch197
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_2.patch83
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_3.patch71
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_4.patch138
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode_3.3.bb5
-rw-r--r--meta/recipes-devtools/dpkg/dpkg/0001-Dpkg-Source-Archive-Prevent-directory-traversal-for-.patch328
-rw-r--r--meta/recipes-devtools/dpkg/dpkg_1.21.4.bb1
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest1
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs_1.46.5.bb3
-rw-r--r--meta/recipes-devtools/elfutils/elfutils_0.186.bb2
-rw-r--r--meta/recipes-devtools/file/file/CVE-2022-48554.patch35
-rw-r--r--meta/recipes-devtools/file/file_5.41.bb4
-rw-r--r--meta/recipes-devtools/gcc/gcc-11.4.inc (renamed from meta/recipes-devtools/gcc/gcc-11.3.inc)18
-rw-r--r--meta/recipes-devtools/gcc/gcc-configure-common.inc1
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross-canadian.inc3
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross-canadian_11.4.bb (renamed from meta/recipes-devtools/gcc/gcc-cross-canadian_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross_11.4.bb (renamed from meta/recipes-devtools/gcc/gcc-cross_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-crosssdk_11.4.bb (renamed from meta/recipes-devtools/gcc/gcc-crosssdk_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-multilib-config.inc2
-rw-r--r--meta/recipes-devtools/gcc/gcc-runtime.inc5
-rw-r--r--meta/recipes-devtools/gcc/gcc-runtime_11.4.bb (renamed from meta/recipes-devtools/gcc/gcc-runtime_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-sanitizers_11.4.bb (renamed from meta/recipes-devtools/gcc/gcc-sanitizers_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-shared-source.inc13
-rw-r--r--meta/recipes-devtools/gcc/gcc-source.inc9
-rw-r--r--meta/recipes-devtools/gcc/gcc-source_11.4.bb (renamed from meta/recipes-devtools/gcc/gcc-source_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-testsuite.inc5
-rw-r--r--meta/recipes-devtools/gcc/gcc/0001-aarch64-Update-Neoverse-N2-core-defini.patch38
-rw-r--r--meta/recipes-devtools/gcc/gcc/0002-aarch64-add-armv9-a-to-march.patch89
-rw-r--r--meta/recipes-devtools/gcc/gcc/0003-aarch64-Enable-FP16-feature-by-default-for-Armv9.patch38
-rw-r--r--meta/recipes-devtools/gcc/gcc/0004-arm-add-armv9-a-architecture-to-march.patch291
-rw-r--r--meta/recipes-devtools/gcc/gcc/0006-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch217
-rw-r--r--meta/recipes-devtools/gcc/gcc/0009-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch6
-rw-r--r--meta/recipes-devtools/gcc/gcc/0019-nios2-Define-MUSL_DYNAMIC_LINKER.patch25
-rw-r--r--meta/recipes-devtools/gcc/gcc/0030-rust-recursion-limit.patch92
-rw-r--r--meta/recipes-devtools/gcc/gcc/0031-gcc-sanitizers-fix.patch63
-rw-r--r--meta/recipes-devtools/gcc/gcc/CVE-2023-4039.patch2893
-rw-r--r--meta/recipes-devtools/gcc/gcc_11.4.bb (renamed from meta/recipes-devtools/gcc/gcc_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgcc-common.inc8
-rw-r--r--meta/recipes-devtools/gcc/libgcc-initial_11.4.bb (renamed from meta/recipes-devtools/gcc/libgcc-initial_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgcc_11.4.bb (renamed from meta/recipes-devtools/gcc/libgcc_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgfortran_11.4.bb (renamed from meta/recipes-devtools/gcc/libgfortran_11.3.bb)0
-rw-r--r--meta/recipes-devtools/gdb/gdb.inc3
-rw-r--r--meta/recipes-devtools/gdb/gdb/0011-CVE-2023-39128.patch75
-rw-r--r--meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39129.patch50
-rw-r--r--meta/recipes-devtools/gdb/gdb/0013-CVE-2023-39130.patch326
-rw-r--r--meta/recipes-devtools/git/git/CVE-2023-25652.patch94
-rw-r--r--meta/recipes-devtools/git/git/CVE-2023-29007.patch162
-rw-r--r--meta/recipes-devtools/git/git_2.35.7.bb (renamed from meta/recipes-devtools/git/git_2.35.3.bb)10
-rw-r--r--meta/recipes-devtools/go/go-1.17.10.inc25
-rw-r--r--meta/recipes-devtools/go/go-1.17.13.inc67
-rw-r--r--meta/recipes-devtools/go/go-1.18/0001-net-http-httputil-avoid-query-parameter-smuggling.patch178
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2022-27664.patch102
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2022-2879.patch177
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2022-41715.patch270
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2022-41717.patch89
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2022-41720.patch514
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2022-41722.patch103
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2022-41723.patch156
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-24534.patch200
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-24537.patch75
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-24538_1.patch597
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-24538_2.patch371
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-24539.patch53
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-29400.patch99
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-29406-1.patch210
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2023-29406-2.patch114
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2024-24784.patch207
-rw-r--r--meta/recipes-devtools/go/go-1.18/CVE-2024-24785.patch196
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-24536_1.patch137
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-24536_2.patch187
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-24536_3.patch349
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-24540.patch93
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-29402.patch194
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-29404.patch78
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-29405.patch109
-rw-r--r--meta/recipes-devtools/go/go-1.19/CVE-2023-29409.patch175
-rw-r--r--meta/recipes-devtools/go/go-1.19/add_godebug.patch84
-rw-r--r--meta/recipes-devtools/go/go-1.19/cve-2022-41724.patch2391
-rw-r--r--meta/recipes-devtools/go/go-1.19/cve-2022-41725.patch652
-rw-r--r--meta/recipes-devtools/go/go-1.20/0010-net-Fix-issue-with-DNS-not-being-updated.patch51
-rw-r--r--meta/recipes-devtools/go/go-1.20/CVE-2023-39319.patch254
-rw-r--r--meta/recipes-devtools/go/go-1.20/CVE-2023-39326.patch182
-rw-r--r--meta/recipes-devtools/go/go-1.20/CVE-2023-45285.patch110
-rw-r--r--meta/recipes-devtools/go/go-1.20/CVE-2023-45287.patch1695
-rw-r--r--meta/recipes-devtools/go/go-1.21/CVE-2023-24531_1.patch252
-rw-r--r--meta/recipes-devtools/go/go-1.21/CVE-2023-24531_2.patch47
-rw-r--r--meta/recipes-devtools/go/go-1.21/CVE-2023-39318.patch262
-rw-r--r--meta/recipes-devtools/go/go-1.21/CVE-2023-45289.patch121
-rw-r--r--meta/recipes-devtools/go/go-1.21/CVE-2023-45290.patch270
-rw-r--r--meta/recipes-devtools/go/go-binary-native_1.17.13.bb (renamed from meta/recipes-devtools/go/go-binary-native_1.17.10.bb)4
-rw-r--r--meta/recipes-devtools/go/go-cross-canadian_1.17.13.bb (renamed from meta/recipes-devtools/go/go-cross-canadian_1.17.10.bb)0
-rw-r--r--meta/recipes-devtools/go/go-cross_1.17.13.bb (renamed from meta/recipes-devtools/go/go-cross_1.17.10.bb)0
-rw-r--r--meta/recipes-devtools/go/go-crosssdk.inc2
-rw-r--r--meta/recipes-devtools/go/go-crosssdk_1.17.13.bb (renamed from meta/recipes-devtools/go/go-crosssdk_1.17.10.bb)0
-rw-r--r--meta/recipes-devtools/go/go-native_1.17.13.bb (renamed from meta/recipes-devtools/go/go-native_1.17.10.bb)2
-rw-r--r--meta/recipes-devtools/go/go-runtime_1.17.13.bb (renamed from meta/recipes-devtools/go/go-runtime_1.17.10.bb)0
-rw-r--r--meta/recipes-devtools/go/go_1.17.13.bb (renamed from meta/recipes-devtools/go/go_1.17.10.bb)4
-rw-r--r--meta/recipes-devtools/json-c/json-c/CVE-2021-32292.patch30
-rw-r--r--meta/recipes-devtools/json-c/json-c/run-ptest20
-rw-r--r--meta/recipes-devtools/json-c/json-c_0.15.bb20
-rw-r--r--meta/recipes-devtools/libdnf/libdnf/0001-Fix-1558-Don-t-assume-inclusion-of-cstdint.patch56
-rw-r--r--meta/recipes-devtools/libdnf/libdnf/0001-libdnf-conf-OptionNumber.hpp-add-missing-cstdint-inc.patch33
-rw-r--r--meta/recipes-devtools/libdnf/libdnf/0001-libdnf-utils-sqlite3-Sqlite3.hpp-add-missing-cstdint.patch36
-rw-r--r--meta/recipes-devtools/libdnf/libdnf_0.66.0.bb3
-rw-r--r--meta/recipes-devtools/llvm/llvm/0001-Support-Add-missing-cstdint-header-to-Signals.h.patch31
-rw-r--r--meta/recipes-devtools/llvm/llvm_git.bb1
-rw-r--r--meta/recipes-devtools/log4cplus/log4cplus_2.0.8.bb (renamed from meta/recipes-devtools/log4cplus/log4cplus_2.0.7.bb)2
-rw-r--r--meta/recipes-devtools/lua/lua/CVE-2022-33099.patch61
-rw-r--r--meta/recipes-devtools/lua/lua/lua.pc.in5
-rw-r--r--meta/recipes-devtools/lua/lua_5.4.4.bb6
-rwxr-xr-xmeta/recipes-devtools/meson/meson/meson-wrapper20
-rw-r--r--meta/recipes-devtools/mtd/mtd-utils_git.bb4
-rw-r--r--meta/recipes-devtools/nasm/nasm/CVE-2020-21528.patch47
-rw-r--r--meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch104
-rw-r--r--meta/recipes-devtools/nasm/nasm/CVE-2022-46457.patch50
-rw-r--r--meta/recipes-devtools/nasm/nasm_2.15.05.bb3
-rw-r--r--meta/recipes-devtools/ninja/ninja_1.10.2.bb3
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils_0.5.0.bb6
-rw-r--r--meta/recipes-devtools/opkg/opkg_0.5.0.bb4
-rw-r--r--meta/recipes-devtools/patchelf/patchelf/handle-read-only-files.patch65
-rw-r--r--meta/recipes-devtools/patchelf/patchelf_0.14.5.bb1
-rw-r--r--meta/recipes-devtools/perl-cross/files/0001-Makefile-check-the-file-if-patched-or-not.patch4
-rw-r--r--meta/recipes-devtools/perl-cross/perlcross_1.5.2.bb (renamed from meta/recipes-devtools/perl-cross/perlcross_1.3.7.bb)2
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2023-31484.patch29
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2023-31486-0001.patch215
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2023-31486-0002.patch36
-rw-r--r--meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb1
-rw-r--r--meta/recipes-devtools/perl/perl-ptest.inc4
-rw-r--r--meta/recipes-devtools/perl/perl_5.34.3.bb (renamed from meta/recipes-devtools/perl/perl_5.34.1.bb)8
-rw-r--r--meta/recipes-devtools/pkgconf/pkgconf/0001-tuple-test-for-and-stop-string-processing-on-truncat.patch75
-rw-r--r--meta/recipes-devtools/pkgconf/pkgconf_1.8.0.bb1
-rw-r--r--meta/recipes-devtools/pseudo/files/glibc238.patch72
-rw-r--r--meta/recipes-devtools/pseudo/pseudo_git.bb3
-rw-r--r--meta/recipes-devtools/python/python3-certifi/CVE-2022-23491.patch230
-rw-r--r--meta/recipes-devtools/python/python3-certifi/CVE-2023-37920.patch301
-rw-r--r--meta/recipes-devtools/python/python3-certifi_2021.10.8.bb4
-rw-r--r--meta/recipes-devtools/python/python3-cryptography/CVE-2023-23931.patch49
-rw-r--r--meta/recipes-devtools/python/python3-cryptography/CVE-2023-49083.patch53
-rw-r--r--meta/recipes-devtools/python/python3-cryptography/CVE-2024-26130.patch66
-rw-r--r--meta/recipes-devtools/python/python3-cryptography_36.0.2.bb3
-rw-r--r--meta/recipes-devtools/python/python3-git_3.1.37.bb (renamed from meta/recipes-devtools/python/python3-git_3.1.27.bb)4
-rw-r--r--meta/recipes-devtools/python/python3-jinja2/run-ptest2
-rw-r--r--meta/recipes-devtools/python/python3-jinja2_3.1.3.bb (renamed from meta/recipes-devtools/python/python3-jinja2_3.1.1.bb)2
-rw-r--r--meta/recipes-devtools/python/python3-mako/CVE-2022-40023.patch119
-rw-r--r--meta/recipes-devtools/python/python3-mako_1.1.6.bb2
-rw-r--r--meta/recipes-devtools/python/python3-pip_22.0.3.bb2
-rw-r--r--meta/recipes-devtools/python/python3-pycryptodome/CVE-2023-52323.patch436
-rw-r--r--meta/recipes-devtools/python/python3-pycryptodome_3.14.1.bb1
-rw-r--r--meta/recipes-devtools/python/python3-pycryptodomex/CVE-2023-52323.patch436
-rw-r--r--meta/recipes-devtools/python/python3-pycryptodomex_3.14.1.bb2
-rw-r--r--meta/recipes-devtools/python/python3-pygments/CVE-2022-40896.patch124
-rw-r--r--meta/recipes-devtools/python/python3-pygments_2.11.2.bb2
-rw-r--r--meta/recipes-devtools/python/python3-pytest_7.1.1.bb2
-rw-r--r--meta/recipes-devtools/python/python3-requests/CVE-2023-32681.patch63
-rw-r--r--meta/recipes-devtools/python/python3-requests_2.27.1.bb2
-rw-r--r--meta/recipes-devtools/python/python3-rfc3986-validator_0.1.1.bb2
-rw-r--r--meta/recipes-devtools/python/python3-setuptools-rust-native_1.1.2.bb4
-rw-r--r--meta/recipes-devtools/python/python3-setuptools/0001-Limit-the-amount-of-whitespace-to-search-backtrack.-.patch31
-rw-r--r--meta/recipes-devtools/python/python3-setuptools_59.5.0.bb1
-rw-r--r--meta/recipes-devtools/python/python3-urllib3_1.26.18.bb (renamed from meta/recipes-devtools/python/python3-urllib3_1.26.9.bb)3
-rw-r--r--meta/recipes-devtools/python/python3-wheel/0001-Fixed-potential-DoS-attack-via-WHEEL_INFO_RE.patch32
-rw-r--r--meta/recipes-devtools/python/python3-wheel_0.37.1.bb4
-rw-r--r--meta/recipes-devtools/python/python3/0001-test_storlines-skip-due-to-load-variability.patch32
-rw-r--r--meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch8
-rw-r--r--meta/recipes-devtools/python/python3/get_module_deps3.py4
-rw-r--r--meta/recipes-devtools/python/python3_3.10.13.bb (renamed from meta/recipes-devtools/python/python3_3.10.4.bb)7
-rw-r--r--meta/recipes-devtools/qemu/qemu-helper-native_1.0.bb6
-rwxr-xr-xmeta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper25
-rw-r--r--meta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper.c34
-rw-r--r--meta/recipes-devtools/qemu/qemu-system-native_6.2.0.bb2
-rw-r--r--meta/recipes-devtools/qemu/qemu.inc92
-rw-r--r--meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Have-qxl_log_command-Return-early-if-.patch57
-rw-r--r--meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch217
-rw-r--r--meta/recipes-devtools/qemu/qemu/0001-net-tulip-Restrict-DMA-engine-to-memories.patch64
-rw-r--r--meta/recipes-devtools/qemu/qemu/0001-softfloat-Extend-float_exception_flags-to-16-bits.patch75
-rw-r--r--meta/recipes-devtools/qemu/qemu/0001-use-uint32t-for-reply-queue-head-tail-values.patch83
-rw-r--r--meta/recipes-devtools/qemu/qemu/0002-softfloat-Add-flag-specific-to-Inf-Inf.patch59
-rw-r--r--meta/recipes-devtools/qemu/qemu/0002_let_dma_memory_valid_function_take_MemTxAttrs_argument.patch60
-rw-r--r--meta/recipes-devtools/qemu/qemu/0003-softfloat-Add-flag-specific-to-Inf-0.patch126
-rw-r--r--meta/recipes-devtools/qemu/qemu/0003_let_dma_memory_set_function_take_MemTxAttrs_argument.patch98
-rw-r--r--meta/recipes-devtools/qemu/qemu/0004-softfloat-Add-flags-specific-to-Inf-Inf-and-0-0.patch73
-rw-r--r--meta/recipes-devtools/qemu/qemu/0004_let_dma_memory_rw_relaxed_function_take_MemTxAttrs_argument.patch78
-rw-r--r--meta/recipes-devtools/qemu/qemu/0005-softfloat-Add-flag-specific-to-signaling-nans.patch121
-rw-r--r--meta/recipes-devtools/qemu/qemu/0005_let_dma_memory_rw_function_take_MemTxAttrs_argument.patch158
-rw-r--r--meta/recipes-devtools/qemu/qemu/0006-target-ppc-Update-float_invalid_op_addsub-for-new-fl.patch114
-rw-r--r--meta/recipes-devtools/qemu/qemu/0006_let_dma_memory_read_write_function_take_MemTxAttrs_argument.patch1453
-rw-r--r--meta/recipes-devtools/qemu/qemu/0007-target-ppc-Update-float_invalid_op_mul-for-new-flags.patch86
-rw-r--r--meta/recipes-devtools/qemu/qemu/0007_let_dma_memory_map_function_take_MemTxAttrs_argument.patch227
-rw-r--r--meta/recipes-devtools/qemu/qemu/0008-target-ppc-Update-float_invalid_op_div-for-new-flags.patch99
-rw-r--r--meta/recipes-devtools/qemu/qemu/0008_have_dma_buf_rw_function_take_a_void_pointer.patch41
-rw-r--r--meta/recipes-devtools/qemu/qemu/0009-target-ppc-Update-fmadd-for-new-flags.patch102
-rw-r--r--meta/recipes-devtools/qemu/qemu/0009_have_dma_buf_read_and_dma_buf_write_functions_take_a_void.patch167
-rw-r--r--meta/recipes-devtools/qemu/qemu/0010-target-ppc-Split-out-do_fmadd.patch71
-rw-r--r--meta/recipes-devtools/qemu/qemu/0010_let_pci_dma_rw_function_take_MemTxAttrs_argument.patch91
-rw-r--r--meta/recipes-devtools/qemu/qemu/0011-target-ppc-Fix-xs-max-min-cj-dp-to-use-VSX-registers.patch93
-rw-r--r--meta/recipes-devtools/qemu/qemu/0011_let_dma_buf_rw_function_take_MemTxAttrs_argument.patch65
-rw-r--r--meta/recipes-devtools/qemu/qemu/0012-target-ppc-Move-xs-max-min-cj-dp-to-decodetree.patch121
-rw-r--r--meta/recipes-devtools/qemu/qemu/0012_let_dma_buf_write_function_take_MemTxAttrs_argument.patch129
-rw-r--r--meta/recipes-devtools/qemu/qemu/0013-target-ppc-fix-xscvqpdp-register-access.patch41
-rw-r--r--meta/recipes-devtools/qemu/qemu/0013_let_dma_buf_read_function_take_MemTxAttrs_argument.patch222
-rw-r--r--meta/recipes-devtools/qemu/qemu/0014-target-ppc-move-xscvqpdp-to-decodetree.patch130
-rw-r--r--meta/recipes-devtools/qemu/qemu/0014_let_dma_buf_rw_function_propagate_MemTxResult.patch91
-rw-r--r--meta/recipes-devtools/qemu/qemu/0015-target-ppc-ppc_store_fpscr-doesn-t-update-bits-0-to-.patch70
-rw-r--r--meta/recipes-devtools/qemu/qemu/0015_let_st_pointer_dma_function_take_MemTxAttrs_argument.patch120
-rw-r--r--meta/recipes-devtools/qemu/qemu/0016-target-ppc-Introduce-TRANS-FLAGS-macros.patch133
-rw-r--r--meta/recipes-devtools/qemu/qemu/0016_let_ld_pointer_dma_function_take_MemTxAttrs_argument.patch151
-rw-r--r--meta/recipes-devtools/qemu/qemu/0017-target-ppc-Implement-Vector-Expand-Mask.patch105
-rw-r--r--meta/recipes-devtools/qemu/qemu/0017_let_st_pointer_dma_function_propagate_MemTxResult.patch65
-rw-r--r--meta/recipes-devtools/qemu/qemu/0018-target-ppc-Implement-Vector-Extract-Mask.patch141
-rw-r--r--meta/recipes-devtools/qemu/qemu/0018_let_ld_pointer_dma_function_propagate_MemTxResult.patch175
-rw-r--r--meta/recipes-devtools/qemu/qemu/0019-target-ppc-Implement-Vector-Mask-Move-insns.patch187
-rw-r--r--meta/recipes-devtools/qemu/qemu/0019_let_st_pointer_pci_dma_function_take_MemTxAttrs_argument.patch303
-rw-r--r--meta/recipes-devtools/qemu/qemu/0020-target-ppc-move-xs-n-madd-am-ds-p-xs-n-msub-am-ds-p-.patch258
-rw-r--r--meta/recipes-devtools/qemu/qemu/0020_let_ld_pointer_pci_dma_function_take_MemTxAttrs_argument.patch271
-rw-r--r--meta/recipes-devtools/qemu/qemu/0021-target-ppc-implement-xs-n-maddqp-o-xs-n-msubqp-o.patch174
-rw-r--r--meta/recipes-devtools/qemu/qemu/0021_let_st_pointer_pci_dma_function_propagate_MemTxResult.patch47
-rw-r--r--meta/recipes-devtools/qemu/qemu/0022_let_ld_pointer_pci_dma_function_propagate_MemTxResult.patch296
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-14394.patch79
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3611_1.patch74
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3611_2.patch43
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch88
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3750-1.patch59
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3750-2.patch65
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3750-3.patch156
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch70
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-4158.patch46
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-0216_1.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-0216_2.patch52
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-0358.patch106
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-3165.patch61
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch99
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch75
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-1544.patch70
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch180
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch50
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3255.patch64
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3301.patch60
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch87
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-42467.patch46
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch112
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-6683.patch92
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-6693.patch74
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2024-24474.patch44
-rw-r--r--meta/recipes-devtools/qemu/qemu/scsi-disk-allow-MODE-SELECT-block-desriptor-to-set-the-block-size.patch54
-rw-r--r--meta/recipes-devtools/qemu/qemu/scsi-disk-ensure-block-size-is-non-zero-and-changes-limited-to-bits-8-15.patch67
-rw-r--r--meta/recipes-devtools/qemu/qemu_6.2.0.bb4
-rw-r--r--meta/recipes-devtools/quilt/quilt.inc3
-rw-r--r--meta/recipes-devtools/quilt/quilt/0001-test-Fix-a-race-condition-in-merge.test.patch48
-rw-r--r--meta/recipes-devtools/quilt/quilt/faildiff-order.patch41
-rw-r--r--meta/recipes-devtools/quilt/quilt/fix-grep-3.8.patch144
-rw-r--r--meta/recipes-devtools/rpm/files/0001-CVE-2021-3521.patch57
-rw-r--r--meta/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch14
-rw-r--r--meta/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch18
-rw-r--r--meta/recipes-devtools/rpm/files/0001-configure.ac-add-linux-gnux32-variant-to-triplet-han.patch31
-rw-r--r--meta/recipes-devtools/rpm/files/0002-CVE-2021-3521.patch64
-rw-r--r--meta/recipes-devtools/rpm/files/0003-CVE-2021-3521.patch329
-rw-r--r--meta/recipes-devtools/rpm/rpm_4.17.1.bb (renamed from meta/recipes-devtools/rpm/rpm_4.17.0.bb)6
-rw-r--r--meta/recipes-devtools/rsync/files/0001-Add-missing-prototypes-to-function-declarations.patch173
-rw-r--r--meta/recipes-devtools/rsync/files/0001-Turn-on-pedantic-errors-at-the-end-of-configure.patch68
-rw-r--r--meta/recipes-devtools/rsync/files/0001-rsync-ssl-Verify-the-hostname-in-the-certificate-whe.patch31
-rw-r--r--meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch12
-rw-r--r--meta/recipes-devtools/rsync/rsync_3.2.5.bb (renamed from meta/recipes-devtools/rsync/rsync_3.2.3.bb)19
-rw-r--r--meta/recipes-devtools/ruby/ruby.inc39
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2023-28755.patch68
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch73
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2023-36617_1.patch52
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2023-36617_2.patch47
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2024-27281.patch97
-rw-r--r--meta/recipes-devtools/ruby/ruby_3.1.3.bb (renamed from meta/recipes-devtools/ruby/ruby_3.1.2.bb)52
-rw-r--r--meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service2
-rw-r--r--meta/recipes-devtools/rust/rust-common.inc29
-rw-r--r--meta/recipes-devtools/rust/rust-cross-canadian-common.inc5
-rw-r--r--meta/recipes-devtools/rust/rust-cross.inc21
-rw-r--r--meta/recipes-devtools/rust/rust-llvm.inc8
-rw-r--r--meta/recipes-devtools/rust/rust-llvm/0003-Support-Add-missing-cstdint-header-to-Signals.h.patch32
-rw-r--r--meta/recipes-devtools/rust/rust-source.inc3
-rw-r--r--meta/recipes-devtools/rust/rust.inc20
-rw-r--r--meta/recipes-devtools/strace/strace/0001-caps-abbrev.awk-fix-gawk-s-path.patch47
-rw-r--r--meta/recipes-devtools/strace/strace/3bbfb541b258baec9eba674b5d8dc30007a61542.patch50
-rw-r--r--meta/recipes-devtools/strace/strace/f31c2f4494779e5c5f170ad10539bfc2dfafe967.patch50
-rw-r--r--meta/recipes-devtools/strace/strace/update-gawk-paths.patch30
-rw-r--r--meta/recipes-devtools/strace/strace_5.16.bb6
-rw-r--r--meta/recipes-devtools/tcltk/tcl/fix_non_native_build_issue.patch2
-rw-r--r--meta/recipes-devtools/tcltk/tcl/run-ptest6
-rw-r--r--meta/recipes-devtools/tcltk/tcl_8.6.11.bb11
-rw-r--r--meta/recipes-devtools/vala/vala.inc17
-rw-r--r--meta/recipes-devtools/vala/vala_0.56.0.bb3
-rw-r--r--meta/recipes-devtools/vala/vala_0.56.3.bb3
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64228
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/remove-for-all2
-rw-r--r--meta/recipes-extended/at/at_3.2.5.bb8
-rw-r--r--meta/recipes-extended/bash/bash/0001-changes-to-SIGINT-handler-while-waiting-for-a-child-.patch229
-rw-r--r--meta/recipes-extended/bash/bash/CVE-2022-3715.patch33
-rw-r--r--meta/recipes-extended/bash/bash_5.1.16.bb2
-rw-r--r--meta/recipes-extended/bc/bc_1.07.1.bb2
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/0001-Unset-need_charset_alias-when-building-for-musl.patch30
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/0002-src-global.c-Remove-superfluous-declaration-of-progr.patch28
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch581
-rw-r--r--meta/recipes-extended/cpio/cpio_2.14.bb (renamed from meta/recipes-extended/cpio/cpio_2.13.bb)7
-rw-r--r--meta/recipes-extended/cpio/files/0001-configure-Include-needed-header-for-major-minor-macr.patch47
-rw-r--r--meta/recipes-extended/cracklib/cracklib_2.9.8.bb (renamed from meta/recipes-extended/cracklib/cracklib_2.9.7.bb)7
-rw-r--r--meta/recipes-extended/cups/cups.inc22
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-32324.patch36
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-32360.patch35
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-34241.patch68
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-4504.patch42
-rw-r--r--meta/recipes-extended/diffutils/diffutils/0001-Skip-strip-trailing-cr-test-case.patch24
-rw-r--r--meta/recipes-extended/diffutils/diffutils/0001-mcontext-is-not-a-standard-layout-so-glibc-and-musl-.patch33
-rw-r--r--meta/recipes-extended/diffutils/diffutils_3.10.bb (renamed from meta/recipes-extended/diffutils/diffutils_3.8.bb)3
-rw-r--r--meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch28
-rw-r--r--meta/recipes-extended/gawk/gawk_5.1.1.bb1
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2022-2085.patch44
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0001.patch146
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0002.patch60
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-38559.patch32
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch62
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-46751.patch41
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/cve-2023-28879.patch60
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript_9.55.0.bb12
-rw-r--r--meta/recipes-extended/gperf/gperf/1862c6e57a308a05889c80c048dbc58bdc378dcb.patch181
-rw-r--r--meta/recipes-extended/gperf/gperf_3.1.bb2
-rw-r--r--meta/recipes-extended/groff/files/0001-Make-manpages-mulitlib-identical.patch2
-rw-r--r--meta/recipes-extended/groff/files/0001-replace-perl-w-with-use-warnings.patch2
-rw-r--r--meta/recipes-extended/less/less/CVE-2022-46663.patch31
-rw-r--r--meta/recipes-extended/less/less/CVE-2022-48624.patch41
-rw-r--r--meta/recipes-extended/less/less_600.bb2
-rw-r--r--meta/recipes-extended/libarchive/libarchive_3.6.2.bb (renamed from meta/recipes-extended/libarchive/libarchive_3.6.1.bb)13
-rw-r--r--meta/recipes-extended/libnss-nis/libnss-nis.bb4
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch155
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc_1.3.2.bb6
-rw-r--r--meta/recipes-extended/lighttpd/lighttpd_1.4.67.bb (renamed from meta/recipes-extended/lighttpd/lighttpd_1.4.64.bb)2
-rw-r--r--meta/recipes-extended/logrotate/logrotate_3.20.1.bb1
-rw-r--r--meta/recipes-extended/lsof/lsof_4.94.0.bb9
-rw-r--r--meta/recipes-extended/ltp/ltp/0001-clock_gettime04-set-threshold-based-on-the-clock-res.patch89
-rw-r--r--meta/recipes-extended/ltp/ltp/0001-syscalls-pread02-extend-buffer-to-avoid-glibc-overflow-detection.patch58
-rw-r--r--meta/recipes-extended/ltp/ltp_20220121.bb2
-rw-r--r--meta/recipes-extended/mdadm/files/0001-DDF-Cleanup-validate_geometry_ddf_container.patch148
-rw-r--r--meta/recipes-extended/mdadm/files/0001-mdadm-Fix-optional-write-behind-parameter.patch45
-rw-r--r--meta/recipes-extended/mdadm/files/0001-tests-00raid0-add-a-test-that-validates-raid0-with-l.patch41
-rw-r--r--meta/recipes-extended/mdadm/files/0001-tests-00readonly-Run-udevadm-settle-before-setting-r.patch39
-rw-r--r--meta/recipes-extended/mdadm/files/0001-tests-02lineargrow-clear-the-superblock-at-every-ite.patch33
-rw-r--r--meta/recipes-extended/mdadm/files/0001-tests-04update-metadata-avoid-passing-chunk-size-to.patch41
-rw-r--r--meta/recipes-extended/mdadm/files/0001-tests-fix-raid0-tests-for-0.90-metadata.patch102
-rw-r--r--meta/recipes-extended/mdadm/files/0002-DDF-Fix-NULL-pointer-dereference-in-validate_geometr.patch56
-rw-r--r--meta/recipes-extended/mdadm/files/0003-mdadm-Grow-Fix-use-after-close-bug-by-closing-after-.patch91
-rw-r--r--meta/recipes-extended/mdadm/files/0004-monitor-Avoid-segfault-when-calling-NULL-get_bad_blo.patch42
-rw-r--r--meta/recipes-extended/mdadm/files/0005-mdadm-test-Mark-and-ignore-broken-test-failures.patch128
-rw-r--r--meta/recipes-extended/mdadm/files/0006-tests-Add-broken-files-for-all-broken-tests.patch454
-rw-r--r--meta/recipes-extended/mdadm/files/run-ptest2
-rw-r--r--meta/recipes-extended/mdadm/mdadm_4.2.bb22
-rw-r--r--meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch35
-rw-r--r--meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch37
-rw-r--r--meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch42
-rw-r--r--meta/recipes-extended/newt/files/0001-detect-gold-as-GNU-linker-too.patch14
-rw-r--r--meta/recipes-extended/newt/files/0002-don-t-ignore-CFLAGS-when-building-snack.patch29
-rw-r--r--meta/recipes-extended/newt/libnewt_0.52.23.bb (renamed from meta/recipes-extended/newt/libnewt_0.52.21.bb)4
-rw-r--r--meta/recipes-extended/pam/libpam/0001-pam_motd-do-not-rely-on-all-filesystems-providing-a-.patch108
-rw-r--r--meta/recipes-extended/pam/libpam/99_pam2
-rw-r--r--meta/recipes-extended/pam/libpam/CVE-2022-28321-0002.patch205
-rw-r--r--meta/recipes-extended/pam/libpam/CVE-2024-22365.patch62
-rw-r--r--meta/recipes-extended/pam/libpam_1.5.2.bb3
-rw-r--r--meta/recipes-extended/parted/files/run-ptest6
-rw-r--r--meta/recipes-extended/procps/procps/CVE-2023-4016.patch85
-rw-r--r--meta/recipes-extended/procps/procps_3.3.17.bb1
-rw-r--r--meta/recipes-extended/psmisc/psmisc.inc2
-rw-r--r--meta/recipes-extended/screen/screen/CVE-2023-24626.patch40
-rw-r--r--meta/recipes-extended/screen/screen_4.9.0.bb1
-rw-r--r--meta/recipes-extended/shadow/files/0001-Drop-nsswitch.conf-message-when-not-in-place-eg.-musl.patch27
-rw-r--r--meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch65
-rw-r--r--meta/recipes-extended/shadow/files/CVE-2023-29383.patch53
-rw-r--r--meta/recipes-extended/shadow/files/CVE-2023-4641-0001.patch36
-rw-r--r--meta/recipes-extended/shadow/files/CVE-2023-4641-0002.patch147
-rw-r--r--meta/recipes-extended/shadow/files/login.defs_shadow-sysroot1
-rw-r--r--meta/recipes-extended/shadow/shadow-sysroot_4.6.bb2
-rw-r--r--meta/recipes-extended/shadow/shadow.inc13
-rw-r--r--meta/recipes-extended/shadow/shadow_4.11.1.bb3
-rw-r--r--meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-Makefile-avoid-calling-sync.patch35
-rw-r--r--meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-stress-cpu-disable-float128-math-on-powerpc64-to-avo.patch43
-rw-r--r--meta/recipes-extended/stress-ng/stress-ng_0.13.12.bb5
-rw-r--r--meta/recipes-extended/sudo/files/0001-lib-util-mksigname.c-correctly-include-header-for-ou.patch25
-rw-r--r--meta/recipes-extended/sudo/files/0001-sudo.conf.in-fix-conflict-with-multilib.patch21
-rw-r--r--meta/recipes-extended/sudo/sudo.inc7
-rw-r--r--meta/recipes-extended/sudo/sudo_1.9.15p2.bb (renamed from meta/recipes-extended/sudo/sudo_1.9.10.bb)3
-rw-r--r--meta/recipes-extended/sysklogd/files/0001-syslogd.service-KillMode-process-is-not-recommended-.patch33
-rw-r--r--meta/recipes-extended/sysklogd/files/0002-Fix-62-early-log-messages-lost-when-running-in-syste.patch75
-rw-r--r--meta/recipes-extended/sysklogd/sysklogd_2.3.0.bb2
-rw-r--r--meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch93
-rw-r--r--meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch80
-rw-r--r--meta/recipes-extended/sysstat/sysstat_12.4.5.bb6
-rw-r--r--meta/recipes-extended/tar/tar_1.35.bb (renamed from meta/recipes-extended/tar/tar_1.34.bb)4
-rw-r--r--meta/recipes-extended/tcp-wrappers/tcp-wrappers-7.6/0001-Fix-implicit-function-declaration-warnings.patch109
-rw-r--r--meta/recipes-extended/tcp-wrappers/tcp-wrappers_7.6.bb1
-rw-r--r--meta/recipes-extended/timezone/timezone.inc13
-rw-r--r--meta/recipes-extended/timezone/tzcode-native.bb3
-rw-r--r--meta/recipes-extended/timezone/tzdata.bb16
-rw-r--r--meta/recipes-extended/unzip/unzip/0001-unix-configure-fix-detection-for-cross-compilation.patch103
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch39
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch33
-rw-r--r--meta/recipes-extended/unzip/unzip_6.0.bb3
-rw-r--r--meta/recipes-extended/watchdog/watchdog/0001-shutdown-Do-not-guard-sys-quota.h-sys-swap.h-and-sys.patch37
-rw-r--r--meta/recipes-extended/watchdog/watchdog_5.16.bb1
-rw-r--r--meta/recipes-extended/wget/wget.inc2
-rw-r--r--meta/recipes-extended/wget/wget_1.21.4.bb (renamed from meta/recipes-extended/wget/wget_1.21.3.bb)2
-rw-r--r--meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch165
-rw-r--r--meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb1
-rw-r--r--meta/recipes-extended/xinetd/xinetd_2.3.15.4.bb2
-rw-r--r--meta/recipes-extended/xz/xz/CVE-2022-1271.patch96
-rw-r--r--meta/recipes-extended/xz/xz_5.2.6.bb (renamed from meta/recipes-extended/xz/xz_5.2.5.bb)7
-rw-r--r--meta/recipes-extended/zip/zip-3.0/0001-unix-configure-use-_Static_assert-to-do-correct-dete.patch96
-rw-r--r--meta/recipes-extended/zip/zip_3.0.bb1
-rw-r--r--meta/recipes-gnome/epiphany/epiphany_42.4.bb (renamed from meta/recipes-gnome/epiphany/epiphany_42.2.bb)3
-rw-r--r--meta/recipes-gnome/epiphany/files/CVE-2023-26081.patch90
-rw-r--r--meta/recipes-gnome/gcr/gcr_3.40.0.bb2
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-Add-use_prebuilt_tools-option.patch171
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-meson.build-allow-a-subset-of-tests-in-cross-compile.patch66
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/fatal-loader.patch20
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.42.10.bb (renamed from meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.42.6.bb)29
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection_1.72.0.bb3
-rw-r--r--meta/recipes-gnome/gtk-doc/gtk-doc_1.33.2.bb2
-rw-r--r--meta/recipes-gnome/librsvg/librsvg_2.52.10.bb (renamed from meta/recipes-gnome/librsvg/librsvg_2.52.7.bb)4
-rw-r--r--meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch21
-rw-r--r--meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch46
-rw-r--r--meta/recipes-graphics/cairo/cairo_1.16.0.bb6
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch41
-rw-r--r--meta/recipes-graphics/freetype/freetype_2.11.1.bb3
-rw-r--r--meta/recipes-graphics/glslang/glslang_1.3.204.1.bb2
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/0001-Fix-conditional.patch25
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2022-33068.patch35
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch135
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch185
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz_4.0.1.bb12
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch103
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch75
-rw-r--r--meta/recipes-graphics/jpeg/libjpeg-turbo_2.1.5.1.bb (renamed from meta/recipes-graphics/jpeg/libjpeg-turbo_2.1.3.bb)4
-rw-r--r--meta/recipes-graphics/kmscube/kmscube_git.bb3
-rw-r--r--meta/recipes-graphics/libepoxy/files/0001-dispatch_common.h-define-also-EGL_NO_X11.patch27
-rw-r--r--meta/recipes-graphics/libepoxy/libepoxy_1.5.10.bb (renamed from meta/recipes-graphics/libepoxy/libepoxy_1.5.9.bb)7
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/0001-Fix-potential-memory-leak-in-GLES_CreateTextur.patch40
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2_2.0.20.bb1
-rw-r--r--meta/recipes-graphics/piglit/piglit/0002-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch (renamed from meta/recipes-graphics/piglit/piglit/0001-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch)0
-rw-r--r--meta/recipes-graphics/piglit/piglit/0003-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch (renamed from meta/recipes-graphics/piglit/piglit/0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch)0
-rw-r--r--meta/recipes-graphics/piglit/piglit/0005-cmake-Don-t-enable-GLX-if-tests-are-disabled.patch32
-rw-r--r--meta/recipes-graphics/piglit/piglit_git.bb13
-rw-r--r--meta/recipes-graphics/spir/spirv-headers_1.3.204.1.bb2
-rw-r--r--meta/recipes-graphics/vulkan/vulkan-samples_git.bb2
-rw-r--r--meta/recipes-graphics/waffle/waffle/0001-meson.build-request-native-wayland-scanner.patch28
-rw-r--r--meta/recipes-graphics/waffle/waffle/0001-waffle-do-not-make-core-protocol-into-the-library.patch23
-rw-r--r--meta/recipes-graphics/waffle/waffle_1.7.2.bb (renamed from meta/recipes-graphics/waffle/waffle_1.7.0.bb)9
-rw-r--r--meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch111
-rw-r--r--meta/recipes-graphics/wayland/wayland_1.20.0.bb2
-rw-r--r--meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch32
-rw-r--r--meta/recipes-graphics/wayland/weston_10.0.2.bb (renamed from meta/recipes-graphics/wayland/weston_10.0.0.bb)8
-rw-r--r--meta/recipes-graphics/xorg-app/mkfontscale_1.2.2.bb (renamed from meta/recipes-graphics/xorg-app/mkfontscale_1.2.1.bb)4
-rw-r--r--meta/recipes-graphics/xorg-app/xdpyinfo_1.3.4.bb (renamed from meta/recipes-graphics/xorg-app/xdpyinfo_1.3.2.bb)4
-rw-r--r--meta/recipes-graphics/xorg-app/xev_1.2.5.bb (renamed from meta/recipes-graphics/xorg-app/xev_1.2.4.bb)4
-rw-r--r--meta/recipes-graphics/xorg-app/xmodmap_1.0.11.bb (renamed from meta/recipes-graphics/xorg-app/xmodmap_1.0.10.bb)5
-rw-r--r--meta/recipes-graphics/xorg-app/xorg-app-common.inc3
-rw-r--r--meta/recipes-graphics/xorg-app/xrandr_1.5.1.bb3
-rw-r--r--meta/recipes-graphics/xorg-driver/xf86-input-synaptics/64bit_time_t_support.patch51
-rw-r--r--meta/recipes-graphics/xorg-driver/xf86-input-synaptics_1.9.2.bb (renamed from meta/recipes-graphics/xorg-driver/xf86-input-synaptics_1.9.1.bb)7
-rw-r--r--meta/recipes-graphics/xorg-font/encodings/nocompiler.patch8
-rw-r--r--meta/recipes-graphics/xorg-font/encodings_1.0.6.bb (renamed from meta/recipes-graphics/xorg-font/encodings_1.0.5.bb)6
-rw-r--r--meta/recipes-graphics/xorg-font/font-util_1.3.3.bb (renamed from meta/recipes-graphics/xorg-font/font-util_1.3.2.bb)5
-rw-r--r--meta/recipes-graphics/xorg-font/xorg-font-common.inc3
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch58
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch40
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch111
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch62
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0001.patch41
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0002.patch45
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0003.patch51
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787.patch63
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11_1.7.3.1.bb8
-rw-r--r--meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb (renamed from meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb)7
-rw-r--r--meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch33
-rw-r--r--meta/recipes-graphics/xorg-lib/pixman_0.40.0.bb3
-rw-r--r--meta/recipes-graphics/xorg-lib/xorg-lib-common.inc3
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg.inc8
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-render-Fix-build-with-gcc-12.patch90
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch84
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch102
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch79
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch63
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch55
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch87
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch221
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch41
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch45
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch64
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch46
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch113
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch74
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch57
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch49
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch47
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg_21.1.8.bb (renamed from meta/recipes-graphics/xorg-xserver/xserver-xorg_21.1.3.bb)23
-rw-r--r--meta/recipes-graphics/xwayland/xwayland/CVE-2023-5367.patch85
-rw-r--r--meta/recipes-graphics/xwayland/xwayland/CVE-2023-6377.patch82
-rw-r--r--meta/recipes-graphics/xwayland/xwayland/CVE-2023-6478.patch66
-rw-r--r--meta/recipes-graphics/xwayland/xwayland/CVE-2023-6816.patch57
-rw-r--r--meta/recipes-graphics/xwayland/xwayland/CVE-2024-0408.patch65
-rw-r--r--meta/recipes-graphics/xwayland/xwayland/CVE-2024-0409.patch47
-rw-r--r--meta/recipes-graphics/xwayland/xwayland_22.1.8.bb (renamed from meta/recipes-graphics/xwayland/xwayland_22.1.1.bb)13
-rw-r--r--meta/recipes-kernel/blktrace/blktrace/0001-bno_plot.py-btt_plot.py-Ask-for-python3-specifically.patch35
-rw-r--r--meta/recipes-kernel/blktrace/blktrace_git.bb4
-rw-r--r--meta/recipes-kernel/kern-tools/kern-tools-native_git.bb2
-rw-r--r--meta/recipes-kernel/kmod/kmod/ptest.patch25
-rw-r--r--meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb (renamed from meta/recipes-kernel/linux-firmware/linux-firmware_20220610.bb)396
-rw-r--r--meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.16.bb2
-rw-r--r--meta/recipes-kernel/linux/cve-exclusion.inc6
-rw-r--r--meta/recipes-kernel/linux/cve-exclusion_5.10.inc7565
-rw-r--r--meta/recipes-kernel/linux/cve-exclusion_5.15.inc7753
-rwxr-xr-xmeta/recipes-kernel/linux/generate-cve-exclusions.py101
-rw-r--r--meta/recipes-kernel/linux/kernel-devsrc.bb10
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-dev.bb4
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_5.10.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_5.15.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_5.10.bb10
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_5.15.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto.inc6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_5.10.bb27
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_5.15.bb29
-rw-r--r--meta/recipes-kernel/lttng/babeltrace2_2.0.5.bb (renamed from meta/recipes-kernel/lttng/babeltrace2_2.0.4.bb)3
-rw-r--r--meta/recipes-kernel/lttng/babeltrace_1.5.11.bb (renamed from meta/recipes-kernel/lttng/babeltrace_1.5.8.bb)2
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0001-Fix-compaction-migratepages-event-name.patch37
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0001-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch44
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0001-fix-sched-tracing-Append-prev_state-to-tp-args-inste.patch59
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0002-Fix-tracepoint-event-allow-same-provider-and-event-n.patch48
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0003-fix-sched-tracing-Don-t-re-read-p-state-when-emittin.patch183
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0004-fix-block-remove-genhd.h-v5.18.patch45
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0005-fix-scsi-block-Remove-REQ_OP_WRITE_SAME-support-v5.1.patch79
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0006-fix-random-remove-unused-tracepoints-v5.18.patch47
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0007-fix-kprobes-Use-rethook-for-kretprobe-if-possible-v5.patch72
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0008-fix-scsi-core-Remove-scsi-scsi_request.h-v5.18.patch44
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0010-fix-mm-compaction-cleanup-the-compaction-trace-event.patch106
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules_2.13.9.bb (renamed from meta/recipes-kernel/lttng/lttng-modules_2.13.3.bb)15
-rw-r--r--meta/recipes-kernel/lttng/lttng-platforms.inc4
-rw-r--r--meta/recipes-kernel/lttng/lttng-tools/determinism.patch64
-rw-r--r--meta/recipes-kernel/lttng/lttng-tools_2.13.9.bb (renamed from meta/recipes-kernel/lttng/lttng-tools_2.13.4.bb)12
-rw-r--r--meta/recipes-kernel/lttng/lttng-ust_2.13.6.bb (renamed from meta/recipes-kernel/lttng/lttng-ust_2.13.3.bb)2
-rw-r--r--meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb5
-rw-r--r--meta/recipes-kernel/perf/perf.bb19
-rwxr-xr-xmeta/recipes-kernel/perf/perf/sort-pmuevents.py13
-rw-r--r--meta/recipes-kernel/systemtap/systemtap/0001-bpf-translate.cxx-Prevent-Werror-maybe-uninitialized.patch53
-rw-r--r--meta/recipes-kernel/systemtap/systemtap_git.bb1
-rw-r--r--meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb (renamed from meta/recipes-kernel/wireless-regdb/wireless-regdb_2022.04.08.bb)4
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-rpzaenc-stop-accessing-out-of-bounds-frame.patch86
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-smcenc-stop-accessing-out-of-bounds-frame.patch106
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-vp3-Add-missing-check-for-av_malloc.patch42
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avformat-nutdec-Add-check-for-avformat_new_stream.patch67
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch130
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg_5.0.1.bb14
-rw-r--r--meta/recipes-multimedia/flac/files/CVE-2020-22219.patch197
-rw-r--r--meta/recipes-multimedia/flac/flac_1.3.4.bb1
-rw-r--r--meta/recipes-multimedia/gstreamer/gst-devtools_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gst-devtools_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40474.patch118
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40475.patch49
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40476.patch44
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-44429.patch38
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.20.2.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.20.2.bb)8
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.20.2.bb)2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0/0005-tests-remove-gstbin-test_watch_for_state_change-test.patch107
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0_1.20.7.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0_1.20.2.bb)3
-rw-r--r--meta/recipes-multimedia/libpng/files/run-ptest29
-rw-r--r--meta/recipes-multimedia/libpng/libpng_1.6.39.bb (renamed from meta/recipes-multimedia/libpng/libpng_1.6.37.bb)21
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/0001-flac-Fix-improper-buffer-reusing-732.patch29
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch46
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1_1.0.31.bb2
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-Revised-handling-of-TIFFTAG_INKNAMES-and-related-TIF.patch267
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-fix-the-FPE-in-tiffcrop-415-427-and-428.patch182
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-tif_jbig.c-fix-crash-when-reading-a-file-with-multip.patch14
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-Fix-issue-330-and-some-more-from-320-to-349.patch607
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-S-option-Make-decision-simpler.patch36
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-disable-incompatibility-of-Z-X-Y-z-options-.patch59
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-subroutines-require-a-larger-buffer-fixes-2.patch640
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch13
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0002-tiffcrop-fix-issue-380-and-382-heap-buffer-overflow-.patch14
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0003-add-checks-for-return-value-of-limitMalloc-392.patch15
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0004-TIFFFetchNormalTag-avoid-calling-memcpy-with-a-null-.patch16
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0005-fix-the-FPE-in-tiffcrop-393.patch15
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/0006-fix-heap-buffer-overflow-in-tiffcp-278.patch15
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch9
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch210
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch60
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-2867.patch129
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-2869.patch84
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-2953.patch87
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-34526.patch27
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-3970.patch38
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-40090.patch569
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-48281.patch26
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-0795_0796_0797_0798_0799.patch162
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-0800_0801_0802_0803_0804.patch128
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-1916.patch99
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-25433.patch195
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-25434-CVE-2023-25435.patch94
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-26965.patch97
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-26966.patch35
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-2908.patch33
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-3316.patch59
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-3576.patch35
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-3618.patch47
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-40745.patch34
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-41175.patch69
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-52356.patch54
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-6228.patch31
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-1.patch178
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-2.patch151
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-3.patch46
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-4.patch93
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/b258ed69a485a9cfb299d9f060eb2a46c54e5903.patch46
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch9
-rw-r--r--meta/recipes-multimedia/libtiff/tiff_4.3.0.bb37
-rw-r--r--meta/recipes-multimedia/pulseaudio/pulseaudio.inc2
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-1999.patch60
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch366
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch53
-rw-r--r--meta/recipes-multimedia/webp/libwebp_1.2.4.bb (renamed from meta/recipes-multimedia/webp/libwebp_1.2.2.bb)8
-rwxr-xr-xmeta/recipes-rt/rt-tests/files/rt_bmark.py2
-rw-r--r--meta/recipes-sato/webkit/libwpe_1.12.3.bb (renamed from meta/recipes-sato/webkit/libwpe_1.12.0.bb)2
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2022-32888.patch41
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2022-32923.patch435
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2022-42867.patch104
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2022-46691.patch43
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2022-46699.patch136
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2022-46700.patch67
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2022-48503.patch225
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2023-23529.patch65
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2023-32439.patch127
-rw-r--r--meta/recipes-sato/webkit/webkitgtk_2.36.8.bb (renamed from meta/recipes-sato/webkit/webkitgtk_2.36.3.bb)14
-rw-r--r--meta/recipes-sato/webkit/wpebackend-fdo_1.14.2.bb (renamed from meta/recipes-sato/webkit/wpebackend-fdo_1.12.0.bb)2
-rw-r--r--meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch134
-rw-r--r--meta/recipes-support/apr/apr-util_1.6.3.bb (renamed from meta/recipes-support/apr/apr-util_1.6.1.bb)6
-rw-r--r--meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch20
-rw-r--r--meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch58
-rw-r--r--meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch25
-rw-r--r--meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch63
-rw-r--r--meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch76
-rw-r--r--meta/recipes-support/apr/apr/CVE-2021-35940.patch58
-rw-r--r--meta/recipes-support/apr/apr/autoconf270.patch22
-rw-r--r--meta/recipes-support/apr/apr/libtoolize_check.patch21
-rw-r--r--meta/recipes-support/apr/apr_1.7.2.bb (renamed from meta/recipes-support/apr/apr_1.7.0.bb)25
-rw-r--r--meta/recipes-support/attr/acl/run-ptest6
-rw-r--r--meta/recipes-support/attr/acl_2.3.1.bb1
-rw-r--r--meta/recipes-support/attr/attr.inc1
-rw-r--r--meta/recipes-support/attr/attr/run-ptest7
-rw-r--r--meta/recipes-support/bmap-tools/bmap-tools_git.bb2
-rw-r--r--meta/recipes-support/boost/boost/0001-Don-t-skip-install-targets-if-there-s-build-no-in-ur.patch82
-rw-r--r--meta/recipes-support/boost/boost_1.78.0.bb1
-rw-r--r--meta/recipes-support/curl/curl/0001-openssl-fix-CN-check-error-code.patch38
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32205.patch174
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32206.patch51
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32207.patch283
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32208.patch67
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32221.patch28
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-35252.patch72
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-42915.patch53
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-42916.patch136
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-43551.patch35
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-43552.patch80
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23914_5-1.patch280
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23914_5-2.patch23
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23914_5-3.patch45
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23914_5-4.patch48
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23914_5-5.patch118
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23916.patch219
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27533.patch208
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27534.patch122
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch196
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27535_and_CVE-2023-27538.patch170
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27536.patch53
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28319.patch33
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch197
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28320.patch83
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28321.patch302
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28322-1.patch84
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28322-2.patch436
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-38545.patch133
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-38546.patch137
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-46218.patch52
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-46219-0001.patch42
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-46219-0002.patch133
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-46219-0003.patch81
-rw-r--r--meta/recipes-support/curl/curl/CVE-2024-2398.patch89
-rw-r--r--meta/recipes-support/curl/curl_7.82.0.bb48
-rw-r--r--meta/recipes-support/fribidi/fribidi_1.0.13.bb (renamed from meta/recipes-support/fribidi/fribidi_1.0.12.bb)2
-rw-r--r--meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch6
-rw-r--r--meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch29
-rw-r--r--meta/recipes-support/gnupg/gnupg/relocate.patch18
-rw-r--r--meta/recipes-support/gnupg/gnupg_2.3.7.bb (renamed from meta/recipes-support/gnupg/gnupg_2.3.4.bb)3
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch282
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch85
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch206
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch125
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2024-0567.patch184
-rw-r--r--meta/recipes-support/gnutls/gnutls_3.7.4.bb7
-rw-r--r--meta/recipes-support/gnutls/libtasn1_4.19.0.bb (renamed from meta/recipes-support/gnutls/libtasn1_4.18.0.bb)2
-rw-r--r--meta/recipes-support/iso-codes/iso-codes_4.15.0.bb (renamed from meta/recipes-support/iso-codes/iso-codes_4.10.0.bb)2
-rw-r--r--meta/recipes-support/libassuan/libassuan_2.5.6.bb (renamed from meta/recipes-support/libassuan/libassuan_2.5.5.bb)2
-rw-r--r--meta/recipes-support/libatomic-ops/libatomic-ops_7.6.14.bb (renamed from meta/recipes-support/libatomic-ops/libatomic-ops_7.6.12.bb)4
-rw-r--r--meta/recipes-support/libbsd/libbsd_0.11.5.bb7
-rw-r--r--meta/recipes-support/libcap/files/0001-nativesdk-libcap-Raise-the-size-of-arrays-containing.patch2
-rw-r--r--meta/recipes-support/libcap/files/CVE-2023-2602.patch45
-rw-r--r--meta/recipes-support/libcap/files/CVE-2023-2603.patch60
-rw-r--r--meta/recipes-support/libcap/libcap_2.66.bb (renamed from meta/recipes-support/libcap/libcap_2.63.bb)4
-rw-r--r--meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch8
-rw-r--r--meta/recipes-support/libffi/libffi/not-win32.patch8
-rw-r--r--meta/recipes-support/libffi/libffi_3.4.4.bb (renamed from meta/recipes-support/libffi/libffi_3.4.2.bb)6
-rw-r--r--meta/recipes-support/libgit2/libgit2/CVE-2024-24575.patch56
-rw-r--r--meta/recipes-support/libgit2/libgit2/CVE-2024-24577.patch52
-rw-r--r--meta/recipes-support/libgit2/libgit2_1.4.5.bb (renamed from meta/recipes-support/libgit2/libgit2_1.4.3.bb)7
-rw-r--r--meta/recipes-support/libical/libical_3.0.16.bb (renamed from meta/recipes-support/libical/libical_3.0.14.bb)2
-rw-r--r--meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch6
-rw-r--r--meta/recipes-support/libksba/libksba_1.6.4.bb (renamed from meta/recipes-support/libksba/libksba_1.6.0.bb)2
-rw-r--r--meta/recipes-support/libmicrohttpd/libmicrohttpd_0.9.76.bb (renamed from meta/recipes-support/libmicrohttpd/libmicrohttpd_0.9.75.bb)2
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch75
-rw-r--r--meta/recipes-support/libpcre/libpcre2_10.40.bb1
-rw-r--r--meta/recipes-support/libseccomp/files/run-ptest3
-rw-r--r--meta/recipes-support/libseccomp/libseccomp_2.5.3.bb2
-rw-r--r--meta/recipes-support/libsoup/libsoup_3.0.7.bb (renamed from meta/recipes-support/libsoup/libsoup_3.0.6.bb)2
-rw-r--r--meta/recipes-support/libssh2/files/0001-Don-t-let-host-enviroment-to-decide-if-a-test-is-bui.patch44
-rw-r--r--meta/recipes-support/libssh2/libssh2/CVE-2020-22218.patch34
-rw-r--r--meta/recipes-support/libssh2/libssh2/CVE-2023-48795.patch459
-rw-r--r--meta/recipes-support/libssh2/libssh2/fix-ssh2-test.patch23
-rw-r--r--meta/recipes-support/libssh2/libssh2/run-ptest (renamed from meta/recipes-support/libssh2/files/run-ptest)3
-rw-r--r--meta/recipes-support/libssh2/libssh2_1.10.0.bb5
-rw-r--r--meta/recipes-support/liburcu/liburcu_0.13.2.bb (renamed from meta/recipes-support/liburcu/liburcu_0.13.1.bb)2
-rw-r--r--meta/recipes-support/libusb/libusb1/0001-configure.ac-Link-with-latomic-only-if-no-atomic-bui.patch46
-rw-r--r--meta/recipes-support/libusb/libusb1_1.0.26.bb13
-rw-r--r--meta/recipes-support/lz4/files/CVE-2021-3520.patch27
-rw-r--r--meta/recipes-support/lz4/lz4_1.9.4.bb (renamed from meta/recipes-support/lz4/lz4_1.9.3.bb)10
-rw-r--r--meta/recipes-support/mpfr/mpfr_4.1.1.bb (renamed from meta/recipes-support/mpfr/mpfr_4.1.0.bb)2
-rw-r--r--meta/recipes-support/nghttp2/nghttp2/CVE-2023-35945.patch151
-rw-r--r--meta/recipes-support/nghttp2/nghttp2/CVE-2023-44487.patch927
-rw-r--r--meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0001.patch110
-rw-r--r--meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0002.patch105
-rw-r--r--meta/recipes-support/nghttp2/nghttp2_1.47.0.bb14
-rw-r--r--meta/recipes-support/numactl/numactl/Fix-the-test-output-format.patch3
-rwxr-xr-xmeta/recipes-support/numactl/numactl/run-ptest6
-rw-r--r--meta/recipes-support/numactl/numactl_git.bb6
-rw-r--r--meta/recipes-support/p11-kit/p11-kit_0.24.1.bb2
-rw-r--r--meta/recipes-support/pinentry/pinentry_1.2.0.bb3
-rw-r--r--meta/recipes-support/serf/serf/0001-Fix-syntax-of-a-print-in-the-scons-file-to-unbreak-b.patch29
-rw-r--r--meta/recipes-support/serf/serf/0001-buckets-ssl_buckets.c-do-not-use-ERR_GET_FUNC.patch28
-rw-r--r--meta/recipes-support/serf/serf/0004-Follow-up-to-r1811083-fix-building-with-scons-3.0.0-.patch29
-rw-r--r--meta/recipes-support/serf/serf/SConstruct.stop.creating.directories.without.sandbox-install.prefix.patch2
-rw-r--r--meta/recipes-support/serf/serf_1.3.10.bb (renamed from meta/recipes-support/serf/serf_1.3.9.bb)6
-rw-r--r--meta/recipes-support/sqlite/files/0001-sqlite-Increased-the-size-of-loop-variables-in-the-printf-implementation.patch26
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2022-46908.patch39
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2023-36191.patch37
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2023-7104.patch44
-rw-r--r--meta/recipes-support/sqlite/sqlite3_3.38.5.bb7
-rw-r--r--meta/recipes-support/vim/files/racefix.patch33
-rw-r--r--meta/recipes-support/vim/vim-tiny_9.0.bb (renamed from meta/recipes-support/vim/vim-tiny_8.2.bb)0
-rw-r--r--meta/recipes-support/vim/vim.inc36
-rw-r--r--meta/recipes-support/vim/vim_9.0.bb (renamed from meta/recipes-support/vim/vim_8.2.bb)0
-rwxr-xr-xscripts/combo-layer32
-rwxr-xr-xscripts/contrib/bbvars.py6
-rwxr-xr-xscripts/contrib/convert-overrides.py111
-rwxr-xr-xscripts/contrib/image-manifest2
-rwxr-xr-xscripts/create-pull-request9
-rwxr-xr-xscripts/devtool10
-rw-r--r--scripts/lib/buildstats.py38
-rw-r--r--scripts/lib/checklayer/__init__.py15
-rw-r--r--scripts/lib/checklayer/cases/bsp.py2
-rw-r--r--scripts/lib/checklayer/cases/common.py3
-rw-r--r--scripts/lib/checklayer/cases/distro.py2
-rw-r--r--scripts/lib/devtool/menuconfig.py2
-rw-r--r--scripts/lib/devtool/standard.py86
-rw-r--r--scripts/lib/devtool/upgrade.py48
-rw-r--r--scripts/lib/recipetool/create.py16
-rw-r--r--scripts/lib/recipetool/create_buildsys.py38
-rw-r--r--scripts/lib/recipetool/create_buildsys_python.py18
-rw-r--r--scripts/lib/resulttool/report.py5
-rw-r--r--scripts/lib/resulttool/resultutils.py8
-rw-r--r--scripts/lib/wic/misc.py8
-rw-r--r--scripts/lib/wic/partition.py31
-rw-r--r--scripts/lib/wic/plugins/imager/direct.py5
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-efi.py34
-rw-r--r--scripts/lib/wic/plugins/source/rootfs.py4
-rwxr-xr-xscripts/oe-check-sstate10
-rwxr-xr-xscripts/oe-depends-dot21
-rwxr-xr-xscripts/oe-pkgdata-util2
-rwxr-xr-xscripts/oe-setup-builddir12
-rwxr-xr-xscripts/opkg-query-helper.py2
-rw-r--r--scripts/pybootchartgui/pybootchartgui/draw.py9
-rw-r--r--scripts/pybootchartgui/pybootchartgui/parsing.py2
-rwxr-xr-xscripts/relocate_sdk.py10
-rwxr-xr-xscripts/rpm2cpio.sh30
-rwxr-xr-xscripts/runqemu145
-rw-r--r--scripts/runqemu.README16
-rwxr-xr-xscripts/wic2
-rwxr-xr-xscripts/yocto-check-layer5
1437 files changed, 123793 insertions, 20396 deletions
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000000..7d2ce1f631
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,24 @@
+How to Report a Potential Vulnerability?
+========================================
+
+If you would like to report a public issue (for example, one with a released
+CVE number), please report it using the
+[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
+If you have a patch ready, submit it following the same procedure as any other
+patch as described in README.md.
+
+If you are dealing with a not-yet released or urgent issue, please send a
+message to security AT yoctoproject DOT org, including as many details as
+possible: the layer or software module affected, the recipe and its version,
+and any example code, if available.
+
+Branches maintained with security fixes
+---------------------------------------
+
+See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
+for detailed info regarding the policies and maintenance of Stable branches.
+
+The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
+releases of the Yocto Project. Versions in grey are no longer actively maintained with
+security patches, but well-tested patches may still be accepted for them for
+significant issues.
diff --git a/bitbake/SECURITY.md b/bitbake/SECURITY.md
new file mode 100644
index 0000000000..7d2ce1f631
--- /dev/null
+++ b/bitbake/SECURITY.md
@@ -0,0 +1,24 @@
+How to Report a Potential Vulnerability?
+========================================
+
+If you would like to report a public issue (for example, one with a released
+CVE number), please report it using the
+[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
+If you have a patch ready, submit it following the same procedure as any other
+patch as described in README.md.
+
+If you are dealing with a not-yet released or urgent issue, please send a
+message to security AT yoctoproject DOT org, including as many details as
+possible: the layer or software module affected, the recipe and its version,
+and any example code, if available.
+
+Branches maintained with security fixes
+---------------------------------------
+
+See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
+for detailed info regarding the policies and maintenance of Stable branches.
+
+The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
+releases of the Yocto Project. Versions in grey are no longer actively maintained with
+security patches, but well-tested patches may still be accepted for them for
+significant issues.
diff --git a/bitbake/bin/bitbake b/bitbake/bin/bitbake
index 042c91807d..0b9cc6297c 100755
--- a/bitbake/bin/bitbake
+++ b/bitbake/bin/bitbake
@@ -25,8 +25,7 @@ except RuntimeError as exc:
from bb import cookerdata
from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException
-if sys.getfilesystemencoding() != "utf-8":
- sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
+bb.utils.check_system_locale()
__version__ = "2.0.0"
diff --git a/bitbake/bin/bitbake-diffsigs b/bitbake/bin/bitbake-diffsigs
index cf4cc706a2..fe0f33eea1 100755
--- a/bitbake/bin/bitbake-diffsigs
+++ b/bitbake/bin/bitbake-diffsigs
@@ -11,6 +11,7 @@
import os
import sys
import warnings
+
warnings.simplefilter("default")
import argparse
import logging
@@ -27,6 +28,7 @@ logger = bb.msg.logger_create(myname)
is_dump = myname == 'bitbake-dumpsig'
+
def find_siginfo(tinfoil, pn, taskname, sigs=None):
result = None
tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
@@ -52,6 +54,7 @@ def find_siginfo(tinfoil, pn, taskname, sigs=None):
sys.exit(2)
return result
+
def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
""" Find the most recent signature files for the specified PN/task """
@@ -63,10 +66,10 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
if not sigfiles:
logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
sys.exit(1)
- elif not sig1 in sigfiles:
+ elif sig1 not in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
sys.exit(1)
- elif not sig2 in sigfiles:
+ elif sig2 not in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
sys.exit(1)
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
@@ -88,9 +91,9 @@ def recursecb(key, hash1, hash2):
recout = []
if not hashfiles:
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
- elif not hash1 in hashfiles:
+ elif hash1 not in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
- elif not hash2 in hashfiles:
+ elif hash2 not in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
else:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
@@ -110,36 +113,36 @@ parser.add_argument('-D', '--debug',
if is_dump:
parser.add_argument("-t", "--task",
- help="find the signature data file for the last run of the specified task",
- action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
+ help="find the signature data file for the last run of the specified task",
+ action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
parser.add_argument("sigdatafile1",
- help="Signature file to dump. Not used when using -t/--task.",
- action="store", nargs='?', metavar="sigdatafile")
+ help="Signature file to dump. Not used when using -t/--task.",
+ action="store", nargs='?', metavar="sigdatafile")
else:
parser.add_argument('-c', '--color',
- help='Colorize the output (where %(metavar)s is %(choices)s)',
- choices=['auto', 'always', 'never'], default='auto', metavar='color')
+ help='Colorize the output (where %(metavar)s is %(choices)s)',
+ choices=['auto', 'always', 'never'], default='auto', metavar='color')
parser.add_argument('-d', '--dump',
- help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
- action='store_true')
+ help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
+ action='store_true')
parser.add_argument("-t", "--task",
- help="find the signature data files for the last two runs of the specified task and compare them",
- action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
+ help="find the signature data files for the last two runs of the specified task and compare them",
+ action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
parser.add_argument("-s", "--signature",
- help="With -t/--task, specify the signatures to look for instead of taking the last two",
- action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
+ help="With -t/--task, specify the signatures to look for instead of taking the last two",
+ action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
parser.add_argument("sigdatafile1",
- help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
- action="store", nargs='?')
+ help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
+ action="store", nargs='?')
parser.add_argument("sigdatafile2",
- help="Second signature file to compare",
- action="store", nargs='?')
+ help="Second signature file to compare",
+ action="store", nargs='?')
options = parser.parse_args()
if is_dump:
@@ -157,7 +160,8 @@ if options.taskargs:
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
if not options.dump and options.sigargs:
- files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1])
+ files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0],
+ options.sigargs[1])
else:
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])
@@ -166,7 +170,8 @@ if options.taskargs:
output = bb.siggen.dump_sigfile(files[-1])
else:
if len(files) < 2:
- logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (options.taskargs[0], options.taskargs[1]))
+ logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (
+ options.taskargs[0], options.taskargs[1]))
sys.exit(1)
# Recurse into signature comparison
diff --git a/bitbake/bin/bitbake-getvar b/bitbake/bin/bitbake-getvar
index 5435a8d797..13a317e1d6 100755
--- a/bitbake/bin/bitbake-getvar
+++ b/bitbake/bin/bitbake-getvar
@@ -25,6 +25,7 @@ if __name__ == "__main__":
parser.add_argument('-u', '--unexpand', help='Do not expand the value (with --value)', action="store_true")
parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
+ parser.add_argument('-q', '--quiet', help='Silence bitbake server logging', action="store_true")
args = parser.parse_args()
if args.unexpand and not args.value:
@@ -35,9 +36,10 @@ if __name__ == "__main__":
print("--flag only makes sense with --value")
sys.exit(1)
- with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
+ quiet = args.quiet
+ with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
if args.recipe:
- tinfoil.prepare(quiet=2)
+ tinfoil.prepare(quiet=3 if quiet else 2)
d = tinfoil.parse_recipe(args.recipe)
else:
tinfoil.prepare(quiet=2, config_only=True)
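A minimal sketch of the quiet-mode tinfoil usage introduced above, assuming a configured build environment; the variable name MACHINE is only a placeholder::

    import bb.tinfoil

    quiet = True
    # setup_logging=False silences the bitbake server logging, mirroring -q/--quiet above
    with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
        tinfoil.prepare(quiet=3 if quiet else 2, config_only=True)
        print(tinfoil.config_data.getVar("MACHINE"))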
diff --git a/bitbake/bin/bitbake-hashclient b/bitbake/bin/bitbake-hashclient
index 494f17592a..7c57866d36 100755
--- a/bitbake/bin/bitbake-hashclient
+++ b/bitbake/bin/bitbake-hashclient
@@ -56,25 +56,24 @@ def main():
nonlocal missed_hashes
nonlocal max_time
- client = hashserv.create_client(args.address)
-
- for i in range(args.requests):
- taskhash = hashlib.sha256()
- taskhash.update(args.taskhash_seed.encode('utf-8'))
- taskhash.update(str(i).encode('utf-8'))
+ with hashserv.create_client(args.address) as client:
+ for i in range(args.requests):
+ taskhash = hashlib.sha256()
+ taskhash.update(args.taskhash_seed.encode('utf-8'))
+ taskhash.update(str(i).encode('utf-8'))
- start_time = time.perf_counter()
- l = client.get_unihash(METHOD, taskhash.hexdigest())
- elapsed = time.perf_counter() - start_time
+ start_time = time.perf_counter()
+ l = client.get_unihash(METHOD, taskhash.hexdigest())
+ elapsed = time.perf_counter() - start_time
- with lock:
- if l:
- found_hashes += 1
- else:
- missed_hashes += 1
+ with lock:
+ if l:
+ found_hashes += 1
+ else:
+ missed_hashes += 1
- max_time = max(elapsed, max_time)
- pbar.update()
+ max_time = max(elapsed, max_time)
+ pbar.update()
max_time = 0
found_hashes = 0
@@ -152,9 +151,8 @@ def main():
func = getattr(args, 'func', None)
if func:
- client = hashserv.create_client(args.address)
-
- return func(args, client)
+ with hashserv.create_client(args.address) as client:
+ return func(args, client)
return 0
diff --git a/bitbake/bin/bitbake-layers b/bitbake/bin/bitbake-layers
index 449434d468..d4b1d1aaf2 100755
--- a/bitbake/bin/bitbake-layers
+++ b/bitbake/bin/bitbake-layers
@@ -68,11 +68,11 @@ def main():
registered = False
for plugin in plugins:
+ if hasattr(plugin, 'tinfoil_init'):
+ plugin.tinfoil_init(tinfoil)
if hasattr(plugin, 'register_commands'):
registered = True
plugin.register_commands(subparsers)
- if hasattr(plugin, 'tinfoil_init'):
- plugin.tinfoil_init(tinfoil)
if not registered:
logger.error("No commands registered - missing plugins?")
diff --git a/bitbake/bin/bitbake-prserv b/bitbake/bin/bitbake-prserv
index 323df66dd0..5be42f3ce5 100755
--- a/bitbake/bin/bitbake-prserv
+++ b/bitbake/bin/bitbake-prserv
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/bin/bitbake-server b/bitbake/bin/bitbake-server
index f53f88b6b0..d00bb068b8 100755
--- a/bitbake/bin/bitbake-server
+++ b/bitbake/bin/bitbake-server
@@ -12,8 +12,9 @@ warnings.simplefilter("default")
import logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
-if sys.getfilesystemencoding() != "utf-8":
- sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
+import bb
+
+bb.utils.check_system_locale()
# Users shouldn't be running this code directly
if len(sys.argv) != 10 or not sys.argv[1].startswith("decafbad"):
diff --git a/bitbake/bin/bitbake-worker b/bitbake/bin/bitbake-worker
index 9d850ec77c..e02f253205 100755
--- a/bitbake/bin/bitbake-worker
+++ b/bitbake/bin/bitbake-worker
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -22,8 +24,7 @@ import subprocess
from multiprocessing import Lock
from threading import Thread
-if sys.getfilesystemencoding() != "utf-8":
- sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
+bb.utils.check_system_locale()
# Users shouldn't be running this code directly
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
@@ -90,19 +91,19 @@ def worker_fire_prepickled(event):
worker_thread_exit = False
def worker_flush(worker_queue):
- worker_queue_int = b""
+ worker_queue_int = bytearray()
global worker_pipe, worker_thread_exit
while True:
try:
- worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
+ worker_queue_int.extend(worker_queue.get(True, 1))
except queue.Empty:
pass
while (worker_queue_int or not worker_queue.empty()):
try:
(_, ready, _) = select.select([], [worker_pipe], [], 1)
if not worker_queue.empty():
- worker_queue_int = worker_queue_int + worker_queue.get()
+ worker_queue_int.extend(worker_queue.get())
written = os.write(worker_pipe, worker_queue_int)
worker_queue_int = worker_queue_int[written:]
except (IOError, OSError) as e:
@@ -337,12 +338,12 @@ class runQueueWorkerPipe():
if pipeout:
pipeout.close()
bb.utils.nonblockingfd(self.input)
- self.queue = b""
+ self.queue = bytearray()
def read(self):
start = len(self.queue)
try:
- self.queue = self.queue + (self.input.read(102400) or b"")
+ self.queue.extend(self.input.read(102400) or b"")
except (OSError, IOError) as e:
if e.errno != errno.EAGAIN:
raise
@@ -370,7 +371,7 @@ class BitbakeWorker(object):
def __init__(self, din):
self.input = din
bb.utils.nonblockingfd(self.input)
- self.queue = b""
+ self.queue = bytearray()
self.cookercfg = None
self.databuilder = None
self.data = None
@@ -404,7 +405,7 @@ class BitbakeWorker(object):
if len(r) == 0:
# EOF on pipe, server must have terminated
self.sigterm_exception(signal.SIGTERM, None)
- self.queue = self.queue + r
+ self.queue.extend(r)
except (OSError, IOError):
pass
if len(self.queue):
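The switch from bytes to bytearray above is a performance choice: extending a bytearray mutates the buffer in place, whereas concatenating bytes copies the whole accumulated buffer on every append. A minimal sketch of the pattern::

    buf = bytearray()
    for chunk in (b"abc", b"def"):
        buf.extend(chunk)      # amortised append, no full copy of the existing buffer
    written = 4                # e.g. the number of bytes actually accepted by os.write()
    buf = buf[written:]        # slicing works the same way it did with bytes
    assert bytes(buf) == b"ef"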
diff --git a/bitbake/bin/git-make-shallow b/bitbake/bin/git-make-shallow
index 1d00fbf183..d0532c5ab8 100755
--- a/bitbake/bin/git-make-shallow
+++ b/bitbake/bin/git-make-shallow
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst
index f9d9e617f3..519aec9a9f 100644
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst
+++ b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst
@@ -424,8 +424,8 @@ This fetcher supports the following parameters:
- *"nobranch":* Tells the fetcher to not check the SHA validation for
the branch when set to "1". The default is "0". Set this option for
- the recipe that refers to the commit that is valid for a tag instead
- of the branch.
+ the recipe that refers to the commit that is valid for any namespace
+ (branch, tag, ...) instead of the branch.
- *"bareclone":* Tells the fetcher to clone a bare clone into the
destination directory without checking out a working tree. Only the
@@ -688,6 +688,8 @@ Here is an example URL::
It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.
+.. _crate-fetcher:
+
Crate Fetcher (``crate://``)
----------------------------
@@ -704,6 +706,80 @@ Here is an example URL::
SRC_URI = "crate://crates.io/glob/0.2.11"
+.. _npm-fetcher:
+
+NPM Fetcher (``npm://``)
+------------------------
+
+This submodule fetches source code from an
+`NPM <https://en.wikipedia.org/wiki/Npm_(software)>`__
+JavaScript package registry.
+
+The format for the :term:`SRC_URI` setting must be::
+
+ SRC_URI = "npm://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
+
+This fetcher supports the following parameters:
+
+- *"package":* The NPM package name. This is a mandatory parameter.
+
+- *"version":* The NPM package version. This is a mandatory parameter.
+
+- *"downloadfilename":* Specifies the filename used when storing the downloaded file.
+
+- *"destsuffix":* Specifies the directory to use to unpack the package (default: ``npm``).
+
+Note that the NPM fetcher only fetches the package source itself. The dependencies
+can be fetched through the `npmsw-fetcher`_.
+
+Here is an example URL with both fetchers::
+
+ SRC_URI = " \
+ npm://registry.npmjs.org/;package=cute-files;version=${PV} \
+ npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
+ "
+
+See :yocto_docs:`Creating Node Package Manager (NPM) Packages
+</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
+in the Yocto Project manual for details about using
+:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
+to automatically create a recipe from an NPM URL.
+
+.. _npmsw-fetcher:
+
+NPM shrinkwrap Fetcher (``npmsw://``)
+-------------------------------------
+
+This submodule fetches source code from an
+`NPM shrinkwrap <https://docs.npmjs.com/cli/v8/commands/npm-shrinkwrap>`__
+description file, which lists the dependencies
+of an NPM package while locking their versions.
+
+The format for the :term:`SRC_URI` setting must be::
+
+ SRC_URI = "npmsw://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
+
+This fetcher supports the following parameters:
+
+- *"dev":* Set this parameter to ``1`` to install "devDependencies".
+
+- *"destsuffix":* Specifies the directory to use to unpack the dependencies
+ (``${S}`` by default).
+
+Note that the shrinkwrap file can also be provided by the recipe for
+the package which has such dependencies, for example::
+
+ SRC_URI = " \
+ npm://registry.npmjs.org/;package=cute-files;version=${PV} \
+ npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
+ "
+
+Such a file can automatically be generated using
+:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
+as described in the :yocto_docs:`Creating Node Package Manager (NPM) Packages
+</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
+section of the Yocto Project manual.
+
Other Fetchers
--------------
@@ -713,8 +789,6 @@ Fetch submodules also exist for the following:
- Mercurial (``hg://``)
-- npm (``npm://``)
-
- OSC (``osc://``)
- Secure FTP (``sftp://``)
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst
index af4b135867..337821612c 100644
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst
+++ b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst
@@ -195,22 +195,45 @@ value. However, if ``A`` is not set, the variable is set to "aval".
Setting a weak default value (??=)
----------------------------------
-It is possible to use a "weaker" assignment than in the previous section
-by using the "??=" operator. This assignment behaves identical to "?="
-except that the assignment is made at the end of the parsing process
-rather than immediately. Consequently, when multiple "??=" assignments
-exist, the last one is used. Also, any "=" or "?=" assignment will
-override the value set with "??=". Here is an example::
-
- A ??= "somevalue"
- A ??= "someothervalue"
-
-If ``A`` is set before the above statements are
-parsed, the variable retains its value. If ``A`` is not set, the
-variable is set to "someothervalue".
-
-Again, this assignment is a "lazy" or "weak" assignment because it does
-not occur until the end of the parsing process.
+The weak default value of a variable is the value which that variable
+will expand to if no value has been assigned to it via any of the other
+assignment operators. The "??=" operator takes effect immediately, replacing
+any previously defined weak default value. Here is an example::
+
+ W ??= "x"
+ A := "${W}" # Immediate variable expansion
+ W ??= "y"
+ B := "${W}" # Immediate variable expansion
+ W ??= "z"
+ C = "${W}"
+ W ?= "i"
+
+After parsing we will have::
+
+ A = "x"
+ B = "y"
+ C = "i"
+ W = "i"
+
+Appending and prepending non-override style will not substitute the weak
+default value, which means that after parsing::
+
+ W ??= "x"
+ W += "y"
+
+we will have::
+
+ W = " y"
+
+On the other hand, override-style appends/prepends/removes are applied after
+any active weak default value has been substituted::
+
+ W ??= "x"
+ W:append = "y"
+
+After parsing we will have::
+
+ W = "xy"
Immediate variable expansion (:=)
---------------------------------
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst
index af4ff9805c..12aef3cbb7 100644
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst
+++ b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst
@@ -401,7 +401,7 @@ overview of their function and contents.
Example usage::
- BB_HASHSERVE_UPSTREAM = "typhoon.yocto.io:8687"
+ BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
:term:`BB_INVALIDCONF`
Used in combination with the ``ConfigParsed`` event to trigger
diff --git a/bitbake/lib/bb/COW.py b/bitbake/lib/bb/COW.py
index 23c22b65ef..76bc08a3ea 100644
--- a/bitbake/lib/bb/COW.py
+++ b/bitbake/lib/bb/COW.py
@@ -3,6 +3,8 @@
#
# Copyright (C) 2006 Tim Ansell
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Please Note:
# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW.
# Assign a file to __warn__ to get warnings about slow operations.
diff --git a/bitbake/lib/bb/__init__.py b/bitbake/lib/bb/__init__.py
index b8333bdb81..565f163f5f 100644
--- a/bitbake/lib/bb/__init__.py
+++ b/bitbake/lib/bb/__init__.py
@@ -15,6 +15,13 @@ import sys
if sys.version_info < (3, 6, 0):
raise RuntimeError("Sorry, python 3.6.0 or later is required for this version of bitbake")
+if sys.version_info < (3, 10, 0):
+ # With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
+ # https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work
+ # https://bugs.ams1.psf.io/issue42888
+ # so ensure libgcc_s is loaded early on
+ import ctypes
+ libgcc_s = ctypes.CDLL('libgcc_s.so.1')
class BBHandledException(Exception):
"""
diff --git a/bitbake/lib/bb/asyncrpc/__init__.py b/bitbake/lib/bb/asyncrpc/__init__.py
index c2f2b3c00b..9a85e9965b 100644
--- a/bitbake/lib/bb/asyncrpc/__init__.py
+++ b/bitbake/lib/bb/asyncrpc/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/asyncrpc/client.py b/bitbake/lib/bb/asyncrpc/client.py
index 34960197d1..dcbe7e5762 100644
--- a/bitbake/lib/bb/asyncrpc/client.py
+++ b/bitbake/lib/bb/asyncrpc/client.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -29,7 +31,17 @@ class AsyncClient(object):
async def connect_unix(self, path):
async def connect_sock():
- return await asyncio.open_unix_connection(path)
+ # AF_UNIX has path length issues so chdir here to workaround
+ cwd = os.getcwd()
+ try:
+ os.chdir(os.path.dirname(path))
+                # The socket must be opened synchronously so that CWD doesn't get
+                # changed out from underneath us; we then pass it as a sock into asyncio
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+ sock.connect(os.path.basename(path))
+ finally:
+ os.chdir(cwd)
+ return await asyncio.open_unix_connection(sock=sock)
self._connect_sock = connect_sock
@@ -114,6 +126,12 @@ class AsyncClient(object):
{'ping': {}}
)
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_value, traceback):
+ await self.close()
+
class Client(object):
def __init__(self):
@@ -148,14 +166,8 @@ class Client(object):
setattr(self, m, self._get_downcall_wrapper(downcall))
def connect_unix(self, path):
- # AF_UNIX has path length issues so chdir here to workaround
- cwd = os.getcwd()
- try:
- os.chdir(os.path.dirname(path))
- self.loop.run_until_complete(self.client.connect_unix(os.path.basename(path)))
- self.loop.run_until_complete(self.client.connect())
- finally:
- os.chdir(cwd)
+ self.loop.run_until_complete(self.client.connect_unix(path))
+ self.loop.run_until_complete(self.client.connect())
@property
def max_chunk(self):
@@ -170,3 +182,10 @@ class Client(object):
if sys.version_info >= (3, 6):
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+ return False
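With the context-manager protocol added above, callers can hold a connection open for exactly the lifetime of a ``with`` block. A minimal usage sketch; the address string and the method/taskhash values are placeholders, only ``create_client()`` and ``get_unihash()`` are taken from the changes above::

    import hashserv

    # close() runs automatically when the with block exits
    with hashserv.create_client("unix://path/to/hashserve.sock") as client:
        unihash = client.get_unihash("TaskMethod", "0" * 64)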
diff --git a/bitbake/lib/bb/asyncrpc/serv.py b/bitbake/lib/bb/asyncrpc/serv.py
index b4cffff213..e14df18e71 100644
--- a/bitbake/lib/bb/asyncrpc/serv.py
+++ b/bitbake/lib/bb/asyncrpc/serv.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/codeparser.py b/bitbake/lib/bb/codeparser.py
index 3b3c3b41ff..6ce0c5182f 100644
--- a/bitbake/lib/bb/codeparser.py
+++ b/bitbake/lib/bb/codeparser.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -223,19 +225,19 @@ class PythonParser():
def visit_Call(self, node):
name = self.called_node_name(node.func)
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
- if isinstance(node.args[0], ast.Str):
- varname = node.args[0].s
- if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
+ if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str):
+ varname = node.args[0].value
+ if name in self.containsfuncs and isinstance(node.args[1], ast.Constant):
if varname not in self.contains:
self.contains[varname] = set()
- self.contains[varname].add(node.args[1].s)
- elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
+ self.contains[varname].add(node.args[1].value)
+ elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Constant):
if varname not in self.contains:
self.contains[varname] = set()
- self.contains[varname].update(node.args[1].s.split())
+ self.contains[varname].update(node.args[1].value.split())
elif name.endswith(self.getvarflags):
- if isinstance(node.args[1], ast.Str):
- self.references.add('%s[%s]' % (varname, node.args[1].s))
+ if isinstance(node.args[1], ast.Constant):
+ self.references.add('%s[%s]' % (varname, node.args[1].value))
else:
self.warn(node.func, node.args[1])
else:
@@ -243,8 +245,8 @@ class PythonParser():
else:
self.warn(node.func, node.args[0])
elif name and name.endswith(".expand"):
- if isinstance(node.args[0], ast.Str):
- value = node.args[0].s
+ if isinstance(node.args[0], ast.Constant):
+ value = node.args[0].value
d = bb.data.init()
parser = d.expandWithRefs(value, self.name)
self.references |= parser.references
@@ -254,8 +256,8 @@ class PythonParser():
self.contains[varname] = set()
self.contains[varname] |= parser.contains[varname]
elif name in self.execfuncs:
- if isinstance(node.args[0], ast.Str):
- self.var_execs.add(node.args[0].s)
+ if isinstance(node.args[0], ast.Constant):
+ self.var_execs.add(node.args[0].value)
else:
self.warn(node.func, node.args[0])
elif name and isinstance(node.func, (ast.Name, ast.Attribute)):
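The parser changes above track Python's AST API: since Python 3.8 string literals parse as ``ast.Constant``, and ``ast.Str`` is deprecated (removed in 3.12). A minimal sketch of what the visitor now sees::

    import ast

    tree = ast.parse('d.getVar("FOO")')
    call = tree.body[0].value            # the ast.Call node
    arg = call.args[0]
    # String literals are ast.Constant nodes carrying the value in .value
    assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
    print(arg.value)                     # -> FOO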
diff --git a/bitbake/lib/bb/compress/_pipecompress.py b/bitbake/lib/bb/compress/_pipecompress.py
index 5de17a82e2..4a403d62cf 100644
--- a/bitbake/lib/bb/compress/_pipecompress.py
+++ b/bitbake/lib/bb/compress/_pipecompress.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Helper library to implement streaming compression and decompression using an
diff --git a/bitbake/lib/bb/compress/lz4.py b/bitbake/lib/bb/compress/lz4.py
index 0f6bc51a5b..88b0989322 100644
--- a/bitbake/lib/bb/compress/lz4.py
+++ b/bitbake/lib/bb/compress/lz4.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/compress/zstd.py b/bitbake/lib/bb/compress/zstd.py
index 50c42133fb..cdbbe9d60f 100644
--- a/bitbake/lib/bb/compress/zstd.py
+++ b/bitbake/lib/bb/compress/zstd.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index 6da9291f9c..2adf4d297d 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -13,7 +13,6 @@ import sys, os, glob, os.path, re, time
import itertools
import logging
import multiprocessing
-import sre_constants
import threading
from io import StringIO, UnsupportedOperation
from contextlib import closing
@@ -1907,7 +1906,7 @@ class CookerCollectFiles(object):
try:
re.compile(mask)
bbmasks.append(mask)
- except sre_constants.error:
+ except re.error:
collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask)
# Then validate the combined regular expressions. This should never
@@ -1915,7 +1914,7 @@ class CookerCollectFiles(object):
bbmask = "|".join(bbmasks)
try:
bbmask_compiled = re.compile(bbmask)
- except sre_constants.error:
+ except re.error:
collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask)
bbmask = None
diff --git a/bitbake/lib/bb/cookerdata.py b/bitbake/lib/bb/cookerdata.py
index fe5696c704..ec3741cc1d 100644
--- a/bitbake/lib/bb/cookerdata.py
+++ b/bitbake/lib/bb/cookerdata.py
@@ -160,12 +160,7 @@ def catch_parse_error(func):
def wrapped(fn, *args):
try:
return func(fn, *args)
- except IOError as exc:
- import traceback
- parselog.critical(traceback.format_exc())
- parselog.critical("Unable to parse %s: %s" % (fn, exc))
- raise bb.BBHandledException()
- except bb.data_smart.ExpansionError as exc:
+ except Exception as exc:
import traceback
bbdir = os.path.dirname(__file__) + os.sep
@@ -177,9 +172,6 @@ def catch_parse_error(func):
break
parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb))
raise bb.BBHandledException()
- except bb.parse.ParseError as exc:
- parselog.critical(str(exc))
- raise bb.BBHandledException()
return wrapped
@catch_parse_error
@@ -301,14 +293,9 @@ class CookerDataBuilder(object):
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
self.data_hash = data_hash.hexdigest()
- except (SyntaxError, bb.BBHandledException):
- raise bb.BBHandledException()
except bb.data_smart.ExpansionError as e:
logger.error(str(e))
raise bb.BBHandledException()
- except Exception:
- logger.exception("Error parsing configuration files")
- raise bb.BBHandledException()
# Handle obsolete variable names
@@ -435,7 +422,7 @@ class CookerDataBuilder(object):
msg += (" and bitbake did not find a conf/bblayers.conf file in"
" the expected location.\nMaybe you accidentally"
" invoked bitbake from the wrong directory?")
- raise SystemExit(msg)
+ bb.fatal(msg)
if not data.getVar("TOPDIR"):
data.setVar("TOPDIR", os.path.abspath(os.getcwd()))
diff --git a/bitbake/lib/bb/daemonize.py b/bitbake/lib/bb/daemonize.py
index 4957bfd4b8..7689404436 100644
--- a/bitbake/lib/bb/daemonize.py
+++ b/bitbake/lib/bb/daemonize.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py
index c09d9b04bb..45411055b9 100644
--- a/bitbake/lib/bb/data.py
+++ b/bitbake/lib/bb/data.py
@@ -310,6 +310,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
value += "\n_remove of %s" % r
deps |= r2.references
deps = deps | (keys & r2.execs)
+ value = handle_contains(value, r2.contains, exclusions, d)
return value
if "vardepvalue" in varflags:
diff --git a/bitbake/lib/bb/event.py b/bitbake/lib/bb/event.py
index df020551e3..97668601a1 100644
--- a/bitbake/lib/bb/event.py
+++ b/bitbake/lib/bb/event.py
@@ -132,8 +132,14 @@ def print_ui_queue():
if not _uiready:
from bb.msg import BBLogFormatter
# Flush any existing buffered content
- sys.stdout.flush()
- sys.stderr.flush()
+ try:
+ sys.stdout.flush()
+ except:
+ pass
+ try:
+ sys.stderr.flush()
+ except:
+ pass
stdout = logging.StreamHandler(sys.stdout)
stderr = logging.StreamHandler(sys.stderr)
formatter = BBLogFormatter("%(levelname)s: %(message)s")
diff --git a/bitbake/lib/bb/exceptions.py b/bitbake/lib/bb/exceptions.py
index ecbad59970..801db9c82f 100644
--- a/bitbake/lib/bb/exceptions.py
+++ b/bitbake/lib/bb/exceptions.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/fetch2/__init__.py b/bitbake/lib/bb/fetch2/__init__.py
index ac557176d7..a31406263f 100644
--- a/bitbake/lib/bb/fetch2/__init__.py
+++ b/bitbake/lib/bb/fetch2/__init__.py
@@ -1097,6 +1097,8 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
def ensure_symlink(target, link_name):
if not os.path.exists(link_name):
+ dirname = os.path.dirname(link_name)
+ bb.utils.mkdirhier(dirname)
if os.path.islink(link_name):
# Broken symbolic link
os.unlink(link_name)
diff --git a/bitbake/lib/bb/fetch2/git.py b/bitbake/lib/bb/fetch2/git.py
index f0df6fb69b..9ecc855af8 100644
--- a/bitbake/lib/bb/fetch2/git.py
+++ b/bitbake/lib/bb/fetch2/git.py
@@ -44,7 +44,8 @@ Supported SRC_URI options are:
- nobranch
Don't check the SHA validation for branch. set this option for the recipe
- referring to commit which is valid in tag instead of branch.
+   referring to a commit which is valid in any namespace (branch, tag, ...)
+   instead of a branch.
The default is "0", set nobranch=1 if needed.
- usehead
@@ -306,7 +307,10 @@ class Git(FetchMethod):
return ud.clonedir
def need_update(self, ud, d):
- return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
+ return self.clonedir_need_update(ud, d) \
+ or self.shallow_tarball_need_update(ud) \
+ or self.tarball_need_update(ud) \
+ or self.lfs_need_update(ud, d)
def clonedir_need_update(self, ud, d):
if not os.path.exists(ud.clonedir):
@@ -318,6 +322,15 @@ class Git(FetchMethod):
return True
return False
+ def lfs_need_update(self, ud, d):
+ if self.clonedir_need_update(ud, d):
+ return True
+
+ for name in ud.names:
+ if not self._lfs_objects_downloaded(ud, d, name, ud.clonedir):
+ return True
+ return False
+
def clonedir_need_shallow_revs(self, ud, d):
for rev in ud.shallow_revs:
try:
@@ -358,9 +371,13 @@ class Git(FetchMethod):
# If the repo still doesn't exist, fallback to cloning it
if not os.path.exists(ud.clonedir):
- # We do this since git will use a "-l" option automatically for local urls where possible
+ # We do this since git will use a "-l" option automatically for local urls where possible,
+            # but it doesn't work when git/objects is a symlink; it only works when it is a directory.
if repourl.startswith("file://"):
- repourl = repourl[7:]
+ repourl_path = repourl[7:]
+ objects = os.path.join(repourl_path, 'objects')
+ if os.path.isdir(objects) and not os.path.islink(objects):
+ repourl = repourl_path
clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, shlex.quote(repourl), ud.clonedir)
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, clone_cmd, ud.url)
@@ -374,7 +391,11 @@ class Git(FetchMethod):
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=ud.clonedir)
- fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
+
+ if ud.nobranch:
+ fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
+ else:
+ fetch_cmd = "LANG=C %s fetch -f --progress %s refs/heads/*:refs/heads/* refs/tags/*:refs/tags/*" % (ud.basecmd, shlex.quote(repourl))
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
progresshandler = GitProgressHandler(d)
@@ -397,7 +418,7 @@ class Git(FetchMethod):
if missing_rev:
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
- if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
+ if self.lfs_need_update(ud, d):
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
# of all LFS blobs needed at the srcrev.
#
@@ -640,6 +661,35 @@ class Git(FetchMethod):
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
return output.split()[0] != "0"
+ def _lfs_objects_downloaded(self, ud, d, name, wd):
+ """
+ Verifies whether the LFS objects for requested revisions have already been downloaded
+ """
+ # Bail out early if this repository doesn't use LFS
+ if not self._need_lfs(ud) or not self._contains_lfs(ud, d, wd):
+ return True
+
+        # The Git LFS specification documents the LFS folder layout ([1]), so it should be safe to check for
+        # file existence.
+ # [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git
+ cmd = "%s lfs ls-files -l %s" \
+ % (ud.basecmd, ud.revisions[name])
+ output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip()
+ # Do not do any further matching if no objects are managed by LFS
+ if not output:
+ return True
+
+ # Match all lines beginning with the hexadecimal OID
+ oid_regex = re.compile("^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)")
+ for line in output.split("\n"):
+ oid = re.search(oid_regex, line)
+ if not oid:
+                bb.warn("git lfs ls-files output '%s' did not match expected format." % line)
+                continue
+ if not os.path.exists(os.path.join(wd, "lfs", "objects", oid.group(2), oid.group(3), oid.group(1))):
+ return False
+
+ return True
+
def _need_lfs(self, ud):
return ud.parm.get("lfs", "1") == "1"
@@ -731,7 +781,7 @@ class Git(FetchMethod):
Compute the HEAD revision for the url
"""
if not d.getVar("__BBSEENSRCREV"):
- raise bb.fetch2.FetchError("Recipe uses a floating tag/branch without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE).")
+ raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev[name], ud.host+ud.path))
# Ensure we mark as not cached
bb.fetch2.get_autorev(d)
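The LFS check added above relies on the on-disk layout of the LFS cache inside the bare clone. A minimal sketch of the OID-to-path mapping, using a made-up ``git lfs ls-files -l`` output line::

    import os
    import re

    oid_regex = re.compile(r"^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)")
    line = "34be66b1a39a1955b46a12588df9d5f6fc1da790e05cf01f3c7422f4bbbdc26b * a.mp3"
    m = oid_regex.search(line)
    if m:
        # Objects live at lfs/objects/<first two hex chars>/<next two>/<full oid>
        path = os.path.join("lfs", "objects", m.group(2), m.group(3), m.group(1))
        print(path)   # lfs/objects/34/be/34be66b1...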
diff --git a/bitbake/lib/bb/fetch2/gitsm.py b/bitbake/lib/bb/fetch2/gitsm.py
index c5c23d5260..c5f7c03c4c 100644
--- a/bitbake/lib/bb/fetch2/gitsm.py
+++ b/bitbake/lib/bb/fetch2/gitsm.py
@@ -88,7 +88,7 @@ class GitSM(Git):
subrevision[m] = module_hash.split()[2]
# Convert relative to absolute uri based on parent uri
- if uris[m].startswith('..'):
+ if uris[m].startswith('..') or uris[m].startswith('./'):
newud = copy.copy(ud)
newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
uris[m] = Git._get_repo_url(self, newud)
@@ -115,6 +115,9 @@ class GitSM(Git):
# This has to be a file reference
proto = "file"
url = "gitsm://" + uris[module]
+ if url.endswith("{}{}".format(ud.host, ud.path)):
+                raise bb.fetch2.FetchError("Submodule refers to the parent repository. This will cause a deadlock situation in the current version of BitBake. " \
+                        "Consider using the git fetcher instead.")
url += ';protocol=%s' % proto
url += ";name=%s" % module
diff --git a/bitbake/lib/bb/fetch2/npm.py b/bitbake/lib/bb/fetch2/npm.py
index 8f7c10ac9b..8a179a339a 100644
--- a/bitbake/lib/bb/fetch2/npm.py
+++ b/bitbake/lib/bb/fetch2/npm.py
@@ -156,7 +156,7 @@ class Npm(FetchMethod):
raise ParameterError("Invalid 'version' parameter", ud.url)
# Extract the 'registry' part of the url
- ud.registry = re.sub(r"^npm://", "http://", ud.url.split(";")[0])
+ ud.registry = re.sub(r"^npm://", "https://", ud.url.split(";")[0])
# Using the 'downloadfilename' parameter as local filename
# or the npm package name.
diff --git a/bitbake/lib/bb/fetch2/osc.py b/bitbake/lib/bb/fetch2/osc.py
index eb0f82c8e6..bf4c2f0511 100644
--- a/bitbake/lib/bb/fetch2/osc.py
+++ b/bitbake/lib/bb/fetch2/osc.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
"""
diff --git a/bitbake/lib/bb/monitordisk.py b/bitbake/lib/bb/monitordisk.py
index a1b910007d..f928210351 100644
--- a/bitbake/lib/bb/monitordisk.py
+++ b/bitbake/lib/bb/monitordisk.py
@@ -234,9 +234,10 @@ class diskMonitor:
freeInode = st.f_favail
if minInode and freeInode < minInode:
- # Some filesystems use dynamic inodes so can't run out
- # (e.g. btrfs). This is reported by the inode count being 0.
- if st.f_files == 0:
+ # Some filesystems use dynamic inodes so can't run out.
+ # This is reported by the inode count being 0 (btrfs) or the free
+ # inode count being -1 (cephfs).
+ if st.f_files == 0 or st.f_favail == -1:
self.devDict[k][2] = None
continue
# Always show warning, the self.checked would always be False if the action is WARN
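A minimal sketch of the statvfs fields the new check above looks at; the path is a placeholder::

    import os

    st = os.statvfs("/")
    # Dynamic-inode filesystems report f_files == 0 (btrfs) or f_favail == -1 (cephfs),
    # in which case an inode-based low-space check is meaningless and is skipped.
    if st.f_files == 0 or st.f_favail == -1:
        print("dynamic inodes, skipping inode monitoring")
    else:
        print("free inodes:", st.f_favail)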
diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py
index ee9bd760ce..68415735fd 100644
--- a/bitbake/lib/bb/parse/parse_py/BBHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/BBHandler.py
@@ -178,10 +178,10 @@ def feeder(lineno, s, fn, root, statements, eof=False):
if s and s[0] == '#':
if len(__residue__) != 0 and __residue__[0][0] != "#":
- bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))
+ bb.fatal("There is a comment on line %s of file %s:\n'''\n%s\n'''\nwhich is in the middle of a multiline expression. This syntax is invalid, please correct it." % (lineno, fn, s))
if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
- bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
+        bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (lineno - len(__residue__), fn, "\n".join(__residue__)))
if s and s[-1] == '\\':
__residue__.append(s[:-1])
diff --git a/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/bitbake/lib/bb/parse/parse_py/ConfHandler.py
index b895d5b5ef..451e68dd66 100644
--- a/bitbake/lib/bb/parse/parse_py/ConfHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/ConfHandler.py
@@ -125,16 +125,21 @@ def handle(fn, data, include):
s = f.readline()
if not s:
break
+ origlineno = lineno
+ origline = s
w = s.strip()
# skip empty lines
if not w:
continue
s = s.rstrip()
while s[-1] == '\\':
- s2 = f.readline().rstrip()
+ line = f.readline()
+ origline += line
+ s2 = line.rstrip()
lineno = lineno + 1
if (not s2 or s2 and s2[0] != "#") and s[0] == "#" :
- bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
+ bb.fatal("There is a confusing multiline, partially commented expression starting on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (origlineno, fn, origline))
+
s = s[:-1] + s2
# skip comments
if s[0] == '#':
@@ -147,8 +152,6 @@ def handle(fn, data, include):
if oldfile:
data.setVar('FILE', oldfile)
- f.close()
-
for f in confFilters:
f(fn, data)
diff --git a/bitbake/lib/bb/process.py b/bitbake/lib/bb/process.py
index be2c15a188..4c7b6d39df 100644
--- a/bitbake/lib/bb/process.py
+++ b/bitbake/lib/bb/process.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index f34f1568e2..0b6eddd45c 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -24,6 +24,7 @@ import pickle
from multiprocessing import Process
import shlex
import pprint
+import time
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
@@ -159,6 +160,67 @@ class RunQueueScheduler(object):
self.buildable.append(tid)
self.rev_prio_map = None
+ self.is_pressure_usable()
+
+ def is_pressure_usable(self):
+ """
+ If monitoring pressure, return True if pressure files can be open and read. For example
+ openSUSE /proc/pressure/* files have readable file permissions but when read the error EOPNOTSUPP (Operation not supported)
+ is returned.
+ """
+ if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure:
+ try:
+ with open("/proc/pressure/cpu") as cpu_pressure_fds, \
+ open("/proc/pressure/io") as io_pressure_fds, \
+ open("/proc/pressure/memory") as memory_pressure_fds:
+
+ self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
+ self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
+ self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
+ self.prev_pressure_time = time.time()
+ self.check_pressure = True
+ except:
+ bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure")
+ self.check_pressure = False
+ else:
+ self.check_pressure = False
+
+ def exceeds_max_pressure(self):
+ """
+        Monitor the difference in total pressure at least once per second. If
+        BB_PRESSURE_MAX_{CPU|IO|MEMORY} are set, return True when above the threshold.
+ """
+ if self.check_pressure:
+ with open("/proc/pressure/cpu") as cpu_pressure_fds, \
+ open("/proc/pressure/io") as io_pressure_fds, \
+ open("/proc/pressure/memory") as memory_pressure_fds:
+ # extract "total" from /proc/pressure/{cpu|io}
+ curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
+ curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
+ curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
+ now = time.time()
+ tdiff = now - self.prev_pressure_time
+ psi_accumulation_interval = 1.0
+ cpu_pressure = (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) / tdiff
+ io_pressure = (float(curr_io_pressure) - float(self.prev_io_pressure)) / tdiff
+ memory_pressure = (float(curr_memory_pressure) - float(self.prev_memory_pressure)) / tdiff
+ exceeds_cpu_pressure = self.rq.max_cpu_pressure and cpu_pressure > self.rq.max_cpu_pressure
+ exceeds_io_pressure = self.rq.max_io_pressure and io_pressure > self.rq.max_io_pressure
+ exceeds_memory_pressure = self.rq.max_memory_pressure and memory_pressure > self.rq.max_memory_pressure
+
+ if tdiff > psi_accumulation_interval:
+ self.prev_cpu_pressure = curr_cpu_pressure
+ self.prev_io_pressure = curr_io_pressure
+ self.prev_memory_pressure = curr_memory_pressure
+ self.prev_pressure_time = now
+
+ pressure_state = (exceeds_cpu_pressure, exceeds_io_pressure, exceeds_memory_pressure)
+ pressure_values = (round(cpu_pressure,1), self.rq.max_cpu_pressure, round(io_pressure,1), self.rq.max_io_pressure, round(memory_pressure,1), self.rq.max_memory_pressure)
+ if hasattr(self, "pressure_state") and pressure_state != self.pressure_state:
+ bb.note("Pressure status changed to CPU: %s, IO: %s, Mem: %s (CPU: %s/%s, IO: %s/%s, Mem: %s/%s) - using %s/%s bitbake threads" % (pressure_state + pressure_values + (len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks)))
+ self.pressure_state = pressure_state
+ return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure)
+ return False
def next_buildable_task(self):
"""
@@ -172,6 +234,12 @@ class RunQueueScheduler(object):
if not buildable:
return None
+ # Bitbake requires that at least one task be active. Only check for pressure if
+ # this is the case, otherwise the pressure limitation could result in no tasks
+ # being active and no new tasks started thereby, at times, breaking the scheduler.
+ if self.rq.stats.active and self.exceeds_max_pressure():
+ return None
+
# Filter out tasks that have a max number of threads that have been exceeded
skip_buildable = {}
for running in self.rq.runq_running.difference(self.rq.runq_complete):
@@ -1699,6 +1767,9 @@ class RunQueueExecute:
self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
+ self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU")
+ self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO")
+ self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY")
self.sq_buildable = set()
self.sq_running = set()
@@ -1733,6 +1804,29 @@ class RunQueueExecute:
if self.number_tasks <= 0:
bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
+ lower_limit = 1.0
+ upper_limit = 1000000.0
+ if self.max_cpu_pressure:
+ self.max_cpu_pressure = float(self.max_cpu_pressure)
+ if self.max_cpu_pressure < lower_limit:
+ bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit))
+ if self.max_cpu_pressure > upper_limit:
+ bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure))
+
+ if self.max_io_pressure:
+ self.max_io_pressure = float(self.max_io_pressure)
+ if self.max_io_pressure < lower_limit:
+ bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit))
+ if self.max_io_pressure > upper_limit:
+ bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
+
+ if self.max_memory_pressure:
+ self.max_memory_pressure = float(self.max_memory_pressure)
+ if self.max_memory_pressure < lower_limit:
+ bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit))
+ if self.max_memory_pressure > upper_limit:
+                bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_memory_pressure))
+
# List of setscene tasks which we've covered
self.scenequeue_covered = set()
# List of tasks which are covered (including setscene ones)
@@ -1892,11 +1986,19 @@ class RunQueueExecute:
self.setbuildable(revdep)
logger.debug("Marking task %s as buildable", revdep)
- for t in self.sq_deferred.copy():
+ found = None
+ for t in sorted(self.sq_deferred.copy()):
if self.sq_deferred[t] == task:
- logger.debug2("Deferred task %s now buildable" % t)
- del self.sq_deferred[t]
- update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
+ # Allow the next deferred task to run. Any other deferred tasks should be deferred after that task.
+ # We shouldn't allow all to run at once as it is prone to races.
+ if not found:
+ bb.debug(1, "Deferred task %s now buildable" % t)
+ del self.sq_deferred[t]
+ update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
+ found = t
+ else:
+ bb.debug(1, "Deferring %s after %s" % (t, found))
+ self.sq_deferred[t] = found
def task_complete(self, task):
self.stats.taskCompleted()
@@ -2172,10 +2274,9 @@ class RunQueueExecute:
# No more tasks can be run. If we have deferred setscene tasks we should run them.
if self.sq_deferred:
- tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
- logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
- if tid not in self.runq_complete:
- self.sq_task_failoutright(tid)
+ deferred_tid = list(self.sq_deferred.keys())[0]
+ blocking_tid = self.sq_deferred.pop(deferred_tid)
+            logger.warning("Runqueue deadlocked on deferred tasks, forcing task %s blocked by %s" % (deferred_tid, blocking_tid))
return True
if self.failed_tids:
@@ -2299,6 +2400,9 @@ class RunQueueExecute:
self.rqdata.runtaskentries[hashtid].unihash = unihash
bb.parse.siggen.set_unihash(hashtid, unihash)
toprocess.add(hashtid)
+ if torehash:
+ # Need to save after set_unihash above
+ bb.parse.siggen.save_unitaskhashes()
# Work out all tasks which depend upon these
total = set()
@@ -2405,17 +2509,6 @@ class RunQueueExecute:
self.sq_buildable.remove(tid)
if tid in self.sq_running:
self.sq_running.remove(tid)
- harddepfail = False
- for t in self.sqdata.sq_harddeps:
- if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
- harddepfail = True
- break
- if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
- if tid not in self.sq_buildable:
- self.sq_buildable.add(tid)
- if not self.sqdata.sq_revdeps[tid]:
- self.sq_buildable.add(tid)
-
if tid in self.sqdata.outrightfail:
self.sqdata.outrightfail.remove(tid)
if tid in self.scenequeue_notcovered:
@@ -2434,18 +2527,36 @@ class RunQueueExecute:
if tid in self.build_stamps:
del self.build_stamps[tid]
- update_tasks.append((tid, harddepfail, tid in self.sqdata.valid))
+ update_tasks.append(tid)
- if update_tasks:
+ update_tasks2 = []
+ for tid in update_tasks:
+ harddepfail = False
+ for t in self.sqdata.sq_harddeps:
+ if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
+ harddepfail = True
+ break
+ if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
+ if tid not in self.sq_buildable:
+ self.sq_buildable.add(tid)
+ if not self.sqdata.sq_revdeps[tid]:
+ self.sq_buildable.add(tid)
+
+ update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid))
+
+ if update_tasks2:
self.sqdone = False
- for tid in [t[0] for t in update_tasks]:
- h = pending_hash_index(tid, self.rqdata)
- if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
- self.sq_deferred[tid] = self.sqdata.hashes[h]
- bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
- update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
-
- for (tid, harddepfail, origvalid) in update_tasks:
+ for mc in sorted(self.sqdata.multiconfigs):
+ for tid in sorted([t[0] for t in update_tasks2]):
+ if mc_from_tid(tid) != mc:
+ continue
+ h = pending_hash_index(tid, self.rqdata)
+ if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
+ self.sq_deferred[tid] = self.sqdata.hashes[h]
+ bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
+ update_scenequeue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
+
+ for (tid, harddepfail, origvalid) in update_tasks2:
if tid in self.sqdata.valid and not origvalid:
hashequiv_logger.verbose("Setscene task %s became valid" % tid)
if harddepfail:
@@ -2793,7 +2904,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
sqdata.hashes[h] = tid
else:
sqrq.sq_deferred[tid] = sqdata.hashes[h]
- bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
+ bb.debug(1, "Deferring %s after %s" % (tid, sqdata.hashes[h]))
update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
@@ -3002,7 +3113,7 @@ class runQueuePipe():
if pipeout:
pipeout.close()
bb.utils.nonblockingfd(self.input)
- self.queue = b""
+ self.queue = bytearray()
self.d = d
self.rq = rq
self.rqexec = rqexec
@@ -3021,7 +3132,7 @@ class runQueuePipe():
start = len(self.queue)
try:
- self.queue = self.queue + (self.input.read(102400) or b"")
+ self.queue.extend(self.input.read(102400) or b"")
except (OSError, IOError) as e:
if e.errno != errno.EAGAIN:
raise
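The pressure throttling added earlier in this file samples the ``total=`` counters from the PSI files and turns them into a rate. A minimal sketch of that sampling; the threshold value 500 is purely illustrative, not a recommendation::

    import time

    def read_total(path):
        # First line looks like: "some avg10=0.00 avg60=0.00 avg300=0.00 total=12345678"
        with open(path) as f:
            return float(f.readline().split()[4].split("=")[1])

    max_cpu_pressure = 500.0      # e.g. BB_PRESSURE_MAX_CPU = "500" in local.conf
    prev_total, prev_time = read_total("/proc/pressure/cpu"), time.time()
    time.sleep(1)
    curr_total, now = read_total("/proc/pressure/cpu"), time.time()
    cpu_pressure = (curr_total - prev_total) / (now - prev_time)
    print("over limit:", cpu_pressure > max_cpu_pressure)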
diff --git a/bitbake/lib/bb/siggen.py b/bitbake/lib/bb/siggen.py
index 9fa568f614..0a9ce0ede3 100644
--- a/bitbake/lib/bb/siggen.py
+++ b/bitbake/lib/bb/siggen.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -327,19 +329,19 @@ class SignatureGeneratorBasic(SignatureGenerator):
data = self.basehash[tid]
for dep in self.runtaskdeps[tid]:
- data = data + self.get_unihash(dep)
+ data += self.get_unihash(dep)
for (f, cs) in self.file_checksum_values[tid]:
if cs:
if "/./" in f:
- data = data + "./" + f.split("/./")[1]
- data = data + cs
+ data += "./" + f.split("/./")[1]
+ data += cs
if tid in self.taints:
if self.taints[tid].startswith("nostamp:"):
- data = data + self.taints[tid][8:]
+ data += self.taints[tid][8:]
else:
- data = data + self.taints[tid]
+ data += self.taints[tid]
h = hashlib.sha256(data.encode("utf-8")).hexdigest()
self.taskhash[tid] = h
@@ -419,7 +421,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[tid], tid))
sigfile = sigfile.replace(self.taskhash[tid], computed_taskhash)
- fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
+ fd, tmpfile = bb.utils.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
try:
with bb.compress.zstd.open(fd, "wt", encoding="utf-8", num_threads=1) as f:
json.dump(data, f, sort_keys=True, separators=(",", ":"), cls=SetEncoder)
@@ -1026,6 +1028,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
# If a dependent hash changed, might as well print the line above and then defer to the changes in
# that hash since in all likelyhood, they're the same changes this task also saw.
output = [output[-1]] + recout
+ break
a_taint = a_data.get('taint', None)
b_taint = b_data.get('taint', None)
diff --git a/bitbake/lib/bb/tests/codeparser.py b/bitbake/lib/bb/tests/codeparser.py
index 71ed382ab8..36ed4e196b 100644
--- a/bitbake/lib/bb/tests/codeparser.py
+++ b/bitbake/lib/bb/tests/codeparser.py
@@ -430,6 +430,32 @@ esac
self.assertEqual(deps, set(["TESTVAR2"]))
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
+ def test_contains_vardeps_override_operators(self):
+ # Check override operators handle dependencies correctly with the contains functionality
+ expr_plain = 'testval'
+ expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
+ expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
+ expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
+ # Check dependencies
+ self.d.setVar('ANOTHERVAR', expr_plain)
+ self.d.prependVar('ANOTHERVAR', expr_prepend)
+ self.d.appendVar('ANOTHERVAR', expr_append)
+ self.d.setVar('ANOTHERVAR:remove', expr_remove)
+ self.d.setVar('TESTVAR1', 'blah')
+ self.d.setVar('TESTVAR2', 'testval2')
+ self.d.setVar('TESTVAR3', 'no-testval')
+ deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
+ self.assertEqual(sorted(values.splitlines()),
+ sorted([
+ expr_prepend + expr_plain + expr_append,
+ '_remove of ' + expr_remove,
+ 'TESTVAR1{testval1} = Unset',
+ 'TESTVAR2{testval2} = Set',
+ 'TESTVAR3{no-testval} = Set',
+ ]))
+ # Check final value
+ self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
+
#Currently no wildcard support
#def test_vardeps_wildcards(self):
# self.d.setVar("oe_libinstall", "echo test")
diff --git a/bitbake/lib/bb/tests/compression.py b/bitbake/lib/bb/tests/compression.py
index d3ddf67f1c..95af3f96d7 100644
--- a/bitbake/lib/bb/tests/compression.py
+++ b/bitbake/lib/bb/tests/compression.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/tests/cooker.py b/bitbake/lib/bb/tests/cooker.py
index c82d4b7b81..9e524ae345 100644
--- a/bitbake/lib/bb/tests/cooker.py
+++ b/bitbake/lib/bb/tests/cooker.py
@@ -1,6 +1,8 @@
#
# BitBake Tests for cooker.py
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bb/tests/fetch.py b/bitbake/lib/bb/tests/fetch.py
index 1152e89c0d..5aa3e464dd 100644
--- a/bitbake/lib/bb/tests/fetch.py
+++ b/bitbake/lib/bb/tests/fetch.py
@@ -6,6 +6,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
+import contextlib
import unittest
import hashlib
import tempfile
@@ -1834,7 +1835,7 @@ class GitShallowTest(FetcherTest):
self.add_empty_file('bsub', cwd=smdir)
self.git('submodule init', cwd=self.srcdir)
- self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
+ self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir)
self.git('submodule update', cwd=self.srcdir)
self.git('commit -m submodule -a', cwd=self.srcdir)
@@ -1864,7 +1865,7 @@ class GitShallowTest(FetcherTest):
self.add_empty_file('bsub', cwd=smdir)
self.git('submodule init', cwd=self.srcdir)
- self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
+ self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir)
self.git('submodule update', cwd=self.srcdir)
self.git('commit -m submodule -a', cwd=self.srcdir)
@@ -2159,6 +2160,12 @@ class GitShallowTest(FetcherTest):
self.assertIn("fstests.doap", dir)
class GitLfsTest(FetcherTest):
+ def skipIfNoGitLFS():
+ import shutil
+ if not shutil.which('git-lfs'):
+ return unittest.skip('git-lfs not installed')
+ return lambda f: f
+
def setUp(self):
FetcherTest.setUp(self)
@@ -2176,10 +2183,14 @@ class GitLfsTest(FetcherTest):
bb.utils.mkdirhier(self.srcdir)
self.git_init(cwd=self.srcdir)
- with open(os.path.join(self.srcdir, '.gitattributes'), 'wt') as attrs:
- attrs.write('*.mp3 filter=lfs -text')
- self.git(['add', '.gitattributes'], cwd=self.srcdir)
- self.git(['commit', '-m', "attributes", '.gitattributes'], cwd=self.srcdir)
+ self.commit_file('.gitattributes', '*.mp3 filter=lfs -text')
+
+ def commit_file(self, filename, content):
+ with open(os.path.join(self.srcdir, filename), "w") as f:
+ f.write(content)
+ self.git(["add", filename], cwd=self.srcdir)
+ self.git(["commit", "-m", "Change"], cwd=self.srcdir)
+ return self.git(["rev-parse", "HEAD"], cwd=self.srcdir).strip()
def fetch(self, uri=None, download=True):
uris = self.d.getVar('SRC_URI').split()
@@ -2192,6 +2203,82 @@ class GitLfsTest(FetcherTest):
ud = fetcher.ud[uri]
return fetcher, ud
+ def get_real_git_lfs_file(self):
+ self.d.setVar('PATH', os.environ.get('PATH'))
+ fetcher, ud = self.fetch()
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+ unpacked_lfs_file = os.path.join(self.d.getVar('WORKDIR'), 'git', "Cat_poster_1.jpg")
+ return unpacked_lfs_file
+
+ @skipIfNoGitLFS()
+ def test_fetch_lfs_on_srcrev_change(self):
+ """Test if fetch downloads missing LFS objects when a different revision within an existing repository is requested"""
+ self.git(["lfs", "install", "--local"], cwd=self.srcdir)
+
+ @contextlib.contextmanager
+ def hide_upstream_repository():
+ """Hide the upstream repository to make sure that git lfs cannot pull from it"""
+ temp_name = self.srcdir + ".bak"
+ os.rename(self.srcdir, temp_name)
+ try:
+ yield
+ finally:
+ os.rename(temp_name, self.srcdir)
+
+ def fetch_and_verify(revision, filename, content):
+ self.d.setVar('SRCREV', revision)
+ fetcher, ud = self.fetch()
+
+ with hide_upstream_repository():
+ workdir = self.d.getVar('WORKDIR')
+ fetcher.unpack(workdir)
+
+ with open(os.path.join(workdir, "git", filename)) as f:
+ self.assertEqual(f.read(), content)
+
+ commit_1 = self.commit_file("a.mp3", "version 1")
+ commit_2 = self.commit_file("a.mp3", "version 2")
+
+ self.d.setVar('SRC_URI', "git://%s;protocol=file;lfs=1;branch=master" % self.srcdir)
+
+ # Seed the local download folder by fetching the latest commit and verifying that the LFS contents are
+ # available even when the upstream repository disappears.
+ fetch_and_verify(commit_2, "a.mp3", "version 2")
+ # Verify that even when an older revision is fetched, the needed LFS objects are fetched into the download
+ # folder.
+ fetch_and_verify(commit_1, "a.mp3", "version 1")
+
+ @skipIfNoGitLFS()
+ @skipIfNoNetwork()
+ def test_real_git_lfs_repo_succeeds_without_lfs_param(self):
+ self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master")
+ f = self.get_real_git_lfs_file()
+ self.assertTrue(os.path.exists(f))
+ self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f))
+
+ @skipIfNoGitLFS()
+ @skipIfNoNetwork()
+ def test_real_git_lfs_repo_succeeds(self):
+ self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=1")
+ f = self.get_real_git_lfs_file()
+ self.assertTrue(os.path.exists(f))
+ self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f))
+
+ @skipIfNoGitLFS()
+ @skipIfNoNetwork()
+    def test_real_git_lfs_repo_succeeds_lfs_disabled(self):
+ self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=0")
+ f = self.get_real_git_lfs_file()
+ # This is the actual non-smudged placeholder file on the repo if git-lfs does not run
+ lfs_file = (
+ 'version https://git-lfs.github.com/spec/v1\n'
+ 'oid sha256:34be66b1a39a1955b46a12588df9d5f6fc1da790e05cf01f3c7422f4bbbdc26b\n'
+ 'size 11423554\n'
+ )
+
+ with open(f) as fh:
+ self.assertEqual(lfs_file, fh.read())
+
def test_lfs_enabled(self):
import shutil
@@ -2210,12 +2297,16 @@ class GitLfsTest(FetcherTest):
shutil.rmtree(self.gitdir, ignore_errors=True)
fetcher.unpack(self.d.getVar('WORKDIR'))
- # If git-lfs cannot be found, the unpack should throw an error
- with self.assertRaises(bb.fetch2.FetchError):
- fetcher.download()
- ud.method._find_git_lfs = lambda d: False
- shutil.rmtree(self.gitdir, ignore_errors=True)
- fetcher.unpack(self.d.getVar('WORKDIR'))
+ old_find_git_lfs = ud.method._find_git_lfs
+ try:
+ # If git-lfs cannot be found, the unpack should throw an error
+ with self.assertRaises(bb.fetch2.FetchError):
+ fetcher.download()
+ ud.method._find_git_lfs = lambda d: False
+ shutil.rmtree(self.gitdir, ignore_errors=True)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+ finally:
+ ud.method._find_git_lfs = old_find_git_lfs
def test_lfs_disabled(self):
import shutil
@@ -2230,17 +2321,21 @@ class GitLfsTest(FetcherTest):
fetcher, ud = self.fetch()
self.assertIsNotNone(ud.method._find_git_lfs)
- # If git-lfs can be found, the unpack should be successful. A
- # live copy of git-lfs is not required for this case, so
- # unconditionally forge its presence.
- ud.method._find_git_lfs = lambda d: True
- shutil.rmtree(self.gitdir, ignore_errors=True)
- fetcher.unpack(self.d.getVar('WORKDIR'))
+ old_find_git_lfs = ud.method._find_git_lfs
+ try:
+ # If git-lfs can be found, the unpack should be successful. A
+ # live copy of git-lfs is not required for this case, so
+ # unconditionally forge its presence.
+ ud.method._find_git_lfs = lambda d: True
+ shutil.rmtree(self.gitdir, ignore_errors=True)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+ # If git-lfs cannot be found, the unpack should be successful
- # If git-lfs cannot be found, the unpack should be successful
- ud.method._find_git_lfs = lambda d: False
- shutil.rmtree(self.gitdir, ignore_errors=True)
- fetcher.unpack(self.d.getVar('WORKDIR'))
+ ud.method._find_git_lfs = lambda d: False
+ shutil.rmtree(self.gitdir, ignore_errors=True)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+ finally:
+ ud.method._find_git_lfs = old_find_git_lfs
class GitURLWithSpacesTest(FetcherTest):
test_git_urls = {
diff --git a/bitbake/lib/bb/tests/parse.py b/bitbake/lib/bb/tests/parse.py
index 2898f9bb14..1a3b74934d 100644
--- a/bitbake/lib/bb/tests/parse.py
+++ b/bitbake/lib/bb/tests/parse.py
@@ -194,3 +194,26 @@ deltask ${EMPTYVAR}
self.assertTrue('addtask ignored: " do_patch"' in stdout)
#self.assertTrue('dependent task do_foo for do_patch does not exist' in stdout)
+ broken_multiline_comment = """
+# First line of comment \\
+# Second line of comment \\
+
+"""
+ def test_parse_broken_multiline_comment(self):
+ f = self.parsehelper(self.broken_multiline_comment)
+ with self.assertRaises(bb.BBHandledException):
+ d = bb.parse.handle(f.name, self.d)['']
+
+
+ comment_in_var = """
+VAR = " \\
+ SOMEVAL \\
+# some comment \\
+ SOMEOTHERVAL \\
+"
+"""
+ def test_parse_comment_in_var(self):
+ f = self.parsehelper(self.comment_in_var)
+ with self.assertRaises(bb.BBHandledException):
+ d = bb.parse.handle(f.name, self.d)['']
+
diff --git a/bitbake/lib/bb/tinfoil.py b/bitbake/lib/bb/tinfoil.py
index e68a3b879a..fa29b930ce 100644
--- a/bitbake/lib/bb/tinfoil.py
+++ b/bitbake/lib/bb/tinfoil.py
@@ -324,11 +324,11 @@ class Tinfoil:
self.recipes_parsed = False
self.quiet = 0
self.oldhandlers = self.logger.handlers[:]
+ self.localhandlers = []
if setup_logging:
# This is the *client-side* logger, nothing to do with
# logging messages from the server
bb.msg.logger_create('BitBake', output)
- self.localhandlers = []
for handler in self.logger.handlers:
if handler not in self.oldhandlers:
self.localhandlers.append(handler)
diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py
index d11da978d7..3f7f82d17d 100644
--- a/bitbake/lib/bb/utils.py
+++ b/bitbake/lib/bb/utils.py
@@ -13,6 +13,7 @@ import errno
import logging
import bb
import bb.msg
+import locale
import multiprocessing
import fcntl
import importlib
@@ -28,6 +29,8 @@ import signal
import collections
import copy
import ctypes
+import random
+import tempfile
from subprocess import getstatusoutput
from contextlib import contextmanager
from ctypes import cdll
@@ -429,12 +432,14 @@ def better_eval(source, locals, extraglobals = None):
return eval(source, ctx, locals)
@contextmanager
-def fileslocked(files):
+def fileslocked(files, *args, **kwargs):
"""Context manager for locking and unlocking file locks."""
locks = []
if files:
for lockfile in files:
- locks.append(bb.utils.lockfile(lockfile))
+ l = bb.utils.lockfile(lockfile, *args, **kwargs)
+ if l is not None:
+ locks.append(l)
try:
yield
@@ -541,7 +546,12 @@ def md5_file(filename):
Return the hex string representation of the MD5 checksum of filename.
"""
import hashlib
- return _hasher(hashlib.new('MD5', usedforsecurity=False), filename)
+ try:
+ sig = hashlib.new('MD5', usedforsecurity=False)
+ except TypeError:
+ # Some configurations don't appear to support two arguments
+ sig = hashlib.new('MD5')
+ return _hasher(sig, filename)
def sha256_file(filename):
"""
@@ -597,6 +607,21 @@ def preserved_envvars():
]
return v + preserved_envvars_exported()
+def check_system_locale():
+ """Make sure the required system locale are available and configured"""
+ default_locale = locale.getlocale(locale.LC_CTYPE)
+
+ try:
+ locale.setlocale(locale.LC_CTYPE, ("en_US", "UTF-8"))
+ except:
+ sys.exit("Please make sure locale 'en_US.UTF-8' is available on your system")
+ else:
+ locale.setlocale(locale.LC_CTYPE, default_locale)
+
+ if sys.getfilesystemencoding() != "utf-8":
+ sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\n"
+                 "Python can't change the filesystem locale after loading so we need a UTF-8 locale when Python starts or things won't work.")
+
def filter_environment(good_vars):
"""
Create a pristine environment for bitbake. This will remove variables that
@@ -692,8 +717,8 @@ def remove(path, recurse=False, ionice=False):
return
if recurse:
for name in glob.glob(path):
- if _check_unsafe_delete_path(path):
- raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path)
+ if _check_unsafe_delete_path(name):
+ raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name)
# shutil.rmtree(name) would be ideal but its too slow
cmd = []
if ionice:
@@ -751,7 +776,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
if not sstat:
sstat = os.lstat(src)
except Exception as e:
- print("movefile: Stating source file failed...", e)
+ logger.warning("movefile: Stating source file failed...", e)
return None
destexists = 1
@@ -779,7 +804,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
os.unlink(src)
return os.lstat(dest)
except Exception as e:
- print("movefile: failed to properly create symlink:", dest, "->", target, e)
+            logger.warning("movefile: failed to properly create symlink: %s -> %s, %s", dest, target, e)
return None
renamefailed = 1
@@ -796,7 +821,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
except Exception as e:
if e.errno != errno.EXDEV:
# Some random error.
- print("movefile: Failed to move", src, "to", dest, e)
+                logger.warning("movefile: Failed to move %s to %s, %s", src, dest, e)
return None
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
@@ -808,13 +833,13 @@ def movefile(src, dest, newmtime = None, sstat = None):
bb.utils.rename(destpath + "#new", destpath)
didcopy = 1
except Exception as e:
- print('movefile: copy', src, '->', dest, 'failed.', e)
+                logger.warning('movefile: copy %s -> %s failed. %s', src, dest, e)
return None
else:
#we don't yet handle special, so we need to fall back to /bin/mv
a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
if a[0] != 0:
- print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
+                logger.warning("movefile: Failed to move special file: '%s' to '%s', %s", src, dest, a)
return None # failure
try:
if didcopy:
@@ -822,7 +847,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
os.unlink(src)
except Exception as e:
- print("movefile: Failed to chown/chmod/unlink", dest, e)
+ logger.warning("movefile: Failed to chown/chmod/unlink", dest, e)
return None
if newmtime:
@@ -981,6 +1006,9 @@ def to_boolean(string, default=None):
if not string:
return default
+ if isinstance(string, int):
+ return string != 0
+
normalized = string.lower()
if normalized in ("y", "yes", "1", "true"):
return True
@@ -1631,23 +1659,20 @@ def disable_network(uid=None, gid=None):
def export_proxies(d):
""" export common proxies variables from datastore to environment """
- import os
variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY',
- 'GIT_PROXY_COMMAND']
- exported = False
+ 'GIT_PROXY_COMMAND', 'SSL_CERT_FILE', 'SSL_CERT_DIR']
- for v in variables:
- if v in os.environ.keys():
- exported = True
- else:
- v_proxy = d.getVar(v)
- if v_proxy is not None:
- os.environ[v] = v_proxy
- exported = True
+ origenv = d.getVar("BB_ORIGENV")
+
+ for name in variables:
+ value = d.getVar(name)
+ if not value and origenv:
+ value = origenv.getVar(name)
+ if value:
+ os.environ[name] = value
- return exported
def load_plugins(logger, plugins, pluginpath):
@@ -1754,3 +1779,22 @@ def is_local_uid(uid=''):
if str(uid) == line_split[2]:
return True
return False
+
+def mkstemp(suffix=None, prefix=None, dir=None, text=False):
+ """
+ Generates a unique filename, independent of time.
+
+ mkstemp() in glibc (at least) generates unique file names based on the
+ current system time. When combined with highly parallel builds, and
+ operating over NFS (e.g. shared sstate/downloads) this can result in
+ conflicts and race conditions.
+
+ This function adds additional entropy to the file name so that a collision
+ is independent of time and thus extremely unlikely.
+ """
+ entropy = "".join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=20))
+ if prefix:
+ prefix = prefix + entropy
+ else:
+ prefix = tempfile.gettempprefix() + entropy
+ return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
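+
+# Example usage (an illustrative sketch; "target" and "data" are hypothetical
+# names used only for illustration): bb.utils.mkstemp() can be used as a
+# drop-in replacement for tempfile.mkstemp(), e.g.
+#
+#   fd, tmpname = bb.utils.mkstemp(dir=os.path.dirname(target), prefix="newfile-")
+#   with os.fdopen(fd, "w") as f:
+#       f.write(data)
+#   bb.utils.rename(tmpname, target)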
diff --git a/bitbake/lib/bblayers/__init__.py b/bitbake/lib/bblayers/__init__.py
index 4e7c09da04..78efd29750 100644
--- a/bitbake/lib/bblayers/__init__.py
+++ b/bitbake/lib/bblayers/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bblayers/action.py b/bitbake/lib/bblayers/action.py
index 6723e2c605..454c251410 100644
--- a/bitbake/lib/bblayers/action.py
+++ b/bitbake/lib/bblayers/action.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bblayers/common.py b/bitbake/lib/bblayers/common.py
index 6c76ef3505..f7b9cee371 100644
--- a/bitbake/lib/bblayers/common.py
+++ b/bitbake/lib/bblayers/common.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bblayers/layerindex.py b/bitbake/lib/bblayers/layerindex.py
index 7936516209..0ac8fd2ec7 100644
--- a/bitbake/lib/bblayers/layerindex.py
+++ b/bitbake/lib/bblayers/layerindex.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bblayers/query.py b/bitbake/lib/bblayers/query.py
index 525d4f0d47..9142ec4474 100644
--- a/bitbake/lib/bblayers/query.py
+++ b/bitbake/lib/bblayers/query.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/bs4/tests/test_tree.py b/bitbake/lib/bs4/tests/test_tree.py
index 8e5c66426e..cf0f1abe0c 100644
--- a/bitbake/lib/bs4/tests/test_tree.py
+++ b/bitbake/lib/bs4/tests/test_tree.py
@@ -585,7 +585,7 @@ class SiblingTest(TreeTest):
</html>'''
# All that whitespace looks good but makes the tests more
# difficult. Get rid of it.
- markup = re.compile("\n\s*").sub("", markup)
+ markup = re.compile(r"\n\s*").sub("", markup)
self.tree = self.soup(markup)
diff --git a/bitbake/lib/ply/yacc.py b/bitbake/lib/ply/yacc.py
index 767c4e4674..381b50cf0b 100644
--- a/bitbake/lib/ply/yacc.py
+++ b/bitbake/lib/ply/yacc.py
@@ -2798,7 +2798,14 @@ class ParserReflect(object):
def signature(self):
try:
import hashlib
+ except ImportError:
+ raise RuntimeError("Unable to import hashlib")
+ try:
sig = hashlib.new('MD5', usedforsecurity=False)
+ except TypeError:
+ # Some configurations don't appear to support two arguments
+ sig = hashlib.new('MD5')
+ try:
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
diff --git a/bitbake/lib/prserv/__init__.py b/bitbake/lib/prserv/__init__.py
index 9961040b58..38ced818ad 100644
--- a/bitbake/lib/prserv/__init__.py
+++ b/bitbake/lib/prserv/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/prserv/client.py b/bitbake/lib/prserv/client.py
index a3f19ddafc..69ab7a4ac9 100644
--- a/bitbake/lib/prserv/client.py
+++ b/bitbake/lib/prserv/client.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/prserv/db.py b/bitbake/lib/prserv/db.py
index 2710d4a225..b4bda7078c 100644
--- a/bitbake/lib/prserv/db.py
+++ b/bitbake/lib/prserv/db.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/prserv/serv.py b/bitbake/lib/prserv/serv.py
index 0a20b927c7..0db6ebc707 100644
--- a/bitbake/lib/prserv/serv.py
+++ b/bitbake/lib/prserv/serv.py
@@ -1,4 +1,6 @@
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -342,9 +344,9 @@ def auto_shutdown():
def ping(host, port):
from . import client
- conn = client.PRClient()
- conn.connect_tcp(host, port)
- return conn.ping()
+ with client.PRClient() as conn:
+ conn.connect_tcp(host, port)
+ return conn.ping()
def connect(host, port):
from . import client
diff --git a/bitbake/lib/toaster/manage.py b/bitbake/lib/toaster/manage.py
index ae32619d12..f8de49c264 100755
--- a/bitbake/lib/toaster/manage.py
+++ b/bitbake/lib/toaster/manage.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright BitBake Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/bitbake/lib/toaster/orm/fixtures/poky.xml b/bitbake/lib/toaster/orm/fixtures/poky.xml
index ed86114ebe..20fcc01767 100644
--- a/bitbake/lib/toaster/orm/fixtures/poky.xml
+++ b/bitbake/lib/toaster/orm/fixtures/poky.xml
@@ -42,7 +42,7 @@
<!-- Releases available -->
<object model="orm.release" pk="1">
<field type="CharField" name="name">kirkstone</field>
- <field type="CharField" name="description">Yocto Project 3.5 "Kirkstone"</field>
+ <field type="CharField" name="description">Yocto Project 4.0 "Kirkstone"</field>
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
<field type="CharField" name="branch_name">kirkstone</field>
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the &lt;a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=kirkstone"&gt;Yocto Project Kirkstone branch&lt;/a&gt;.</field>
diff --git a/bitbake/lib/toaster/toastergui/api.py b/bitbake/lib/toaster/toastergui/api.py
index b4cdc335ef..e367bd910e 100644
--- a/bitbake/lib/toaster/toastergui/api.py
+++ b/bitbake/lib/toaster/toastergui/api.py
@@ -11,7 +11,7 @@ import os
import re
import logging
import json
-import subprocess
+import glob
from collections import Counter
from orm.models import Project, ProjectTarget, Build, Layer_Version
@@ -227,20 +227,18 @@ class XhrSetDefaultImageUrl(View):
# same logical name
# * Each project that uses a layer will have its own
# LayerVersion and Project Layer for it
-# * During the Paroject delete process, when the last
+# * During the Project delete process, when the last
# LayerVersion for a 'local_source_dir' layer is deleted
# then the Layer record is deleted to remove orphans
#
def scan_layer_content(layer,layer_version):
# if this is a local layer directory, we can immediately scan its content
- if layer.local_source_dir:
+ if os.path.isdir(layer.local_source_dir):
try:
# recipes-*/*/*.bb
- cmd = '%s %s' % ('ls', os.path.join(layer.local_source_dir,'recipes-*/*/*.bb'))
- recipes_list = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read()
- recipes_list = recipes_list.decode("utf-8").strip()
- if recipes_list and 'No such' not in recipes_list:
+ recipes_list = glob.glob(os.path.join(layer.local_source_dir, 'recipes-*/*/*.bb'))
+ for recipe in recipes_list:
-                for recipe in recipes_list.split('\n'):
recipe_path = recipe[recipe.rfind('recipes-'):]
recipe_name = recipe[recipe.rfind('/')+1:].replace('.bb','')
@@ -260,6 +258,9 @@ def scan_layer_content(layer,layer_version):
except Exception as e:
logger.warning("ERROR:scan_layer_content: %s" % e)
+ else:
+ logger.warning("ERROR: wrong path given")
+ raise KeyError("local_source_dir")
class XhrLayer(View):
""" Delete, Get, Add and Update Layer information
@@ -456,15 +457,18 @@ class XhrLayer(View):
'layerdetailurl':
layer_dep.get_detailspage_url(project.pk)})
- # Scan the layer's content and update components
- scan_layer_content(layer,layer_version)
+ # Only scan_layer_content if layer is local
+ if layer_data.get('local_source_dir', None):
+ # Scan the layer's content and update components
+ scan_layer_content(layer,layer_version)
except Layer_Version.DoesNotExist:
return error_response("layer-dep-not-found")
except Project.DoesNotExist:
return error_response("project-not-found")
- except KeyError:
- return error_response("incorrect-parameters")
+ except KeyError as e:
+ _log("KeyError: %s" % e)
+            return error_response("incorrect-parameters")
return JsonResponse({'error': "ok",
'imported_layer': {
diff --git a/documentation/.gitignore b/documentation/.gitignore
index 096b97ec28..a8f86f7c98 100644
--- a/documentation/.gitignore
+++ b/documentation/.gitignore
@@ -5,3 +5,5 @@ sphinx-static/switchers.js
.vscode/
*/svg/*.png
*/svg/*.pdf
+styles/*
+!styles/config
diff --git a/documentation/.vale.ini b/documentation/.vale.ini
new file mode 100644
index 0000000000..02042bb632
--- /dev/null
+++ b/documentation/.vale.ini
@@ -0,0 +1,7 @@
+StylesPath = styles
+MinAlertLevel = suggestion
+Packages = RedHat, proselint, write-good, alex, Readability, Joblint
+Vocab = Yocto, OpenSource
+[*.rst]
+BasedOnStyles = Vale, RedHat, proselint, write-good, alex, Readability, Joblint
+
diff --git a/documentation/Makefile b/documentation/Makefile
index 33bbca0bab..4e0af4bd30 100644
--- a/documentation/Makefile
+++ b/documentation/Makefile
@@ -5,6 +5,9 @@
# from the environment for the first two.
SPHINXOPTS ?= -W --keep-going -j auto
SPHINXBUILD ?= sphinx-build
+# Release notes are excluded because they contain contributor names and commit messages which can't be modified
+VALEOPTS ?= --no-wrap --glob '!migration-guides/release-notes-*.rst'
+VALEDOCS ?= .
SOURCEDIR = .
IMAGEDIRS = */svg
BUILDDIR = _build
@@ -20,7 +23,7 @@ endif
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-.PHONY: all help Makefile clean publish epub latexpdf
+.PHONY: all help Makefile clean stylecheck publish epub latexpdf
publish: Makefile html singlehtml
rm -rf $(BUILDDIR)/$(DESTDIR)/
@@ -46,6 +49,13 @@ PNGs := $(foreach dir, $(IMAGEDIRS), $(patsubst %.svg,%.png,$(wildcard $(SOURCED
clean:
@rm -rf $(BUILDDIR) $(PNGs) $(PDFs) poky.yaml sphinx-static/switchers.js
+stylecheck:
+ vale sync
+ vale $(VALEOPTS) $(VALEDOCS)
+
+sphinx-lint:
+ sphinx-lint $(SOURCEDIR)
+
epub: $(PNGs)
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/documentation/README b/documentation/README
index 6f6a8ec842..b60472fcbf 100644
--- a/documentation/README
+++ b/documentation/README
@@ -34,16 +34,18 @@ Manual Organization
Here the folders corresponding to individual manuals:
+* brief-yoctoprojectqs - Yocto Project Quick Start
* overview-manual - Yocto Project Overview and Concepts Manual
-* sdk-manual - Yocto Project Software Development Kit (SDK) Developer's Guide.
+* contributor-guide - Yocto Project and OpenEmbedded Contributor Guide
+* ref-manual - Yocto Project Reference Manual
* bsp-guide - Yocto Project Board Support Package (BSP) Developer's Guide
* dev-manual - Yocto Project Development Tasks Manual
* kernel-dev - Yocto Project Linux Kernel Development Manual
-* ref-manual - Yocto Project Reference Manual
-* brief-yoctoprojectqs - Yocto Project Quick Start
* profile-manual - Yocto Project Profiling and Tracing Manual
+* sdk-manual - Yocto Project Software Development Kit (SDK) Developer's Guide.
* toaster-manual - Toaster User Manual
* test-manual - Yocto Project Test Environment Manual
+* migration-guides - Yocto Project Release and Migration Notes
Each folder is self-contained regarding content and figures.
@@ -129,6 +131,10 @@ Also install the "inkscape" package from your distribution.
Inkscape is need to convert SVG graphics to PNG (for EPUB
export) and to PDF (for PDF export).
+Additionally install "fncychap.sty" TeX font if you want to build PDFs. Debian
+and Ubuntu have it in "texlive-latex-extra" package while RedHat distributions
+and OpenSUSE have it in "texlive-fncychap" package for example.
+
To build the documentation locally, run:
$ cd documentation
@@ -145,6 +151,34 @@ dependencies in a virtual environment:
$ pipenv install
$ pipenv run make html
+Style checking the Yocto Project documentation
+==============================================
+
+The project is starting to use Vale (https://vale.sh/)
+to validate the text style.
+
+To install Vale:
+
+ $ pip install vale
+
+To run Vale:
+
+ $ make stylecheck
+
+Link checking the Yocto Project documentation
+=============================================
+
+To fix errors which are not reported by Sphinx itself,
+the project uses sphinx-lint (https://github.com/sphinx-contrib/sphinx-lint).
+
+To install sphinx-lint:
+
+ $ pip install sphinx-lint
+
+To run sphinx-lint:
+
+ $ make sphinx-lint
+
Sphinx theme and CSS customization
==================================
@@ -271,6 +305,19 @@ websites.
More information can be found here:
https://sublime-and-sphinx-guide.readthedocs.io/en/latest/references.html.
+For external links, we use this syntax:
+`link text <link URL>`__
+
+instead of:
+`link text <link URL>`_
+
+Both syntaxes work, but the latter also creates a "link text" reference
+target which could conflict with other references with the same name.
+So, only use this variant when you wish to make multiple references
+to this link, reusing only the target name.
+
+See https://stackoverflow.com/questions/27420317/restructured-text-rst-http-links-underscore-vs-use
+
Anchor (<#link>) links are forbidden as they are not checked by Sphinx during
the build and may be broken without knowing about it.
@@ -340,13 +387,16 @@ The sphinx.ext.intersphinx extension is enabled by default
so that we can cross reference content from other Sphinx based
documentation projects, such as the BitBake manual.
-References to the BitBake manual can be done:
+References to the BitBake manual can be done directly:
- With a specific description instead of the section name:
- :ref:`Azure Storage fetcher (az://) <bitbake:bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
+ :ref:`Azure Storage fetcher (az://) <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
- With the section name:
- :ref:`bitbake:bitbake-user-manual/bitbake-user-manual-intro:usage and syntax` option
- - Linking to the entire BitBake manual:
- :doc:`BitBake User Manual <bitbake:index>`
+ :ref:`bitbake-user-manual/bitbake-user-manual-intro:usage and syntax` option
+
+If you want to refer to an entire document (or chapter) in the BitBake manual,
+you have to use the ":doc:" macro with the "bitbake:" prefix:
+ - :doc:`BitBake User Manual <bitbake:index>`
+  - :doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata` chapter
Note that a reference to a variable (:term:`VARIABLE`) automatically points to
the BitBake manual if the variable is not described in the Reference Manual's Variable Glossary.
@@ -355,6 +405,11 @@ BitBake manual as follows:
:term:`bitbake:BB_NUMBER_PARSE_THREADS`
+The same "bitbake:" prefix would be needed in a ":ref:" reference if a document
+with the same filename existed in both the Yocto Project and BitBake manuals:
+
+ :ref:`bitbake:directory/file:section title`
+
Submitting documentation changes
================================
diff --git a/documentation/brief-yoctoprojectqs/index.rst b/documentation/brief-yoctoprojectqs/index.rst
index 12cab1db76..db17437aa8 100644
--- a/documentation/brief-yoctoprojectqs/index.rst
+++ b/documentation/brief-yoctoprojectqs/index.rst
@@ -25,18 +25,11 @@ build a reference embedded OS called Poky.
in the Yocto Project Development Tasks Manual for more
information.
- - You may use Windows Subsystem For Linux v2 to set up a build host
- using Windows 10.
-
- .. note::
-
- The Yocto Project is not compatible with WSLv1, it is
- compatible but not officially supported nor validated with
- WSLv2, if you still decide to use WSL please upgrade to WSLv2.
-
- See the :ref:`dev-manual/start:setting up to use windows
- subsystem for linux (wslv2)` section in the Yocto Project Development
- Tasks Manual for more information.
+ - You may use version 2 of Windows Subsystem For Linux (WSL 2) to set
+ up a build host using Windows 10 or later, Windows Server 2019 or later.
+ See the :ref:`dev-manual/start:setting up to use windows subsystem for
+ linux (wsl 2)` section in the Yocto Project Development Tasks Manual
+ for more information.
If you want more conceptual or background information on the Yocto
Project, see the :doc:`/overview-manual/index`.
@@ -47,7 +40,13 @@ Compatible Linux Distribution
Make sure your :term:`Build Host` meets the
following requirements:
-- 50 Gbytes of free disk space
+- At least &MIN_DISK_SPACE; Gbytes of free disk space, though
+ much more will help to run multiple builds and increase
+ performance by reusing build artifacts.
+
+- At least &MIN_RAM; Gbytes of RAM, though a modern modern build host with as
+ much RAM and as many CPU cores as possible is strongly recommended to
+ maximize build performance.
- Runs a supported Linux distribution (i.e. recent releases of Fedora,
openSUSE, CentOS, Debian, or Ubuntu). For a list of Linux
@@ -64,11 +63,12 @@ following requirements:
- tar &MIN_TAR_VERSION; or greater
- Python &MIN_PYTHON_VERSION; or greater.
- gcc &MIN_GCC_VERSION; or greater.
+ - GNU make &MIN_MAKE_VERSION; or greater
If your build host does not meet any of these three listed version
requirements, you can take steps to prepare the system so that you
can still use the Yocto Project. See the
-:ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`
+:ref:`ref-manual/system-requirements:required git, tar, python, make and gcc versions`
section in the Yocto Project Reference Manual for information.
Build Host Packages
@@ -76,11 +76,9 @@ Build Host Packages
You must install essential host packages on your build host. The
following command installs the host packages based on an Ubuntu
-distribution:
-
-.. code-block:: shell
+distribution::
- $ sudo apt install &UBUNTU_HOST_PACKAGES_ESSENTIAL;
+ $ sudo apt install &UBUNTU_HOST_PACKAGES_ESSENTIAL;
.. note::
@@ -253,15 +251,10 @@ an entire Linux distribution, including the toolchain, from source.
To use such mirrors, uncomment the below lines in your ``conf/local.conf``
file in the :term:`Build Directory`::
- BB_SIGNATURE_HANDLER = "OEEquivHash"
+ BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
+ SSTATE_MIRRORS ?= "file://.* http://cdn.jsdelivr.net/yocto/sstate/all/PATH;downloadfilename=PATH"
BB_HASHSERVE = "auto"
- BB_HASHSERVE_UPSTREAM = "typhoon.yocto.io:8687"
- SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/&YOCTO_DOC_VERSION;/PATH;downloadfilename=PATH"
-
- The above settings assumed the use of Yocto Project &YOCTO_DOC_VERSION;.
- If you are using the development version instead, set :term:`SSTATE_MIRRORS` as follows::
-
- SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/dev/PATH;downloadfilename=PATH"
+ BB_SIGNATURE_HANDLER = "OEEquivHash"
#. **Start the Build:** Continue with the following command to build an OS
image for the target, which is ``core-image-sato`` in this example:
@@ -374,7 +367,7 @@ Follow these steps to add a hardware layer:
You can find
more information on adding layers in the
- :ref:`dev-manual/common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`
+ :ref:`dev-manual/layers:adding a layer using the \`\`bitbake-layers\`\` script`
section.
Completing these steps has added the ``meta-altera`` layer to your Yocto
@@ -409,7 +402,7 @@ The following commands run the tool to create a layer named
For more information
on layers and how to create them, see the
-:ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`
+:ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`
section in the Yocto Project Development Tasks Manual.
Where To Go Next
diff --git a/documentation/bsp-guide/bsp.rst b/documentation/bsp-guide/bsp.rst
index 8ec7f2957e..353d56777b 100644
--- a/documentation/bsp-guide/bsp.rst
+++ b/documentation/bsp-guide/bsp.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
-************************************************
-Board Support Packages (BSP) - Developer's Guide
-************************************************
+**************************************************
+Board Support Packages (BSP) --- Developer's Guide
+**************************************************
A Board Support Package (BSP) is a collection of information that
defines how to support a particular hardware device, set of devices, or
@@ -128,7 +128,7 @@ you want to work with, such as::
and so on.
For more information on layers, see the
-":ref:`dev-manual/common-tasks:understanding and creating layers`"
+":ref:`dev-manual/layers:understanding and creating layers`"
section of the Yocto Project Development Tasks Manual.
Preparing Your Build Host to Work With BSP Layers
@@ -464,7 +464,7 @@ requirements are handled with the ``COPYING.MIT`` file.
Licensing files can be MIT, BSD, GPLv*, and so forth. These files are
recommended for the BSP but are optional and totally up to the BSP
developer. For information on how to maintain license compliance, see
-the ":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
+the ":ref:`dev-manual/licenses:maintaining open source license compliance during your product's lifecycle`"
section in the Yocto Project Development Tasks Manual.
README File
@@ -590,7 +590,7 @@ filenames correspond to the values to which users have set the
These files define things such as the kernel package to use
(:term:`PREFERRED_PROVIDER` of
-:ref:`virtual/kernel <dev-manual/common-tasks:using virtual providers>`),
+:ref:`virtual/kernel <dev-manual/new-recipe:using virtual providers>`),
the hardware drivers to include in different types of images, any
special software components that are needed, any bootloader information,
and also any special image format requirements.
@@ -757,7 +757,7 @@ workflow.
OpenEmbedded build system knows about. For more information on
layers, see the ":ref:`overview-manual/yp-intro:the yocto project layer model`"
section in the Yocto Project Overview and Concepts Manual. You can also
- reference the ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+ reference the ":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual. For more
information on BSP layers, see the ":ref:`bsp-guide/bsp:bsp layers`"
section.
@@ -774,20 +774,6 @@ workflow.
- Two general IA platforms (``genericx86`` and ``genericx86-64``)
- - There are three core Intel BSPs in the Yocto Project
- release, in the ``meta-intel`` layer:
-
- - ``intel-core2-32``, which is a BSP optimized for the Core2
- family of CPUs as well as all CPUs prior to the Silvermont
- core.
-
- - ``intel-corei7-64``, which is a BSP optimized for Nehalem
- and later Core and Xeon CPUs as well as Silvermont and later
- Atom CPUs, such as the Baytrail SoCs.
-
- - ``intel-quark``, which is a BSP optimized for the Intel
- Galileo gen1 & gen2 development boards.
-
When you set up a layer for a new BSP, you should follow a standard
layout. This layout is described in the ":ref:`bsp-guide/bsp:example filesystem layout`"
section. In the standard layout, notice
@@ -816,7 +802,7 @@ workflow.
key configuration files are configured appropriately: the
``conf/local.conf`` and the ``conf/bblayers.conf`` file. You must
make the OpenEmbedded build system aware of your new layer. See the
- ":ref:`dev-manual/common-tasks:enabling your layer`"
+ ":ref:`dev-manual/layers:enabling your layer`"
section in the Yocto Project Development Tasks Manual for information
on how to let the build system know about your new layer.
@@ -845,7 +831,7 @@ Before looking at BSP requirements, you should consider the following:
layer that can be added to the Yocto Project. For guidelines on
creating a layer that meets these base requirements, see the
":ref:`bsp-guide/bsp:bsp layers`" section in this manual and the
- ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+ ":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual.
- The requirements in this section apply regardless of how you package
@@ -867,8 +853,7 @@ Before looking at BSP requirements, you should consider the following:
dictating that a specific kernel or kernel version be used in a given
BSP.
-Following are the requirements for a released BSP that conform to the
-Yocto Project:
+The requirements for a released BSP that conforms to the Yocto Project are:
- *Layer Name:* The BSP must have a layer name that follows the Yocto
Project standards. For information on BSP layer names, see the
@@ -893,8 +878,8 @@ Yocto Project:
``recipes-*`` subdirectories specific to the recipe's function, or
within a subdirectory containing a set of closely-related recipes.
The recipes themselves should follow the general guidelines for
- recipes used in the Yocto Project found in the ":oe_wiki:`OpenEmbedded
- Style Guide </Styleguide>`".
+ recipes found in the ":doc:`../contributor-guide/recipe-style-guide`"
+ in the Yocto Project and OpenEmbedded Contributor Guide.
- *License File:* You must include a license file in the
``meta-bsp_root_name`` directory. This license covers the BSP
@@ -927,8 +912,8 @@ Yocto Project:
- The name and contact information for the BSP layer maintainer.
This is the person to whom patches and questions should be sent.
For information on how to find the right person, see the
- ":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
- section in the Yocto Project Development Tasks Manual.
+ :doc:`../contributor-guide/submit-changes` section in the Yocto Project and
+ OpenEmbedded Contributor Guide.
- Instructions on how to build the BSP using the BSP layer.
@@ -972,7 +957,7 @@ Yocto Project:
Released BSP Recommendations
----------------------------
-Following are recommendations for released BSPs that conform to the
+Here are recommendations for released BSPs that conform to the
Yocto Project:
- *Bootable Images:* Released BSPs can contain one or more bootable
@@ -1013,7 +998,7 @@ the following:
- Create a ``*.bbappend`` file for the modified recipe. For information on using
append files, see the
- ":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
+ ":ref:`dev-manual/layers:appending other layers metadata with your layer`"
section in the Yocto Project Development Tasks Manual.
- Ensure your directory structure in the BSP layer that supports your
@@ -1034,7 +1019,7 @@ the following:
that additional hierarchy and the files would obviously not be able
to reside in a machine-specific directory.
-Following is a specific example to help you better understand the
+Here is a specific example to help you better understand the
process. This example customizes a recipe by adding a
BSP-specific configuration file named ``interfaces`` to the
``init-ifupdown_1.0.bb`` recipe for machine "xyz" where the BSP layer
@@ -1117,7 +1102,7 @@ list describes them in order of preference:
Specifying the matching license string signifies that you agree to
the license. Thus, the build system can build the corresponding
recipe and include the component in the image. See the
- ":ref:`dev-manual/common-tasks:enabling commercially licensed recipes`"
+ ":ref:`dev-manual/licenses:enabling commercially licensed recipes`"
section in the Yocto Project Development Tasks Manual for details on
how to use these variables.
@@ -1169,7 +1154,7 @@ Use these steps to create a BSP layer:
``create-layer`` subcommand to create a new general layer. For
instructions on how to create a general layer using the
``bitbake-layers`` script, see the
- ":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
+ ":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
section in the Yocto Project Development Tasks Manual.
- *Create a Layer Configuration File:* Every layer needs a layer
@@ -1179,14 +1164,14 @@ Use these steps to create a BSP layer:
:yocto_git:`Source Repositories <>`. To get examples of what you need
in your configuration file, locate a layer (e.g. "meta-ti") and
examine the
- :yocto_git:`local.conf </meta-ti/tree/conf/layer.conf>`
+ :yocto_git:`local.conf </meta-ti/tree/meta-ti-bsp/conf/layer.conf>`
file.
- *Create a Machine Configuration File:* Create a
``conf/machine/bsp_root_name.conf`` file. See
:yocto_git:`meta-yocto-bsp/conf/machine </poky/tree/meta-yocto-bsp/conf/machine>`
for sample ``bsp_root_name.conf`` files. There are other samples such as
- :yocto_git:`meta-ti </meta-ti/tree/conf/machine>`
+ :yocto_git:`meta-ti </meta-ti/tree/meta-ti-bsp/conf/machine>`
and
:yocto_git:`meta-freescale </meta-freescale/tree/conf/machine>`
from other vendors that have more specific machine and tuning
@@ -1194,7 +1179,7 @@ Use these steps to create a BSP layer:
- *Create a Kernel Recipe:* Create a kernel recipe in
``recipes-kernel/linux`` by either using a kernel append file or a
- new custom kernel recipe file (e.g. ``yocto-linux_4.12.bb``). The BSP
+ new custom kernel recipe file (e.g. ``linux-yocto_4.12.bb``). The BSP
layers mentioned in the previous step also contain different kernel
examples. See the ":ref:`kernel-dev/common:modifying an existing recipe`"
section in the Yocto Project Linux Kernel Development Manual for
@@ -1209,7 +1194,7 @@ BSP Layer Configuration Example
-------------------------------
The layer's ``conf`` directory contains the ``layer.conf`` configuration
-file. In this example, the ``conf/layer.conf`` is the following::
+file. In this example, the ``conf/layer.conf`` file is the following::
# We have a conf and classes directory, add to BBPATH
BBPATH .= ":${LAYERDIR}"
@@ -1229,7 +1214,7 @@ configuration files is to examine various files for BSP from the
:yocto_git:`Source Repositories <>`.
For a detailed description of this particular layer configuration file,
-see ":ref:`step 3 <dev-manual/common-tasks:creating your own layer>`"
+see ":ref:`step 3 <dev-manual/layers:creating your own layer>`"
in the discussion that describes how to create layers in the Yocto
Project Development Tasks Manual.
@@ -1449,39 +1434,39 @@ The kernel recipe used to build the kernel image for the BeagleBone
device was established in the machine configuration::
PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
- PREFERRED_VERSION_linux-yocto ?= "5.0%"
+ PREFERRED_VERSION_linux-yocto ?= "5.15%"
The ``meta-yocto-bsp/recipes-kernel/linux`` directory in the layer contains
metadata used to build the kernel. In this case, a kernel append file
-(i.e. ``linux-yocto_5.0.bbappend``) is used to override an established
-kernel recipe (i.e. ``linux-yocto_5.0.bb``), which is located in
-:yocto_git:`/poky/tree/meta/recipes-kernel/linux`.
+(i.e. ``linux-yocto_5.15.bbappend``) is used to override an established
+kernel recipe (i.e. ``linux-yocto_5.15.bb``), which is located in
+:yocto_git:`/poky/tree/meta-yocto-bsp/recipes-kernel/linux`.
-Following is the contents of the append file::
+The contents of the append file are::
- KBRANCH:genericx86 = "v5.0/standard/base"
- KBRANCH:genericx86-64 = "v5.0/standard/base"
- KBRANCH:edgerouter = "v5.0/standard/edgerouter"
- KBRANCH:beaglebone-yocto = "v5.0/standard/beaglebone"
+ KBRANCH:genericx86 = "v5.15/standard/base"
+ KBRANCH:genericx86-64 = "v5.15/standard/base"
+ KBRANCH:edgerouter = "v5.15/standard/edgerouter"
+ KBRANCH:beaglebone-yocto = "v5.15/standard/beaglebone"
KMACHINE:genericx86 ?= "common-pc"
KMACHINE:genericx86-64 ?= "common-pc-64"
KMACHINE:beaglebone-yocto ?= "beaglebone"
- SRCREV_machine:genericx86 ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
- SRCREV_machine:genericx86-64 ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
- SRCREV_machine:edgerouter ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
- SRCREV_machine:beaglebone-yocto ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
+ SRCREV_machine:genericx86 ?= "0b628306d1f9ea28c0e86369ce9bb87a47893c9c"
+ SRCREV_machine:genericx86-64 ?= "0b628306d1f9ea28c0e86369ce9bb87a47893c9c"
+ SRCREV_machine:edgerouter ?= "90f1ee6589264545f548d731c2480b08a007230f"
+ SRCREV_machine:beaglebone-yocto ?= "9aabbaa89fcb21af7028e814c1f5b61171314d5a"
COMPATIBLE_MACHINE:genericx86 = "genericx86"
COMPATIBLE_MACHINE:genericx86-64 = "genericx86-64"
COMPATIBLE_MACHINE:edgerouter = "edgerouter"
COMPATIBLE_MACHINE:beaglebone-yocto = "beaglebone-yocto"
- LINUX_VERSION:genericx86 = "5.0.3"
- LINUX_VERSION:genericx86-64 = "5.0.3"
- LINUX_VERSION:edgerouter = "5.0.3"
- LINUX_VERSION:beaglebone-yocto = "5.0.3"
+ LINUX_VERSION:genericx86 = "5.15.72"
+ LINUX_VERSION:genericx86-64 = "5.15.72"
+ LINUX_VERSION:edgerouter = "5.15.54"
+ LINUX_VERSION:beaglebone-yocto = "5.15.54"
This particular append file works for all the machines that are
part of the ``meta-yocto-bsp`` layer. The relevant statements are
diff --git a/documentation/conf.py b/documentation/conf.py
index a7cdf415f8..83cf829e6a 100644
--- a/documentation/conf.py
+++ b/documentation/conf.py
@@ -90,7 +90,8 @@ rst_prolog = """
# external links and substitutions
extlinks = {
- 'cve': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-'),
+ 'cve': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-%s'),
+ 'cve_mitre': ('https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s', 'CVE-%s'),
'yocto_home': ('https://www.yoctoproject.org%s', None),
'yocto_wiki': ('https://wiki.yoctoproject.org/wiki%s', None),
'yocto_dl': ('https://downloads.yoctoproject.org%s', None),
@@ -106,6 +107,7 @@ extlinks = {
'oe_wiki': ('https://www.openembedded.org/wiki%s', None),
'oe_layerindex': ('https://layers.openembedded.org%s', None),
'oe_layer': ('https://layers.openembedded.org/layerindex/branch/master/layer%s', None),
+ 'wikipedia': ('https://en.wikipedia.org/wiki/%s', None),
}
# Intersphinx config to use cross reference with Bitbake user manual
@@ -157,8 +159,8 @@ html_last_updated_fmt = '%b %d, %Y'
html_secnumber_suffix = " "
latex_elements = {
- 'passoptionstopackages': '\PassOptionsToPackage{bookmarksdepth=5}{hyperref}',
- 'preamble': '\setcounter{tocdepth}{2}',
+ 'passoptionstopackages': '\\PassOptionsToPackage{bookmarksdepth=5}{hyperref}',
+ 'preamble': '\\setcounter{tocdepth}{2}',
}
# Make the EPUB builder prefer PNG to SVG because of issues rendering Inkscape SVG
diff --git a/documentation/contributor-guide/identify-component.rst b/documentation/contributor-guide/identify-component.rst
new file mode 100644
index 0000000000..a28391a66a
--- /dev/null
+++ b/documentation/contributor-guide/identify-component.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Identify the component
+**********************
+
+The Yocto Project and OpenEmbedded ecosystem is built of :term:`layers <Layer>`
+so the first step is to identify the component where the issue likely lies.
+For example, if you have a hardware issue, it is likely related to the BSP
+you are using and the best place to seek advice would be from the BSP provider
+or :term:`layer`. If the issue is a build/configuration one and a distro is in
+use, they would likely be the first place to ask questions. If the issue is a
+generic one and/or in the core classes or metadata, the core layer or BitBake
+might be the appropriate component.
+
+Each metadata layer being used should contain a ``README`` file, which should
+explain where to report issues, where to send changes and how to contact the
+maintainers.
+
+If the issue is in the core metadata layer (OpenEmbedded-Core) or in BitBake,
+it can be reported in the :yocto_bugs:`Yocto Project Bugzilla <>`. The
+:yocto_lists:`yocto </g/yocto>` mailing list is a general “catch-all” location
+where questions can be sent if you can’t work out where something should go.
+
+:term:`Poky` is a commonly used “combination” repository where multiple
+components have been combined (:oe_git:`bitbake </bitbake>`,
+:oe_git:`openembedded-core </openembedded-core>`,
+:yocto_git:`meta-yocto </meta-yocto>` and
+:yocto_git:`yocto-docs </yocto-docs>`). Patches should be submitted against the
+appropriate individual component rather than :term:`Poky` itself as detailed in
+the appropriate ``README`` file.
+
diff --git a/documentation/contributor-guide/index.rst b/documentation/contributor-guide/index.rst
new file mode 100644
index 0000000000..a832169455
--- /dev/null
+++ b/documentation/contributor-guide/index.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+================================================
+Yocto Project and OpenEmbedded Contributor Guide
+================================================
+
+The Yocto Project and OpenEmbedded are open-source, community-based projects, so
+contributions are very welcome; this is how the code evolves and everyone can
+effect change. Contributions take different forms: if you have a fix for an
+issue you’ve run into, a patch is the most appropriate way to contribute it.
+If you run into an issue but don’t have a solution, opening a defect in
+:yocto_bugs:`Bugzilla <>` or asking questions on the mailing lists might be
+more appropriate. This guide intends to point you in the right direction for
+this.
+
+
+.. toctree::
+ :caption: Table of Contents
+ :numbered:
+
+ identify-component
+ report-defect
+ recipe-style-guide
+ submit-changes
+
+.. include:: /boilerplate.rst
diff --git a/documentation/contributor-guide/recipe-style-guide.rst b/documentation/contributor-guide/recipe-style-guide.rst
new file mode 100644
index 0000000000..08d8fb4259
--- /dev/null
+++ b/documentation/contributor-guide/recipe-style-guide.rst
@@ -0,0 +1,411 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Recipe Style Guide
+******************
+
+Recipe Naming Conventions
+=========================
+
+In general, most recipes should follow the naming convention
+``recipes-category/recipename/recipename_version.bb``. Recipes for related
+projects may share the same recipe directory. ``recipename`` and ``category``
+may contain hyphens, but hyphens are not allowed in ``version``.
+
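+For example, a hypothetical ``libfoo`` release 1.2.3 packaged as a support
+library might live at ``recipes-support/libfoo/libfoo_1.2.3.bb``.
+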
+If the recipe is tracking a Git revision that does not correspond to a released
+version of the software, ``version`` may be ``git`` (e.g. ``recipename_git.bb``)
+and the recipe would set :term:`PV`.
+
+Version Policy
+==============
+
+Our versions follow the form ``<epoch>:<version>-<revision>``
+or in BitBake variable terms ${:term:`PE`}:${:term:`PV`}-${:term:`PR`}. We
+generally follow the `Debian <https://www.debian.org/doc/debian-policy/ch-controlfields.html#version>`__
+version policy which defines these terms.
+
+In most cases the version :term:`PV` will be set automatically from the recipe
+file name. It is recommended to use released versions of software as these are
+revisions that upstream are expecting people to use.
+
+Recipe versions should always compare and sort correctly so that upgrades work
+as expected. With conventional versions such as ``1.4`` upgrading to ``1.5``,
+this happens naturally, but some versions don't sort. For example,
+``1.5 Release Candidate 2`` could be written as ``1.5rc2`` but this sorts after
+``1.5``, so upgrades from feeds won't happen correctly.
+
+Instead the tilde (``~``) operator can be used, which sorts before the empty
+string so ``1.5~rc2`` comes before ``1.5``. There is a historical syntax which
+may be found where :term:`PV` is set as a combination of the prior version
+``+`` the pre-release version, for example ``PV=1.4+1.5rc2``. This is a valid
+syntax but the tilde form is preferred.
+
+For version comparisons, the ``opkg-compare-versions`` program from
+``opkg-utils`` can be useful when attempting to determine how two version
+numbers compare to each other. Our definitive version comparison algorithm is
+the one within bitbake which aims to match those of the package managers and
+Debian policy closely.
+
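+Here is a quick way to check how two versions will sort (a small sketch using
+BitBake's own comparison function, assuming ``bitbake/lib`` is on your
+``PYTHONPATH``)::
+
+    import bb.utils
+
+    # a negative result means the first version sorts before the second
+    print(bb.utils.vercmp_string("1.5~rc2", "1.5"))  # negative: 1.5 upgrades 1.5~rc2
+    print(bb.utils.vercmp_string("1.5rc2", "1.5"))   # positive: 1.5 would not replace 1.5rc2
+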
+When a recipe references a git revision that does not correspond to a released
+version of software (e.g. is not a tagged version), the :term:`PV` variable
+should include the Git revision using the following to make the
+version clear::
+
+ PV = "<version>+git${SRCPV}"
+
+In this case, ``<version>`` should be the most recently released version of the
+software from the current source revision (``git describe`` can be useful for
+determining this). Whilst not recommended for published layers, this format is
+also useful when using :term:`AUTOREV` to set the recipe to increment source
+control revisions automatically, which can help during local development.
+
+Version Number Changes
+======================
+
+The :term:`PR` variable is used to indicate different revisions of a recipe
+that reference the same upstream source version. It can be used to force a
+new version of a recipe to be installed onto a device from a package feed.
+These once had to be set manually but in most cases they can now be set and
+incremented automatically by a PR Server connected with a package feed.
+
+When :term:`PV` increases, any existing :term:`PR` value can and should be
+removed.
+
+If :term:`PV` changes in such a way that it does not increase with respect to
+the previous value, you need to increase :term:`PE` to ensure package managers
+will upgrade it correctly. If unset you should set :term:`PE` to "1" since
+the default of empty is easily confused with "0" depending on the package
+manager. :term:`PE` can only have an integer value.
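+
+For example, if a hypothetical recipe had to move from ``PV = "2.10"`` back to
+``PV = "2.9"``, setting ``PE = "1"`` turns the new version into ``1:2.9``, which
+package managers treat as higher than the previously published ``2.10`` (whose
+implicit epoch is 0).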
+
+Recipe formatting
+=================
+
+Variable Formatting
+-------------------
+
+- Variable assignment should have a space on each side of the operator, e.g.
+ ``FOO = "bar"``, not ``FOO="bar"``.
+
+- Double quotes should be used on the right-hand side of the assignment,
+ e.g. ``FOO = "bar"`` not ``FOO = 'bar'``
+
+- Spaces should be used for indenting variables, with 4 spaces per tab
+
+- Long variables should be split over multiple lines when possible by using
+ the continuation character (``\``)
+
+- When splitting a long variable over multiple lines, all continuation lines
+ should be indented (with spaces) to align with the start of the quote on the
+ first line::
+
+ FOO = "this line is \
+ long \
+ "
+
+ Instead of::
+
+ FOO = "this line is \
+ long \
+ "
+
+Python Function formatting
+--------------------------
+
+- Spaces must be used for indenting Python code, with 4 spaces per tab
+
+Shell Function formatting
+-------------------------
+
+- The formatting of shell functions should be consistent within layers.
+ Some use tabs, some use spaces.
+
+Recipe metadata
+===============
+
+Required Variables
+------------------
+
+The following variables should be included in all recipes:
+
+- :term:`SUMMARY`: a one line description of the upstream project
+
+- :term:`DESCRIPTION`: an extended description of the upstream project,
+ possibly with multiple lines. If no reasonable description can be written,
+ this may be omitted as it defaults to :term:`SUMMARY`.
+
+- :term:`HOMEPAGE`: the URL of the upstream project's homepage.
+
+- :term:`BUGTRACKER`: the URL of the upstream project's bug tracking website,
+ if applicable.
+
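+For example, a hypothetical recipe for a small JSON parsing library might set
+``SUMMARY = "Minimal JSON parsing library"``,
+``HOMEPAGE = "https://example.com/tinyjson"`` and
+``BUGTRACKER = "https://example.com/tinyjson/issues"``.
+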
+Recipe Ordering
+---------------
+
+When a variable is defined in recipes and classes, variables should follow the
+general order when possible:
+
+- :term:`SUMMARY`
+- :term:`DESCRIPTION`
+- :term:`HOMEPAGE`
+- :term:`BUGTRACKER`
+- :term:`SECTION`
+- :term:`LICENSE`
+- :term:`LIC_FILES_CHKSUM`
+- :term:`DEPENDS`
+- :term:`PROVIDES`
+- :term:`PV`
+- :term:`SRC_URI`
+- :term:`SRCREV`
+- :term:`S`
+- ``inherit ...``
+- :term:`PACKAGECONFIG`
+- Build class specific variables such as ``EXTRA_QMAKEVARS_POST`` and :term:`EXTRA_OECONF`
+- Tasks such as :ref:`ref-tasks-configure`
+- :term:`PACKAGE_ARCH`
+- :term:`PACKAGES`
+- :term:`FILES`
+- :term:`RDEPENDS`
+- :term:`RRECOMMENDS`
+- :term:`RSUGGESTS`
+- :term:`RPROVIDES`
+- :term:`RCONFLICTS`
+- :term:`BBCLASSEXTEND`
+
+There are some cases where ordering is important and these cases would override
+this default order. Examples include:
+
+- :term:`PACKAGE_ARCH` needing to be set before ``inherit packagegroup``
+
+Tasks should be ordered based on the order they generally execute. For commonly
+used tasks this would be:
+
+- :ref:`ref-tasks-fetch`
+- :ref:`ref-tasks-unpack`
+- :ref:`ref-tasks-patch`
+- :ref:`ref-tasks-prepare_recipe_sysroot`
+- :ref:`ref-tasks-configure`
+- :ref:`ref-tasks-compile`
+- :ref:`ref-tasks-install`
+- :ref:`ref-tasks-populate_sysroot`
+- :ref:`ref-tasks-package`
+
+Custom tasks should be sorted similarly.
+
+Package specific variables are typically grouped together, e.g.::
+
+    RDEPENDS:${PN} = "foo"
+    RDEPENDS:${PN}-libs = "bar"
+
+    RRECOMMENDS:${PN} = "one"
+    RRECOMMENDS:${PN}-libs = "two"
+
+Recipe License Fields
+---------------------
+
+Recipes need to define both the :term:`LICENSE` and
+:term:`LIC_FILES_CHKSUM` variables:
+
+- :term:`LICENSE`: This variable specifies the license for the software.
+ If you do not know the license under which the software you are
+ building is distributed, you should go to the source code and look
+ for that information. Typical files containing this information
+ include ``COPYING``, :term:`LICENSE`, and ``README`` files. You could
+ also find the information near the top of a source file. For example,
+ given a piece of software licensed under the GNU General Public
+ License version 2, you would set :term:`LICENSE` as follows::
+
+ LICENSE = "GPL-2.0-only"
+
+ The licenses you specify within :term:`LICENSE` can have any name as long
+ as you do not use spaces, since spaces are used as separators between
+ license names. For standard licenses, use the names of the files in
+ ``meta/files/common-licenses/`` or the :term:`SPDXLICENSEMAP` flag names
+ defined in ``meta/conf/licenses.conf``.
+
+- :term:`LIC_FILES_CHKSUM`: The OpenEmbedded build system uses this
+ variable to make sure the license text has not changed. If it has,
+ the build produces an error and it affords you the chance to figure
+ it out and correct the problem.
+
+ You need to specify all applicable licensing files for the software.
+ At the end of the configuration step, the build process will compare
+ the checksums of the files to be sure the text has not changed. Any
+ differences result in an error with the message containing the
+ current checksum. For more explanation and examples of how to set the
+ :term:`LIC_FILES_CHKSUM` variable, see the
+ ":ref:`dev-manual/licenses:tracking license changes`" section.
+
+ To determine the correct checksum string, you can list the
+ appropriate files in the :term:`LIC_FILES_CHKSUM` variable with incorrect
+ md5 strings, attempt to build the software, and then note the
+ resulting error messages that will report the correct md5 strings.
+ See the ":ref:`dev-manual/new-recipe:fetching code`" section for
+ additional information.
+
+ Here is an example that assumes the software has a ``COPYING`` file::
+
+ LIC_FILES_CHKSUM = "file://COPYING;md5=xxx"
+
+ When you try to build the
+ software, the build system will produce an error and give you the
+ correct string that you can substitute into the recipe file for a
+ subsequent build.
+
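+Putting both variables together, a hypothetical recipe whose sources are
+covered by two licenses might end up with something like the following, where
+``xxx`` again stands for the real checksums reported by the build::
+
+   LICENSE = "MIT & GPL-2.0-only"
+   LIC_FILES_CHKSUM = "file://LICENSE.MIT;md5=xxx \
+                       file://COPYING;md5=xxx"
+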
+License Updates
+~~~~~~~~~~~~~~~
+
+When you change the :term:`LICENSE` or :term:`LIC_FILES_CHKSUM` in the recipe
+you need to briefly explain the reason for the change via a ``License-Update:``
+tag. Often it's quite trivial, such as::
+
+ License-Update: copyright years refreshed
+
+Less often, the actual licensing terms themselves will have changed. If so, do
+try to link to the upstream change or discussion making/justifying that decision.
+
+Tips and Guidelines for Writing Recipes
+---------------------------------------
+
+- Use :term:`BBCLASSEXTEND` instead of creating separate recipes such as ``-native``
+ and ``-nativesdk`` ones, whenever possible. This avoids having to maintain multiple
+ recipe files at the same time.
+
+- Recipes should have tasks which are idempotent, i.e. that executing a given task
+ multiple times shouldn't change the end result. The build environment is built upon
+ this assumption and breaking it can cause obscure build failures.
+
+- For idempotence when modifying files in tasks, it is usually best to:
+
+ - copy a file ``X`` to ``X.orig`` (only if it doesn't exist already)
+ - then, copy ``X.orig`` back to ``X``,
+ - and, finally, modify ``X``.
+
+  This ensures that, if rerun, the task always produces the same end result,
+  and that the original file is preserved for reuse. It also guards against an
+  interrupted build corrupting the file. See the sketch after this list.
+
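+Here is a minimal sketch of that pattern in shell, for a hypothetical task
+that tweaks a generated ``config.mk`` (the file name and the task are purely
+illustrative)::
+
+   do_configure:append() {
+       # Keep a pristine copy the first time this task runs
+       if [ ! -e ${S}/config.mk.orig ]; then
+           cp ${S}/config.mk ${S}/config.mk.orig
+       fi
+       # Always start from the pristine copy so reruns give the same result
+       cp ${S}/config.mk.orig ${S}/config.mk
+       # Finally, apply the modification
+       sed -i -e 's/ENABLE_FOO=0/ENABLE_FOO=1/' ${S}/config.mk
+   }
+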
+Patch Upstream Status
+=====================
+
+In order to keep track of patches applied by recipes and ultimately reduce the
+number of patches that need maintaining, the OpenEmbedded build system
+requires information about the upstream status of each patch.
+
+In its description, each patch should provide detailed information about the
+bug that it addresses, such as the URL in a bug tracking system and links
+to relevant mailing list archives.
+
+Then, you should also add an ``Upstream-Status:`` tag containing one of the
+following status strings:
+
+``Pending``
+ No determination has been made yet, or patch has not yet been submitted to
+ upstream.
+
+   Keep in mind that every patch submitted upstream reduces the maintenance
+   burden in OpenEmbedded and the Yocto Project in the long run, so this patch
+   status should only be used in exceptional cases if there are genuine
+   obstacles to submitting a patch upstream; the reason for that should be
+   included in the patch.
+
+``Submitted [where]``
+ Submitted to upstream, waiting for approval. Optionally include where
+ it was submitted, such as the author, mailing list, etc.
+
+``Backport [version]``
+ Accepted upstream and included in the next release, or backported from newer
+ upstream version, because we are at a fixed version.
+ Include upstream version info (e.g. commit ID or next expected version).
+
+``Denied``
+ Not accepted by upstream, include reason in patch.
+
+``Inactive-Upstream [lastcommit: when (and/or) lastrelease: when]``
+ The upstream is no longer available. This typically means a defunct project
+ where no activity has happened for a long time --- measured in years. To make
+ that judgement, it is recommended to look at not only when the last release
+ happened, but also when the last commit happened, and whether newly made bug
+ reports and merge requests since that time receive no reaction. It is also
+ recommended to add to the patch description any relevant links where the
+ inactivity can be clearly seen.
+
+``Inappropriate [reason]``
+ The patch is not appropriate for upstream, include a brief reason on the
+ same line enclosed with ``[]``. In the past, there were several different
+ reasons not to submit patches upstream, but we have to consider that every
+   non-upstreamed patch means a maintenance burden for recipe maintainers.
+ Currently, the only reasons to mark patches as inappropriate for upstream
+ submission are:
+
+ - ``oe specific``: the issue is specific to how OpenEmbedded performs builds
+ or sets things up at runtime, and can be resolved only with a patch that
+ is not however relevant or appropriate for general upstream submission.
+   - ``upstream ticket <link>``: the issue is not specific to OpenEmbedded
+ and should be fixed upstream, but the patch in its current form is not
+ suitable for merging upstream, and the author lacks sufficient expertise
+ to develop a proper patch. Instead the issue is handled via a bug report
+ (include link).
+
+Of course, if another person later takes care of submitting this patch upstream,
+the status should be changed to ``Submitted [where]``, and an additional
+``Signed-off-by:`` line should be added to the patch by the person claiming
+responsibility for upstreaming.
+
+Examples
+--------
+
+Here's an example of a patch that has been submitted upstream::
+
+ rpm: Adjusted the foo setting in bar
+
+ [RPM Ticket #65] -- http://rpm5.org/cvs/tktview?tn=65,5
+
+ The foo setting in bar was decreased from X to X-50% in order to
+ ensure we don't exhaust all system memory with foobar threads.
+
+ Upstream-Status: Submitted [rpm5-devel@rpm5.org]
+
+ Signed-off-by: Joe Developer <joe.developer@example.com>
+
+A future update can change the value to ``Backport`` or ``Denied`` as
+appropriate.
+
+Another example of a patch that is specific to OpenEmbedded::
+
+ Do not treat warnings as errors
+
+ There are additional warnings found with musl which are
+ treated as errors and fails the build, we have more combinations
+ than upstream supports to handle.
+
+ Upstream-Status: Inappropriate [oe specific]
+
+Here's a patch that has been backported from an upstream commit::
+
+ include missing sys/file.h for LOCK_EX
+
+ Upstream-Status: Backport [https://github.com/systemd/systemd/commit/ac8db36cbc26694ee94beecc8dca208ec4b5fd45]
+
+CVE patches
+===========
+
+In order to have a better control of vulnerabilities, patches that fix CVEs must
+contain a ``CVE:`` tag. This tag lists all CVEs fixed by the patch. If more than
+one CVE is fixed, separate them using spaces.
+
+CVE Examples
+------------
+
+This should be the header of a patch that fixes :cve:`2015-8370` in GRUB2::
+
+ grub2: Fix CVE-2015-8370
+
+ [No upstream tracking] -- https://bugzilla.redhat.com/show_bug.cgi?id=1286966
+
+ Back to 28; Grub2 Authentication
+
+   Two functions suffer from integer underflow fault; the grub_username_get() and grub_password_get(), located in
+ grub-core/normal/auth.c and lib/crypto.c respectively. This can be exploited to obtain a Grub rescue shell.
+
+ Upstream-Status: Backport [http://git.savannah.gnu.org/cgit/grub.git/commit/?id=451d80e52d851432e109771bb8febafca7a5f1f2]
+ CVE: CVE-2015-8370
+ Signed-off-by: Joe Developer <joe.developer@example.com>
diff --git a/documentation/contributor-guide/report-defect.rst b/documentation/contributor-guide/report-defect.rst
new file mode 100644
index 0000000000..8ef133b842
--- /dev/null
+++ b/documentation/contributor-guide/report-defect.rst
@@ -0,0 +1,67 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Reporting a Defect Against the Yocto Project and OpenEmbedded
+**************************************************************
+
+You can use the Yocto Project instance of
+`Bugzilla <https://www.bugzilla.org/about/>`__ to submit a defect (bug)
+against BitBake, OpenEmbedded-Core, any other Yocto Project component, or for
+tool issues. For additional information on this implementation of
+Bugzilla see the ":ref:`Yocto Project Bugzilla <resources-bugtracker>`" section
+in the Yocto Project Reference Manual. For more detail on any of the following
+steps, see the Yocto Project
+:yocto_wiki:`Bugzilla wiki page </Bugzilla_Configuration_and_Bug_Tracking>`.
+
+Use the following general steps to submit a bug:
+
+#. Open the Yocto Project implementation of :yocto_bugs:`Bugzilla <>`.
+
+#. Click "File a Bug" to enter a new bug.
+
+#. Choose the appropriate "Classification", "Product", and "Component"
+ for which the bug was found. Bugs for the Yocto Project fall into
+ one of several classifications, which in turn break down into
+ several products and components. For example, for a bug against the
+ ``meta-intel`` layer, you would choose "Build System, Metadata &
+ Runtime", "BSPs", and "bsps-meta-intel", respectively.
+
+#. Choose the "Version" of the Yocto Project for which you found the
+ bug (e.g. &DISTRO;).
+
+#. Determine and select the "Severity" of the bug. The severity
+ indicates how the bug impacted your work.
+
+#. Choose the "Hardware" that the bug impacts.
+
+#. Choose the "Architecture" that the bug impacts.
+
+#. Choose a "Documentation change" item for the bug. Fixing a bug might
+ or might not affect the Yocto Project documentation. If you are
+ unsure of the impact to the documentation, select "Don't Know".
+
+#. Provide a brief "Summary" of the bug. Try to limit your summary to
+ just a line or two and be sure to capture the essence of the bug.
+
+#. Provide a detailed "Description" of the bug. You should provide as
+ much detail as you can about the context, behavior, output, and so
+ forth that surrounds the bug. You can even attach supporting files
+ for output from logs by using the "Add an attachment" button.
+
+#. Click the "Submit Bug" button to submit the bug. A new Bugzilla number
+ is assigned to the bug and the defect is logged in the bug tracking
+ system.
+
+Once you file a bug, the bug is processed by the Yocto Project Bug
+Triage Team and further details concerning the bug are assigned (e.g.
+priority and owner). You are the "Submitter" of the bug and any further
+categorization, progress, or comments on the bug result in Bugzilla
+sending you an automated email concerning the particular change or
+progress to the bug.
+
+There are no guarantees about if or when a bug might be worked on since an
+open-source project has no dedicated engineering resources. However, the
+project does have a good track record of resolving common issues over the
+medium and long term. We do encourage people to file bugs so issues are
+at least known about. It helps other users when they find somebody having
+the same issue as they do, and an issue that is unknown is much less likely
+to ever be fixed!
diff --git a/documentation/contributor-guide/submit-changes.rst b/documentation/contributor-guide/submit-changes.rst
new file mode 100644
index 0000000000..25f839ba03
--- /dev/null
+++ b/documentation/contributor-guide/submit-changes.rst
@@ -0,0 +1,786 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Contributing Changes to a Component
+************************************
+
+Contributions to the Yocto Project and OpenEmbedded are very welcome.
+Because the system is extremely configurable and flexible, we recognize
+that developers will want to extend, configure or optimize it for their
+specific uses.
+
+.. _ref-why-mailing-lists:
+
+Contributing through mailing lists --- Why not use web-based workflows?
+=========================================================================
+
+Both Yocto Project and OpenEmbedded have many key components that are
+maintained by patches being submitted on mailing lists. We appreciate this
+approach does look a little old-fashioned when other workflows are available
+through web technology such as GitHub, GitLab and others. Since we are often
+asked this question, we’ve decided to document the reasons for using mailing
+lists.
+
+One significant factor is that we value peer review. When a change is proposed
+to many of the core pieces of the project, it helps to have many eyes of review
+go over them. Whilst there is ultimately one maintainer who needs to make the
+final call on accepting or rejecting a patch, the review is made by many eyes
+and the exact people reviewing it are likely unknown to the maintainer. It is
+often the surprise reviewer that catches the most interesting issues!
+
+This is in contrast to the "GitHub" style workflow where either just a
+maintainer makes that review, or review is specifically requested from
+nominated people. We believe there is significant value added to the codebase
+by this peer review and that moving away from mailing lists would be to the
+detriment of our code.
+
+We also need to acknowledge that many of our developers are used to this
+mailing list workflow and have worked with it for years, with tools and
+processes built around it. Changing away from this would result in a loss
+of key people from the project, which would again be to its detriment.
+
+The projects are acutely aware that potential new contributors find the
+mailing list approach off-putting and would prefer a web-based GUI.
+Since we don’t believe that can work for us, the project is aiming to ensure
+`patchwork <https://patchwork.yoctoproject.org/>`__ is available to help track
+patch status and also looking at how tooling can provide more feedback to users
+about patch status. We are looking at improving tools such as ``patchtest`` to
+test user contributions before they hit the mailing lists and also at better
+documenting how to use such workflows since we recognise that whilst this was
+common knowledge a decade ago, it might not be as familiar now.
+
+Preparing Changes for Submission
+================================
+
+Set up Git
+----------
+
+The first thing to do is to install Git packages. Here is an example
+on Debian and Ubuntu::
+
+ sudo apt install git-core git-email
+
+Then, you need to set a name and e-mail address that Git will
+use to identify your commits::
+
+ git config --global user.name "Ada Lovelace"
+ git config --global user.email "ada.lovelace@gmail.com"
+
+Clone the Git repository for the component to modify
+----------------------------------------------------
+
+After identifying the component to modify as described in the
+":doc:`../contributor-guide/identify-component`" section, clone the
+corresponding Git repository. Here is an example for OpenEmbedded-Core::
+
+ git clone https://git.openembedded.org/openembedded-core
+ cd openembedded-core
+
+Create a new branch
+-------------------
+
+Then, create a new branch in your local Git repository
+for your changes, starting from the reference branch in the upstream
+repository (often called ``master``)::
+
+ $ git checkout <ref-branch>
+ $ git checkout -b my-changes
+
+If you have completely unrelated sets of changes to submit, you should even
+create one branch for each set.
+
+Implement and commit changes
+----------------------------
+
+In each branch, you should group your changes into small, controlled and
+isolated ones. Keeping changes small and isolated aids review, makes
+merging/rebasing easier and keeps the change history clean should anyone need
+to refer to it in future.
+
+To this purpose, you should create *one Git commit per change*,
+corresponding to each of the patches you will eventually submit.
+See `further guidance <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#separate-your-changes>`__
+in the Linux kernel documentation if needed.
+
+For example, when you intend to add multiple new recipes, each recipe
+should be added in a separate commit. For upgrades to existing recipes,
+the previous version should usually be deleted as part of the same commit
+to add the upgraded version.
+
+#. *Stage Your Changes:* Stage your changes by using the ``git add``
+ command on each file you modified. If you want to stage all the
+ files you modified, you can even use the ``git add -A`` command.
+
+#. *Commit Your Changes:* This is when you can create separate commits. For
+ each commit to create, use the ``git commit -s`` command with the files
+ or directories you want to include in the commit::
+
+ $ git commit -s file1 file2 dir1 dir2 ...
+
+ To include **a**\ ll staged files::
+
+ $ git commit -sa
+
+ - The ``-s`` option of ``git commit`` adds a "Signed-off-by:" line
+ to your commit message. There is the same requirement for contributing
+ to the Linux kernel. Adding such a line signifies that you, the
+ submitter, have agreed to the `Developer's Certificate of Origin 1.1
+ <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`__
+ as follows:
+
+ .. code-block:: none
+
+ Developer's Certificate of Origin 1.1
+
+ By making a contribution to this project, I certify that:
+
+ (a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+ (b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+ (c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+ (d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
+ - Provide a single-line summary of the change and, if more
+ explanation is needed, provide more detail in the body of the
+ commit. This summary is typically viewable in the "shortlist" of
+ changes. Thus, providing something short and descriptive that
+ gives the reader a summary of the change is useful when viewing a
+ list of many commits. You should prefix this short description
+ with the recipe name (if changing a recipe), or else with the
+ short form path to the file being changed.
+
+ .. note::
+
+ To find a suitable prefix for the commit summary, a good idea
+ is to look for prefixes used in previous commits touching the
+ same files or directories::
+
+ git log --oneline <paths>
+
+ - For the body of the commit message, provide detailed information
+ that describes what you changed, why you made the change, and the
+ approach you used. It might also be helpful if you mention how you
+ tested the change. Provide as much detail as you can in the body
+ of the commit message.
+
+ .. note::
+
+ If the single line summary is enough to describe a simple
+ change, the body of the commit message can be left empty.
+
+ - If the change addresses a specific bug or issue that is associated
+ with a bug-tracking ID, include a reference to that ID in your
+ detailed description. For example, the Yocto Project uses a
+ specific convention for bug references --- any commit that addresses
+ a specific bug should use the following form for the detailed
+ description. Be sure to use the actual bug-tracking ID from
+ Bugzilla for bug-id::
+
+ Fixes [YOCTO #bug-id]
+
+ detailed description of change
+
+#. *Crediting contributors:* By using the ``git commit --amend`` command,
+ you can add some tags to the commit description to credit other contributors
+ to the change:
+
+ - ``Reported-by``: name and email of a person reporting a bug
+ that your commit is trying to fix. This is a good practice
+ to encourage people to go on reporting bugs and let them
+ know that their reports are taken into account.
+
+ - ``Suggested-by``: name and email of a person to credit for the
+ idea of making the change.
+
+ - ``Tested-by``, ``Reviewed-by``: name and email for people having
+ tested your changes or reviewed their code. These fields are
+ usually added by the maintainer accepting a patch, or by
+ yourself if you submitted your patches to early reviewers,
+ or are submitting an unmodified patch again as part of a
+ new iteration of your patch series.
+
+ - ``CC:`` Name and email of people you want to send a copy
+ of your changes to. This field will be used by ``git send-email``.
+
+ See `more guidance about using such tags
+ <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#using-reported-by-tested-by-reviewed-by-suggested-by-and-fixes>`__
+ in the Linux kernel documentation.
+
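+Putting these conventions together, a complete commit message might look like
+this (the recipe name, bug number and author below are invented for the sake
+of the example)::
+
+   libexamplejson: fix build with GCC 13
+
+   Newer GCC versions enable stricter prototype checking, which makes the
+   build fail. Add the missing include to resolve this.
+
+   Fixes [YOCTO #12345]
+
+   Signed-off-by: Ada Lovelace <ada.lovelace@gmail.com>
+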
+Test your changes
+-----------------
+
+For each contribution you make, you should test your changes as well.
+For this, the Yocto Project offers several types of tests. These tests cover
+different areas; which ones are feasible depends on your changes. For example, run:
+
+ - For changes that affect the build environment:
+
+ - ``bitbake-selftest``: for changes within BitBake
+
+ - ``oe-selftest``: to test combinations of BitBake runs
+
+ - ``oe-build-perf-test``: to test the performance of common build scenarios
+
+ - For changes in a recipe:
+
+ - ``ptest``: run package specific tests, if they exist
+
+ - ``testimage``: build an image, boot it and run testcases on it
+
+   - If applicable, also ensure that the ``native`` and ``nativesdk`` variants build
+
+ - For changes relating to the SDK:
+
+   - ``testsdk``: to build, install and run tests against an SDK
+
+ - ``testsdk_ext``: to build, install and run tests against an extended SDK
+
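+As a rough sketch, running a few of these could look as follows (the image and
+recipe names are placeholders, and ``testimage`` assumes you have enabled it in
+your configuration)::
+
+   $ bitbake-selftest                          # BitBake's own test suite
+   $ oe-selftest -r bbtests                    # one oe-selftest module
+   $ bitbake -c testimage core-image-minimal   # boot the image and run tests
+   $ bitbake example-recipe-native             # check the native variant builds
+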
+Note that this list just gives suggestions and is not exhaustive. More details can
+be found here: :ref:`test-manual/intro:Yocto Project Tests --- Types of Testing Overview`.
+
+Creating Patches
+================
+
+Here is the general procedure on how to create patches to be sent through email:
+
+#. *Describe the Changes in your Branch:* If you have more than one commit
+ in your branch, it's recommended to provide a cover letter describing
+ the series of patches you are about to send.
+
+ For this purpose, a good solution is to store the cover letter contents
+ in the branch itself::
+
+ git branch --edit-description
+
+ This will open a text editor to fill in the description for your
+ changes. This description can be updated when necessary and will
+ be used by Git to create the cover letter together with the patches.
+
+ It is recommended to start this description with a title line which
+   will serve as the subject line for the cover letter.
+
+#. *Generate Patches for your Branch:* The ``git format-patch`` command will
+ generate patch files for each of the commits in your branch. You need
+ to pass the reference branch your branch starts from.
+
+   If your branch didn't need a description in the previous step::
+
+ $ git format-patch <ref-branch>
+
+ If you filled a description for your branch, you will want to generate
+ a cover letter too::
+
+ $ git format-patch --cover-letter --cover-from-description=auto <ref-branch>
+
+ After the command is run, the current directory contains numbered
+ ``.patch`` files for the commits in your branch. If you have a cover
+ letter, it will be in the ``0000-cover-letter.patch``.
+
+ .. note::
+
+ The ``--cover-from-description=auto`` option makes ``git format-patch``
+ use the first paragraph of the branch description as the cover
+ letter title. Another possibility, which is easier to remember, is to pass
+ only the ``--cover-letter`` option, but you will have to edit the
+ subject line manually every time you generate the patches.
+
+ See the `git format-patch manual page <https://git-scm.com/docs/git-format-patch>`__
+ for details.
+
+#. *Review each of the Patch Files:* This final review of the patches
+   before sending them often allows you to view your changes from a different
+   perspective and to discover defects such as typos, spacing issues, or lines
+   or even files that you didn't intend to modify. This review should
+ include the cover letter patch too.
+
+ If necessary, rework your commits as described in
+ ":ref:`contributor-guide/submit-changes:taking patch review into account`".
+
+Sending the Patches via Email
+=============================
+
+Using Git to Send Patches
+-------------------------
+
+To submit patches through email, it is very important that you send them
+without any whitespace or HTML formatting that either you or your mailer
+introduces. The maintainer that receives your patches needs to be able
+to save and apply them directly from your emails, using the ``git am``
+command.
+
+Using the ``git send-email`` command is the only error-proof way of sending
+your patches using email since there is no risk of compromising whitespace
+in the body of the message, which can occur when you use your own mail
+client. It will also properly include your patches as *inline attachments*,
+which is not easy to do with standard e-mail clients without breaking lines.
+If you used your regular e-mail client and shared your patches as regular
+attachments, reviewers wouldn't be able to quote specific sections of your
+changes and make comments about them.
+
+Setting up Git to Send Email
+----------------------------
+
+The ``git send-email`` command can send email by using a local or remote
+Mail Transport Agent (MTA) such as ``msmtp``, ``sendmail``, or
+through a direct SMTP configuration in your Git ``~/.gitconfig`` file.
+
+Here are the settings for letting ``git send-email`` send e-mail through your
+regular SMTP server, using a Google Mail account as an example::
+
+ git config --global sendemail.smtpserver smtp.gmail.com
+ git config --global sendemail.smtpserverport 587
+ git config --global sendemail.smtpencryption tls
+ git config --global sendemail.smtpuser ada.lovelace@gmail.com
+   git config --global sendemail.smtppass XXXXXXXX
+
+These settings will appear in the ``.gitconfig`` file in your home directory.
+
+If you can use neither a local MTA nor SMTP, make sure you use an email client
+that does not touch the message (turning spaces into tabs, wrapping lines, etc.).
+A good mail client to do so is Pine (or Alpine) or Mutt. For more
+information about suitable clients, see `Email clients info for Linux
+<https://www.kernel.org/doc/html/latest/process/email-clients.html>`__
+in the Linux kernel sources.
+
+If you use such clients, just include the patch in the body of your email.
+
+Finding a Suitable Mailing List
+-------------------------------
+
+You should send patches to the appropriate mailing list so that they can be
+reviewed by the right contributors and merged by the appropriate maintainer.
+The specific mailing list you need to use depends on the location of the code
+you are changing.
+
+If people have concerns with any of the patches, they will usually voice
+their concern over the mailing list. If patches do not receive any negative
+reviews, the maintainer of the affected layer typically takes them, tests them,
+and then based on successful testing, merges them.
+
+In general, each component (e.g. layer) should have a ``README`` file
+that indicates where to send the changes and which process to follow.
+
+The "poky" repository, which is the Yocto Project's reference build
+environment, is a hybrid repository that contains several individual
+pieces (e.g. BitBake, Metadata, documentation, and so forth) built using
+the combo-layer tool. The upstream location used for submitting changes
+varies by component:
+
+- *Core Metadata:* Send your patches to the
+ :oe_lists:`openembedded-core </g/openembedded-core>`
+ mailing list. For example, a change to anything under the ``meta`` or
+ ``scripts`` directories should be sent to this mailing list.
+
+- *BitBake:* For changes to BitBake (i.e. anything under the
+ ``bitbake`` directory), send your patches to the
+ :oe_lists:`bitbake-devel </g/bitbake-devel>`
+ mailing list.
+
+- *meta-poky* and *meta-yocto-bsp* trees: These trees contain Metadata. Use the
+ :yocto_lists:`poky </g/poky>` mailing list.
+
+- *Documentation*: For changes to the Yocto Project documentation, use the
+ :yocto_lists:`docs </g/docs>` mailing list.
+
+For changes to other layers and tools hosted in the Yocto Project source
+repositories (i.e. :yocto_git:`git.yoctoproject.org <>`), use the
+:yocto_lists:`yocto-patches </g/yocto-patches/>` general mailing list.
+
+For changes to other layers hosted in the OpenEmbedded source
+repositories (i.e. :oe_git:`git.openembedded.org <>`), use
+the :oe_lists:`openembedded-devel </g/openembedded-devel>`
+mailing list, unless specified otherwise in the layer's ``README`` file.
+
+If you intend to submit a new recipe that neither fits into the core Metadata,
+nor into :oe_git:`meta-openembedded </meta-openembedded/>`, you should
+look for a suitable layer in https://layers.openembedded.org. If similar
+recipes can be expected, you may consider :ref:`dev-manual/layers:creating your own layer`.
+
+If in doubt, please ask on the :yocto_lists:`yocto </g/yocto/>` general mailing list
+or on the :oe_lists:`openembedded-devel </g/openembedded-devel>` mailing list.
+
+Subscribing to the Mailing List
+-------------------------------
+
+After identifying the right mailing list to use, you will have to subscribe to
+it if you haven't done it yet.
+
+If you attempt to send patches to a list you haven't subscribed to, your email
+will be returned as undelivered.
+
+However, if you don't want to receive all the messages sent to a mailing list,
+you can set your subscription to "no email". You will still be a subscriber able
+to send messages, but you won't receive any e-mail. If people reply to your message,
+their e-mail clients will default to including your email address in the
+conversation anyway.
+
+Anyway, you'll also be able to access the new messages on mailing list archives,
+either through a web browser, or for the lists archived on https://lore.kernel.org,
+through an individual newsgroup feed or a git repository.
+
+Sending Patches via Email
+-------------------------
+
+At this stage, you are ready to send your patches via email. Here's the
+typical usage of ``git send-email``::
+
+ git send-email --to <mailing-list-address> *.patch
+
+Then, review each subject line and list of recipients carefully, and then
+allow the command to send each message.
+
+You will see that ``git send-email`` will automatically copy the people listed
+in any commit tags such as ``Signed-off-by`` or ``Reported-by``.
+
+In case you are sending patches for :oe_git:`meta-openembedded </meta-openembedded/>`
+or any layer other than :oe_git:`openembedded-core </openembedded-core/>`,
+please add the appropriate prefix so that it is clear which layer the patch is intended
+to be applied to::
+
+ git format-patch --subject-prefix="meta-oe][PATCH" ...
+
+.. note::
+
+ It is actually possible to send patches without generating them
+ first. However, make sure you have reviewed your changes carefully
+ because ``git send-email`` will just show you the title lines of
+ each patch.
+
+ Here's a command you can use if you just have one patch in your
+ branch::
+
+ git send-email --to <mailing-list-address> -1
+
+ If you have multiple patches and a cover letter, you can send
+ patches for all the commits between the reference branch
+ and the tip of your branch::
+
+ git send-email --cover-letter --cover-from-description=auto --to <mailing-list-address> -M <ref-branch>
+
+See the `git send-email manual page <https://git-scm.com/docs/git-send-email>`__
+for details.
+
+Troubleshooting Email Issues
+----------------------------
+
+Fixing your From identity
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We have a frequent issue with contributors whose patches are received through
+a ``From`` field which doesn't match the ``Signed-off-by`` information. Here is
+a typical example for people sending from a domain name with :wikipedia:`DMARC`::
+
+ From: "Linus Torvalds via lists.openembedded.org <linus.torvalds=kernel.org@lists.openembedded.org>"
+
+This ``From`` field is used by ``git am`` to recreate commits with the right
+author name. The following will ensure that your e-mails have an additional
+``From`` field at the beginning of the Email body, and therefore that
+maintainers accepting your patches don't have to fix commit author information
+manually::
+
+ git config --global sendemail.from "linus.torvalds@kernel.org"
+
+The ``sendemail.from`` should match your ``user.email`` setting,
+which appears in the ``Signed-off-by`` line of your commits.
+
+Streamlining git send-email usage
+---------------------------------
+
+If you want to save time and not be forced to remember the right options to use
+with ``git send-email``, you can use Git configuration settings.
+
+- To set the right mailing list address for a given repository::
+
+ git config --local sendemail.to openembedded-devel@lists.openembedded.org
+
+- If the mailing list requires a subject prefix for the layer
+ (this only works when the repository only contains one layer)::
+
+ git config --local format.subjectprefix "meta-something][PATCH"
+
+Using Scripts to Push a Change Upstream and Request a Pull
+==========================================================
+
+For larger patch series it is preferable to send a pull request which not
+only includes the patch but also a pointer to a branch that can be pulled
+from. This involves making a local branch for your changes, pushing this
+branch to an accessible repository and then using the ``create-pull-request``
+and ``send-pull-request`` scripts from openembedded-core to create and send a
+patch series with a link to the branch for review.
+
+Follow this procedure to push a change to an upstream "contrib" Git
+repository once the steps in
+":ref:`contributor-guide/submit-changes:preparing changes for submission`"
+have been followed:
+
+.. note::
+
+ You can find general Git information on how to push a change upstream
+ in the
+ `Git Community Book <https://git-scm.com/book/en/v2/Distributed-Git-Distributed-Workflows>`__.
+
+#. *Request Push Access to an "Upstream" Contrib Repository:* Send an email to
+ ``helpdesk@yoctoproject.org``:
+
+   - Attach your SSH public key, which is usually named ``id_rsa.pub``. If you
+     don't have one, generate it by running ``ssh-keygen -t rsa -b 4096 -C "your_email@example.com"``.
+
+ - List the repositories you're planning to contribute to.
+
+ - Include your preferred branch prefix for ``-contrib`` repositories.
+
+#. *Push Your Commits to the "Contrib" Upstream:* Push your
+ changes to that repository::
+
+ $ git push upstream_remote_repo local_branch_name
+
+ For example, suppose you have permissions to push
+ into the upstream ``meta-intel-contrib`` repository and you are
+ working in a local branch named `your_name`\ ``/README``. The following
+ command pushes your local commits to the ``meta-intel-contrib``
+ upstream repository and puts the commit in a branch named
+ `your_name`\ ``/README``::
+
+ $ git push meta-intel-contrib your_name/README
+
+#. *Determine Who to Notify:* Determine the maintainer or the mailing
+ list that you need to notify for the change.
+
+ Before submitting any change, you need to be sure who the maintainer
+   is or which mailing list you need to notify. Use either of these
+   methods to find out:
+
+ - *Maintenance File:* Examine the ``maintainers.inc`` file, which is
+ located in the :term:`Source Directory` at
+ ``meta/conf/distro/include``, to see who is responsible for code.
+
+ - *Search by File:* Using :ref:`overview-manual/development-environment:git`, you can
+ enter the following command to bring up a short list of all
+ commits against a specific file::
+
+ git shortlog -- filename
+
+ Just provide the name of the file for which you are interested. The
+ information returned is not ordered by history but does include a
+ list of everyone who has committed grouped by name. From the list,
+ you can see who is responsible for the bulk of the changes against
+ the file.
+
+ - *Find the Mailing List to Use:* See the
+ ":ref:`contributor-guide/submit-changes:finding a suitable mailing list`"
+ section above.
+
+#. *Make a Pull Request:* Notify the maintainer or the mailing list that
+ you have pushed a change by making a pull request.
+
+ The Yocto Project provides two scripts that conveniently let you
+ generate and send pull requests to the Yocto Project. These scripts
+ are ``create-pull-request`` and ``send-pull-request``. You can find
+ these scripts in the ``scripts`` directory within the
+ :term:`Source Directory` (e.g.
+ ``poky/scripts``).
+
+ Using these scripts correctly formats the requests without
+ introducing any whitespace or HTML formatting. The maintainer that
+ receives your patches either directly or through the mailing list
+ needs to be able to save and apply them directly from your emails.
+ Using these scripts is the preferred method for sending patches.
+
+ First, create the pull request. For example, the following command
+ runs the script, specifies the upstream repository in the contrib
+ directory into which you pushed the change, and provides a subject
+ line in the created patch files::
+
+ $ poky/scripts/create-pull-request -u meta-intel-contrib -s "Updated Manual Section Reference in README"
+
+ Running this script forms ``*.patch`` files in a folder named
+ ``pull-``\ `PID` in the current directory. One of the patch files is a
+ cover letter.
+
+ Before running the ``send-pull-request`` script, you must edit the
+ cover letter patch to insert information about your change. After
+ editing the cover letter, send the pull request. For example, the
+ following command runs the script and specifies the patch directory
+ and email address. In this example, the email address is a mailing
+ list::
+
+ $ poky/scripts/send-pull-request -p ~/meta-intel/pull-10565 -t meta-intel@lists.yoctoproject.org
+
+ You need to follow the prompts as the script is interactive.
+
+ .. note::
+
+ For help on using these scripts, simply provide the ``-h``
+ argument as follows::
+
+ $ poky/scripts/create-pull-request -h
+ $ poky/scripts/send-pull-request -h
+
+Submitting Changes to Stable Release Branches
+=============================================
+
+The process for proposing changes to a Yocto Project stable branch differs
+from the steps described above. Changes to a stable branch must address
+identified bugs or CVEs and should be made carefully in order to avoid the
+risk of introducing new bugs or breaking backwards compatibility. Typically
+bug fixes must already be accepted into the master branch before they can be
+backported to a stable branch unless the bug in question does not affect the
+master branch or the fix on the master branch is unsuitable for backporting.
+
+The list of stable branches along with the status and maintainer for each
+branch can be obtained from the
+:yocto_wiki:`Releases wiki page </Releases>`.
+
+.. note::
+
+ Changes will not typically be accepted for branches which are marked as
+ End-Of-Life (EOL).
+
+With this in mind, the steps to submit a change for a stable branch are as
+follows:
+
+#. *Identify the bug or CVE to be fixed:* This information should be
+ collected so that it can be included in your submission.
+
+ See :ref:`dev-manual/vulnerabilities:checking for vulnerabilities`
+ for details about CVE tracking.
+
+#. *Check if the fix is already present in the master branch:* This will
+ result in the most straightforward path into the stable branch for the
+ fix.
+
+ #. *If the fix is present in the master branch --- submit a backport request
+ by email:* You should send an email to the relevant stable branch
+ maintainer and the mailing list with details of the bug or CVE to be
+ fixed, the commit hash on the master branch that fixes the issue and
+ the stable branches which you would like this fix to be backported to.
+
+ #. *If the fix is not present in the master branch --- submit the fix to the
+ master branch first:* This will ensure that the fix passes through the
+ project's usual patch review and test processes before being accepted.
+ It will also ensure that bugs are not left unresolved in the master
+ branch itself. Once the fix is accepted in the master branch a backport
+ request can be submitted as above.
+
+ #. *If the fix is unsuitable for the master branch --- submit a patch
+ directly for the stable branch:* This method should be considered as a
+ last resort. It is typically necessary when the master branch is using
+ a newer version of the software which includes an upstream fix for the
+ issue or when the issue has been fixed on the master branch in a way
+ that introduces backwards incompatible changes. In this case follow the
+ steps in ":ref:`contributor-guide/submit-changes:preparing changes for submission`"
+ and in the following sections but modify the subject header of your patch
+ email to include the name of the stable branch which you are
+      targeting. This can be done using the ``--subject-prefix`` argument to
+ ``git format-patch``, for example to submit a patch to the
+ "&DISTRO_NAME_NO_CAP_MINUS_ONE;" branch use::
+
+ git format-patch --subject-prefix='&DISTRO_NAME_NO_CAP_MINUS_ONE;][PATCH' ...
+
+Taking Patch Review into Account
+================================
+
+You may get feedback on your submitted patches from other community members
+or from the automated patchtest service. If issues are identified in your
+patches then it is usually necessary to address these before the patches are
+accepted into the project. In this case you should rework your commits
+according to the feedback and submit an updated version to the relevant
+mailing list.
+
+In any case, never fix reported issues by adding new commits on the tip of
+your branch. Always come up with a new series of commits which no longer
+contains the reported issues.
+
+.. note::
+
+ It is a good idea to send a copy to the reviewers who provided feedback
+ to the previous version of the patch. You can make sure this happens
+ by adding a ``CC`` tag to the commit description::
+
+ CC: William Shakespeare <bill@yoctoproject.org>
+
+A single patch can be amended using ``git commit --amend``, and multiple
+patches can be easily reworked and reordered through an interactive Git rebase::
+
+ git rebase -i <ref-branch>
+
+See `this tutorial <https://hackernoon.com/beginners-guide-to-interactive-rebasing-346a3f9c3a6d>`__
+for practical guidance about using Git interactive rebasing.
+
+You should also modify the ``[PATCH]`` tag in the email subject line when
+sending the revised patch to mark the new iteration as ``[PATCH v2]``,
+``[PATCH v3]``, etc as appropriate. This can be done by passing the ``-v``
+argument to ``git format-patch`` with a version number::
+
+ git format-patch -v2 <ref-branch>
+
+Lastly please ensure that you also test your revised changes. In particular
+please don't just edit the patch file written out by ``git format-patch`` and
+resend it.
+
+Tracking the Status of Patches
+==============================
+
+The Yocto Project uses a `Patchwork instance <https://patchwork.yoctoproject.org/>`__
+to track the status of patches submitted to the various mailing lists and to
+support automated patch testing. Each submitted patch is checked for common
+mistakes and deviations from the expected patch format and submitters are
+notified by ``patchtest`` if such mistakes are found. This process helps to
+reduce the burden of patch review on maintainers.
+
+.. note::
+
+ This system is imperfect and changes can sometimes get lost in the flow.
+ Asking about the status of a patch or change is reasonable if the change
+ has been idle for a while with no feedback.
+
+If your patches have not had any feedback in a few days, they may have already
+been merged. You can run ``git pull`` on the relevant branch to check this. Note that many if
+not most layer maintainers do not send out acknowledgement emails when they
+accept patches. Alternatively, if there is no response or merge after a few days
+the patch may have been missed or the appropriate reviewers may not currently be
+around. It is then perfectly fine to reply to it yourself with a reminder asking
+for feedback.
+
+.. note::
+
+   Patch reviews for feature and recipe upgrade patches are likely to be
+   delayed during a feature freeze because these types of patches aren't
+   merged at that time --- you may have to wait until after the freeze is lifted.
+
+Maintainers also commonly use ``-next`` branches to test submissions prior to
+merging patches. Thus, you can get an idea of the status of a patch based on
+whether the patch has been merged into one of these branches. The commonly
+used testing branches for OpenEmbedded-Core are as follows:
+
+- *openembedded-core "master-next" branch:* This branch is part of the
+ :oe_git:`openembedded-core </openembedded-core/>` repository and contains
+ proposed changes to the core metadata.
+
+- *poky "master-next" branch:* This branch is part of the
+ :yocto_git:`poky </poky/>` repository and combines proposed
+ changes to BitBake, the core metadata and the poky distro.
+
+Similarly, stable branches maintained by the project may have corresponding
+``-next`` branches which collect proposed changes. For example,
+``&DISTRO_NAME_NO_CAP;-next`` and ``&DISTRO_NAME_NO_CAP_MINUS_ONE;-next``
+branches in both the "openembedded-core" and "poky" repositories.
+
+Other layers may have similar testing branches but there is no formal
+requirement or standard for these so please check the documentation for the
+layers you are contributing to.
+
diff --git a/documentation/dev-manual/bmaptool.rst b/documentation/dev-manual/bmaptool.rst
new file mode 100644
index 0000000000..9add72cf3b
--- /dev/null
+++ b/documentation/dev-manual/bmaptool.rst
@@ -0,0 +1,59 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Flashing Images Using ``bmaptool``
+**********************************
+
+A fast and easy way to flash an image to a bootable device is to use
+Bmaptool, which is integrated into the OpenEmbedded build system.
+Bmaptool is a generic tool that creates a file's block map (bmap) and
+then uses that map to copy the file. As compared to traditional tools
+such as dd or cp, Bmaptool can copy (or flash) large files like raw
+system image files much faster.
+
+.. note::
+
+ - If you are using Ubuntu or Debian distributions, you can install
+ the ``bmap-tools`` package using the following command and then
+ use the tool without specifying ``PATH`` even from the root
+ account::
+
+ $ sudo apt install bmap-tools
+
+ - If you are unable to install the ``bmap-tools`` package, you will
+ need to build Bmaptool before using it. Use the following command::
+
+ $ bitbake bmap-tools-native
+
+Following is an example that shows how to flash a Wic image. While this
+example uses a Wic image, you can use Bmaptool to flash any type of image.
+Use these steps to flash an image using Bmaptool:
+
+#. *Update your local.conf File:* You need to have the following set
+ in your ``local.conf`` file before building your image::
+
+ IMAGE_FSTYPES += "wic wic.bmap"
+
+#. *Get Your Image:* Either have your image ready (pre-built with the
+ :term:`IMAGE_FSTYPES`
+ setting previously mentioned) or take the step to build the image::
+
+ $ bitbake image
+
+#. *Flash the Device:* Flash the device with the image by using Bmaptool
+ depending on your particular setup. The following commands assume the
+ image resides in the :term:`Build Directory`'s ``deploy/images/`` area:
+
+ - If you have write access to the media, use this command form::
+
+ $ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX
+
+ - If you do not have write access to the media, set your permissions
+ first and then use the same command form::
+
+ $ sudo chmod 666 /dev/sdX
+ $ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX
+
+For help on the ``bmaptool`` command, use the following command::
+
+ $ bmaptool --help
+
diff --git a/documentation/dev-manual/build-quality.rst b/documentation/dev-manual/build-quality.rst
new file mode 100644
index 0000000000..713ea3a48e
--- /dev/null
+++ b/documentation/dev-manual/build-quality.rst
@@ -0,0 +1,409 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Maintaining Build Output Quality
+********************************
+
+Many factors can influence the quality of a build. For example, if you
+upgrade a recipe to use a new version of an upstream software package or
+you experiment with some new configuration options, subtle changes can
+occur that you might not detect until later. Consider the case where
+your recipe is using a newer version of an upstream package. In this
+case, a new version of a piece of software might introduce an optional
+dependency on another library, which is auto-detected. If that library
+has already been built when the software is building, the software will
+link to the built library and that library will be pulled into your
+image along with the new software even if you did not want the library.
+
+The :ref:`ref-classes-buildhistory` class helps you maintain the quality of
+your build output. You can use the class to highlight unexpected and possibly
+unwanted changes in the build output. When you enable build history, it records
+information about the contents of each package and image and then commits that
+information to a local Git repository where you can examine the information.
+
+The remainder of this section describes the following:
+
+- :ref:`How you can enable and disable build history <dev-manual/build-quality:enabling and disabling build history>`
+
+- :ref:`How to understand what the build history contains <dev-manual/build-quality:understanding what the build history contains>`
+
+- :ref:`How to limit the information used for build history <dev-manual/build-quality:using build history to gather image information only>`
+
+- :ref:`How to examine the build history from both a command-line and web interface <dev-manual/build-quality:examining build history information>`
+
+Enabling and Disabling Build History
+====================================
+
+Build history is disabled by default. To enable it, add the following
+:term:`INHERIT` statement and set the :term:`BUILDHISTORY_COMMIT` variable to
+"1" at the end of your ``conf/local.conf`` file found in the
+:term:`Build Directory`::
+
+ INHERIT += "buildhistory"
+ BUILDHISTORY_COMMIT = "1"
+
+Enabling build history as
+previously described causes the OpenEmbedded build system to collect
+build output information and commit it as a single commit to a local
+:ref:`overview-manual/development-environment:git` repository.
+
+.. note::
+
+ Enabling build history increases your build times slightly,
+ particularly for images, and increases the amount of disk space used
+ during the build.
+
+You can disable build history by removing the previous statements from
+your ``conf/local.conf`` file.
+
+Understanding What the Build History Contains
+=============================================
+
+Build history information is kept in ``${``\ :term:`TOPDIR`\ ``}/buildhistory``
+in the :term:`Build Directory` as defined by the :term:`BUILDHISTORY_DIR`
+variable. Here is an example abbreviated listing:
+
+.. image:: figures/buildhistory.png
+ :align: center
+ :width: 50%
+
+At the top level, there is a ``metadata-revs`` file that lists the
+revisions of the repositories for the enabled layers when the build was
+produced. The rest of the data splits into separate ``packages``,
+``images`` and ``sdk`` directories, the contents of which are described
+as follows.
+
+Build History Package Information
+---------------------------------
+
+The history for each package contains a text file that has name-value
+pairs with information about the package. For example,
+``buildhistory/packages/i586-poky-linux/busybox/busybox/latest``
+contains the following:
+
+.. code-block:: none
+
+ PV = 1.22.1
+ PR = r32
+ RPROVIDES =
+ RDEPENDS = glibc (>= 2.20) update-alternatives-opkg
+ RRECOMMENDS = busybox-syslog busybox-udhcpc update-rc.d
+ PKGSIZE = 540168
+ FILES = /usr/bin/* /usr/sbin/* /usr/lib/busybox/* /usr/lib/lib*.so.* \
+ /etc /com /var /bin/* /sbin/* /lib/*.so.* /lib/udev/rules.d \
+ /usr/lib/udev/rules.d /usr/share/busybox /usr/lib/busybox/* \
+ /usr/share/pixmaps /usr/share/applications /usr/share/idl \
+ /usr/share/omf /usr/share/sounds /usr/lib/bonobo/servers
+ FILELIST = /bin/busybox /bin/busybox.nosuid /bin/busybox.suid /bin/sh \
+ /etc/busybox.links.nosuid /etc/busybox.links.suid
+
+Most of these
+name-value pairs correspond to variables used to produce the package.
+The exceptions are ``FILELIST``, which is the actual list of files in
+the package, and ``PKGSIZE``, which is the total size of files in the
+package in bytes.
+
+There is also a file that corresponds to the recipe from which the package
+came (e.g. ``buildhistory/packages/i586-poky-linux/busybox/latest``):
+
+.. code-block:: none
+
+ PV = 1.22.1
+ PR = r32
+ DEPENDS = initscripts kern-tools-native update-rc.d-native \
+ virtual/i586-poky-linux-compilerlibs virtual/i586-poky-linux-gcc \
+ virtual/libc virtual/update-alternatives
+ PACKAGES = busybox-ptest busybox-httpd busybox-udhcpd busybox-udhcpc \
+ busybox-syslog busybox-mdev busybox-hwclock busybox-dbg \
+ busybox-staticdev busybox-dev busybox-doc busybox-locale busybox
+
+Finally, for those recipes fetched from a version control system (e.g.,
+Git), there is a file that lists source revisions that are specified in
+the recipe and the actual revisions used during the build. Listed
+and actual revisions might differ when
+:term:`SRCREV` is set to
+${:term:`AUTOREV`}. Here is an
+example assuming
+``buildhistory/packages/qemux86-poky-linux/linux-yocto/latest_srcrev``::
+
+ # SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
+ SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
+ # SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
+   SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
+
+You can use the
+``buildhistory-collect-srcrevs`` command with the ``-a`` option to
+collect the stored :term:`SRCREV` values from build history and report them
+in a format suitable for use in global configuration (e.g.,
+``local.conf`` or a distro include file) to override floating
+:term:`AUTOREV` values to a fixed set of revisions. Here is some example
+output from this command::
+
+ $ buildhistory-collect-srcrevs -a
+ # all-poky-linux
+ SRCREV:pn-ca-certificates = "07de54fdcc5806bde549e1edf60738c6bccf50e8"
+ SRCREV:pn-update-rc.d = "8636cf478d426b568c1be11dbd9346f67e03adac"
+ # core2-64-poky-linux
+ SRCREV:pn-binutils = "87d4632d36323091e731eb07b8aa65f90293da66"
+ SRCREV:pn-btrfs-tools = "8ad326b2f28c044cb6ed9016d7c3285e23b673c8"
+ SRCREV_bzip2-tests:pn-bzip2 = "f9061c030a25de5b6829e1abf373057309c734c0"
+ SRCREV:pn-e2fsprogs = "02540dedd3ddc52c6ae8aaa8a95ce75c3f8be1c0"
+ SRCREV:pn-file = "504206e53a89fd6eed71aeaf878aa3512418eab1"
+ SRCREV_glibc:pn-glibc = "24962427071fa532c3c48c918e9d64d719cc8a6c"
+ SRCREV:pn-gnome-desktop-testing = "e346cd4ed2e2102c9b195b614f3c642d23f5f6e7"
+ SRCREV:pn-init-system-helpers = "dbd9197569c0935029acd5c9b02b84c68fd937ee"
+ SRCREV:pn-kmod = "b6ecfc916a17eab8f93be5b09f4e4f845aabd3d1"
+ SRCREV:pn-libnsl2 = "82245c0c58add79a8e34ab0917358217a70e5100"
+ SRCREV:pn-libseccomp = "57357d2741a3b3d3e8425889a6b79a130e0fa2f3"
+ SRCREV:pn-libxcrypt = "50cf2b6dd4fdf04309445f2eec8de7051d953abf"
+ SRCREV:pn-ncurses = "51d0fd9cc3edb975f04224f29f777f8f448e8ced"
+ SRCREV:pn-procps = "19a508ea121c0c4ac6d0224575a036de745eaaf8"
+ SRCREV:pn-psmisc = "5fab6b7ab385080f1db725d6803136ec1841a15f"
+ SRCREV:pn-ptest-runner = "bcb82804daa8f725b6add259dcef2067e61a75aa"
+ SRCREV:pn-shared-mime-info = "18e558fa1c8b90b86757ade09a4ba4d6a6cf8f70"
+ SRCREV:pn-zstd = "e47e674cd09583ff0503f0f6defd6d23d8b718d3"
+ # qemux86_64-poky-linux
+ SRCREV_machine:pn-linux-yocto = "20301aeb1a64164b72bc72af58802b315e025c9c"
+ SRCREV_meta:pn-linux-yocto = "2d38a472b21ae343707c8bd64ac68a9eaca066a0"
+ # x86_64-linux
+ SRCREV:pn-binutils-cross-x86_64 = "87d4632d36323091e731eb07b8aa65f90293da66"
+ SRCREV_glibc:pn-cross-localedef-native = "24962427071fa532c3c48c918e9d64d719cc8a6c"
+ SRCREV_localedef:pn-cross-localedef-native = "794da69788cbf9bf57b59a852f9f11307663fa87"
+ SRCREV:pn-debianutils-native = "de14223e5bffe15e374a441302c528ffc1cbed57"
+ SRCREV:pn-libmodulemd-native = "ee80309bc766d781a144e6879419b29f444d94eb"
+ SRCREV:pn-virglrenderer-native = "363915595e05fb252e70d6514be2f0c0b5ca312b"
+ SRCREV:pn-zstd-native = "e47e674cd09583ff0503f0f6defd6d23d8b718d3"
+
+.. note::
+
+ Here are some notes on using the ``buildhistory-collect-srcrevs`` command:
+
+ - By default, only values where the :term:`SRCREV` was not hardcoded
+ (usually when :term:`AUTOREV` is used) are reported. Use the ``-a``
+ option to see all :term:`SRCREV` values.
+
+ - The output statements might not have any effect if overrides are
+ applied elsewhere in the build system configuration. Use the
+ ``-f`` option to add the ``forcevariable`` override to each output
+ line if you need to work around this restriction.
+
+ - The script does apply special handling when building for multiple
+ machines: it places a comment before each set of values that
+ specifies the triplet to which they belong, as previously shown
+ (e.g. ``i586-poky-linux``).
+
+Build History Image Information
+-------------------------------
+
+The files produced for each image are as follows:
+
+- ``image-files:`` A directory containing selected files from the root
+ filesystem. The files are defined by
+ :term:`BUILDHISTORY_IMAGE_FILES`.
+
+- ``build-id.txt:`` Human-readable information about the build
+ configuration and metadata source revisions. This file contains the
+ full build header as printed by BitBake.
+
+- ``*.dot:`` Dependency graphs for the image that are compatible with
+ ``graphviz``.
+
+- ``files-in-image.txt:`` A list of files in the image with
+ permissions, owner, group, size, and symlink information.
+
+- ``image-info.txt:`` A text file containing name-value pairs with
+ information about the image. See the following listing example for
+ more information.
+
+- ``installed-package-names.txt:`` A list of installed packages by name
+ only.
+
+- ``installed-package-sizes.txt:`` A list of installed packages ordered
+ by size.
+
+- ``installed-packages.txt:`` A list of installed packages with full
+ package filenames.
+
+.. note::
+
+ Installed package information can be gathered and produced even if
+ package management is disabled for the final image.
+
+Here is an example of ``image-info.txt``:
+
+.. code-block:: none
+
+ DISTRO = poky
+ DISTRO_VERSION = 3.4+snapshot-a0245d7be08f3d24ea1875e9f8872aa6bbff93be
+ USER_CLASSES = buildstats
+ IMAGE_CLASSES = qemuboot qemuboot license_image
+ IMAGE_FEATURES = debug-tweaks
+ IMAGE_LINGUAS =
+ IMAGE_INSTALL = packagegroup-core-boot speex speexdsp
+ BAD_RECOMMENDATIONS =
+ NO_RECOMMENDATIONS =
+ PACKAGE_EXCLUDE =
+ ROOTFS_POSTPROCESS_COMMAND = write_package_manifest; license_create_manifest; cve_check_write_rootfs_manifest; ssh_allow_empty_password; ssh_allow_root_login; postinst_enable_logging; rootfs_update_timestamp; write_image_test_data; empty_var_volatile; sort_passwd; rootfs_reproducible;
+ IMAGE_POSTPROCESS_COMMAND = buildhistory_get_imageinfo ;
+ IMAGESIZE = 9265
+
+Other than ``IMAGESIZE``,
+which is the total size of the files in the image in Kbytes, the
+name-value pairs are variables that may have influenced the content of
+the image. This information is often useful when you are trying to
+determine why a change in the package or file listings has occurred.
+
+Using Build History to Gather Image Information Only
+----------------------------------------------------
+
+As you can see, build history produces image information, including
+dependency graphs, so you can see why something was pulled into the
+image. If you are only interested in this information and do not need
+to collect specific package or SDK information, you can enable
+writing only image information without any history by adding the
+following to your ``conf/local.conf`` file found in the
+:term:`Build Directory`::
+
+ INHERIT += "buildhistory"
+ BUILDHISTORY_COMMIT = "0"
+ BUILDHISTORY_FEATURES = "image"
+
+Here, you set the
+:term:`BUILDHISTORY_FEATURES`
+variable to use the image feature only.
+
+Build History SDK Information
+-----------------------------
+
+Build history collects information on the contents of SDKs (e.g.
+``bitbake -c populate_sdk imagename``) similar to the information it
+collects for images. Furthermore, this information differs depending on
+whether an extensible or standard SDK is being produced.
+
+The following list shows the files produced for SDKs:
+
+- ``files-in-sdk.txt:`` A list of files in the SDK with permissions,
+ owner, group, size, and symlink information. This list includes both
+ the host and target parts of the SDK.
+
+- ``sdk-info.txt:`` A text file containing name-value pairs with
+ information about the SDK. See the following listing example for more
+ information.
+
+- ``sstate-task-sizes.txt:`` A text file containing name-value pairs
+ with information about task group sizes (e.g. :ref:`ref-tasks-populate_sysroot`
+ tasks have a total size). The ``sstate-task-sizes.txt`` file exists
+ only when an extensible SDK is created.
+
+- ``sstate-package-sizes.txt:`` A text file containing name-value pairs
+ with information for the shared-state packages and sizes in the SDK.
+ The ``sstate-package-sizes.txt`` file exists only when an extensible
+ SDK is created.
+
+- ``sdk-files:`` A folder that contains copies of the files mentioned
+ in ``BUILDHISTORY_SDK_FILES`` if the files are present in the output.
+ Additionally, the default value of ``BUILDHISTORY_SDK_FILES`` is
+ specific to the extensible SDK although you can set it differently if
+ you would like to pull in specific files from the standard SDK.
+
+ The default files are ``conf/local.conf``, ``conf/bblayers.conf``,
+ ``conf/auto.conf``, ``conf/locked-sigs.inc``, and
+ ``conf/devtool.conf``. Thus, for an extensible SDK, these files get
+ copied into the ``sdk-files`` directory.
+
+- The following information appears under each of the ``host`` and
+ ``target`` directories for the portions of the SDK that run on the
+ host and on the target, respectively:
+
+ .. note::
+
+ The following files for the most part are empty when producing an
+ extensible SDK because this type of SDK is not constructed from
+ packages as is the standard SDK.
+
+ - ``depends.dot:`` Dependency graph for the SDK that is compatible
+ with ``graphviz``.
+
+ - ``installed-package-names.txt:`` A list of installed packages by
+ name only.
+
+ - ``installed-package-sizes.txt:`` A list of installed packages
+ ordered by size.
+
+ - ``installed-packages.txt:`` A list of installed packages with full
+ package filenames.
+
+Here is an example of ``sdk-info.txt``:
+
+.. code-block:: none
+
+ DISTRO = poky
+ DISTRO_VERSION = 1.3+snapshot-20130327
+ SDK_NAME = poky-glibc-i686-arm
+ SDK_VERSION = 1.3+snapshot
+ SDKMACHINE =
+ SDKIMAGE_FEATURES = dev-pkgs dbg-pkgs
+ BAD_RECOMMENDATIONS =
+ SDKSIZE = 352712
+
+Other than ``SDKSIZE``, which is
+the total size of the files in the SDK in Kbytes, the name-value pairs
+are variables that might have influenced the content of the SDK. This
+information is often useful when you are trying to determine why a
+change in the package or file listings has occurred.
+
+Examining Build History Information
+-----------------------------------
+
+You can examine build history output from the command line or from a web
+interface.
+
+To see any changes that have occurred (assuming you have
+:term:`BUILDHISTORY_COMMIT` = "1"),
+you can simply use any Git command that allows you to view the history
+of a repository. Here is one method::
+
+ $ git log -p
+
+Keep in mind, however, that this method also shows changes that are
+not significant (e.g. a package's size changing by a few bytes).
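+
+If you only care about one part of the history, you can restrict the log
+to the corresponding subdirectory of the build history repository (the
+path below is illustrative)::
+
+ $ git log -p -- images/qemux86_64/glibc/core-image-minimal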
+
+There is, however, a command-line tool called ``buildhistory-diff``
+that queries the Git repository and prints just the differences that
+might be significant in human-readable form. Here is an example::
+
+ $ poky/poky/scripts/buildhistory-diff . HEAD^
+ Changes to images/qemux86_64/glibc/core-image-minimal (files-in-image.txt):
+ /etc/anotherpkg.conf was added
+ /sbin/anotherpkg was added
+ * (installed-package-names.txt):
+ * anotherpkg was added
+ Changes to images/qemux86_64/glibc/core-image-minimal (installed-package-names.txt):
+ anotherpkg was added
+ packages/qemux86_64-poky-linux/v86d: PACKAGES: added "v86d-extras"
+ * PR changed from "r0" to "r1"
+ * PV changed from "0.1.10" to "0.1.12"
+ packages/qemux86_64-poky-linux/v86d/v86d: PKGSIZE changed from 110579 to 144381 (+30%)
+ * PR changed from "r0" to "r1"
+ * PV changed from "0.1.10" to "0.1.12"
+
+.. note::
+
+ The ``buildhistory-diff`` tool requires the ``GitPython``
+ package. Be sure to install it using ``pip3`` as follows::
+
+ $ pip3 install GitPython --user
+
+
+ Alternatively, you can install ``python3-git`` using the appropriate
+ distribution package manager (e.g. ``apt``, ``dnf``, or ``zypper``).
+
+To see changes to the build history using a web interface, follow the
+instructions in the ``README`` file
+:yocto_git:`here </buildhistory-web/>`.
+
+Here is a sample screenshot of the interface:
+
+.. image:: figures/buildhistory-web.png
+ :width: 100%
+
diff --git a/documentation/dev-manual/building.rst b/documentation/dev-manual/building.rst
new file mode 100644
index 0000000000..fe502690dd
--- /dev/null
+++ b/documentation/dev-manual/building.rst
@@ -0,0 +1,942 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Building
+********
+
+This section describes various build procedures, such as the steps
+needed for a simple build, building a target for multiple configurations,
+generating an image for more than one machine, and so forth.
+
+Building a Simple Image
+=======================
+
+In the development environment, you need to build an image whenever you
+change hardware support, add or change system libraries, or add or
+change services that have dependencies. There are several methods that allow
+you to build an image within the Yocto Project. This section presents
+the basic steps you need to build a simple image using BitBake from a
+build host running Linux.
+
+.. note::
+
+ - For information on how to build an image using
+ :term:`Toaster`, see the
+ :doc:`/toaster-manual/index`.
+
+ - For information on how to use ``devtool`` to build images, see the
+ ":ref:`sdk-manual/extensible:using \`\`devtool\`\` in your sdk workflow`"
+ section in the Yocto Project Application Development and the
+ Extensible Software Development Kit (eSDK) manual.
+
+ - For a quick example on how to build an image using the
+ OpenEmbedded build system, see the
+ :doc:`/brief-yoctoprojectqs/index` document.
+
+ - You can also use the `Yocto Project BitBake
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+ extension for Visual Studio Code to build images.
+
+The build process creates an entire Linux distribution from source and
+places it in your :term:`Build Directory` under ``tmp/deploy/images``. For
+detailed information on the build process using BitBake, see the
+":ref:`overview-manual/concepts:images`" section in the Yocto Project Overview
+and Concepts Manual.
+
+The following figure and list overview the build process:
+
+.. image:: figures/bitbake-build-flow.png
+ :width: 100%
+
+#. *Set up Your Host Development System to Support Development Using the
+ Yocto Project*: See the ":doc:`start`" section for options on how to get a
+ build host ready to use the Yocto Project.
+
+#. *Initialize the Build Environment:* Initialize the build environment
+ by sourcing the build environment script (i.e.
+ :ref:`structure-core-script`)::
+
+ $ source oe-init-build-env [build_dir]
+
+ When you use the initialization script, the OpenEmbedded build system
+ uses ``build`` as the default :term:`Build Directory` in your current work
+ directory. You can use a `build_dir` argument with the script to
+ specify a different :term:`Build Directory`.
+
+ .. note::
+
+ A common practice is to use a different :term:`Build Directory` for
+ different targets; for example, ``~/build/x86`` for a ``qemux86``
+ target, and ``~/build/arm`` for a ``qemuarm`` target. In any
+ event, it's typically cleaner to locate the :term:`Build Directory`
+ somewhere outside of your source directory.
+
+#. *Make Sure Your* ``local.conf`` *File is Correct*: Ensure the
+ ``conf/local.conf`` configuration file, which is found in the
+ :term:`Build Directory`, is set up how you want it. This file defines many
+ aspects of the build environment including the target machine architecture
+ through the :term:`MACHINE` variable, the packaging format used during
+ the build (:term:`PACKAGE_CLASSES`), and a centralized tarball download
+ directory through the :term:`DL_DIR` variable (see the example
+ ``local.conf`` fragment after these steps).
+
+#. *Build the Image:* Build the image using the ``bitbake`` command::
+
+ $ bitbake target
+
+ .. note::
+
+ For information on BitBake, see the :doc:`bitbake:index`.
+
+ The target is the name of the recipe you want to build. Common
+ targets are the images in ``meta/recipes-core/images``,
+ ``meta/recipes-sato/images``, and so forth all found in the
+ :term:`Source Directory`. Alternatively, the target
+ can be the name of a recipe for a specific piece of software such as
+ BusyBox. For more details about the images the OpenEmbedded build
+ system supports, see the
+ ":ref:`ref-manual/images:Images`" chapter in the Yocto
+ Project Reference Manual.
+
+ As an example, the following command builds the
+ ``core-image-minimal`` image::
+
+ $ bitbake core-image-minimal
+
+ Once an
+ image has been built, it often needs to be installed. The images and
+ kernels built by the OpenEmbedded build system are placed in the
+ :term:`Build Directory` in ``tmp/deploy/images``. For information on how to
+ run pre-built images such as ``qemux86`` and ``qemuarm``, see the
+ :doc:`/sdk-manual/index` manual. For
+ information about how to install these images, see the documentation
+ for your particular board or machine.
+
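+As an illustration of step 3 above, a minimal ``conf/local.conf`` fragment
+for a ``qemux86-64`` build might contain settings such as the following
+(the values shown are examples, not requirements)::
+
+ MACHINE ?= "qemux86-64"
+ PACKAGE_CLASSES ?= "package_ipk"
+ DL_DIR ?= "${TOPDIR}/downloads"
+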
+Building Images for Multiple Targets Using Multiple Configurations
+==================================================================
+
+You can use a single ``bitbake`` command to build multiple images or
+packages for different targets where each image or package requires a
+different configuration (multiple configuration builds). The builds, in
+this scenario, are sometimes referred to as "multiconfigs", and this
+section uses that term throughout.
+
+This section describes how to set up for multiple configuration builds
+and how to account for cross-build dependencies between the
+multiconfigs.
+
+Setting Up and Running a Multiple Configuration Build
+-----------------------------------------------------
+
+To accomplish a multiple configuration build, you must define each
+target's configuration separately using a parallel configuration file in
+the :term:`Build Directory` or configuration directory within a layer, and you
+must follow a required file hierarchy. Additionally, you must enable the
+multiple configuration builds in your ``local.conf`` file.
+
+Follow these steps to set up and execute multiple configuration builds:
+
+- *Create Separate Configuration Files*: You need to create a single
+ configuration file for each build target (each multiconfig).
+ The configuration definitions are implementation dependent but often
+ each configuration file will define the machine and the
+ temporary directory BitBake uses for the build. Whether the same
+ temporary directory (:term:`TMPDIR`) can be shared will depend on what is
+ similar and what is different between the configurations. Multiple
+ :term:`MACHINE` targets can share the same :term:`TMPDIR` as long as the
+ rest of the configuration is the same; multiple :term:`DISTRO` settings
+ would need separate :term:`TMPDIR` directories.
+
+ For example, consider a scenario with two different multiconfigs for the same
+ :term:`MACHINE`: "qemux86" built
+ for two distributions such as "poky" and "poky-lsb". In this case,
+ you would need to use different :term:`TMPDIR` directories.
+
+ Here is an example showing the minimal statements needed in a
+ configuration file for a "qemux86" target whose temporary build
+ directory is ``tmpmultix86``::
+
+ MACHINE = "qemux86"
+ TMPDIR = "${TOPDIR}/tmpmultix86"
+
+ The location for these multiconfig configuration files is specific.
+ They must reside in the current :term:`Build Directory` in a sub-directory of
+ ``conf`` named ``multiconfig`` or within a layer's ``conf`` directory
+ under a directory named ``multiconfig``. Here is an example that defines
+ two configuration files for the "x86" and "arm" multiconfigs:
+
+ .. image:: figures/multiconfig_files.png
+ :align: center
+ :width: 50%
+
+ The usual :term:`BBPATH` search path is used to locate multiconfig files in
+ a similar way to other conf files.
+
+- *Add the BitBake Multi-configuration Variable to the Local
+ Configuration File*: Use the
+ :term:`BBMULTICONFIG`
+ variable in your ``conf/local.conf`` configuration file to specify
+ each multiconfig. Continuing with the example from the previous
+ figure, the :term:`BBMULTICONFIG` variable needs to enable two
+ multiconfigs: "x86" and "arm" by specifying each configuration file::
+
+ BBMULTICONFIG = "x86 arm"
+
+ .. note::
+
+ A "default" configuration already exists by definition. This
+ configuration is named: "" (i.e. empty string) and is defined by
+ the variables coming from your ``local.conf``
+ file. Consequently, the previous example actually adds two
+ additional configurations to your build: "arm" and "x86" along
+ with "".
+
+- *Launch BitBake*: Use the following BitBake command form to launch
+ the multiple configuration build::
+
+ $ bitbake [mc:multiconfigname:]target [[[mc:multiconfigname:]target] ... ]
+
+ For the example in this section, the following command applies::
+
+ $ bitbake mc:x86:core-image-minimal mc:arm:core-image-sato mc::core-image-base
+
+ The previous BitBake command builds a ``core-image-minimal`` image
+ that is configured through the ``x86.conf`` configuration file, a
+ ``core-image-sato`` image that is configured through the ``arm.conf``
+ configuration file and a ``core-image-base`` that is configured
+ through your ``local.conf`` configuration file.
+
+.. note::
+
+ Support for multiple configuration builds in the Yocto Project &DISTRO;
+ (&DISTRO_NAME;) Release does not include Shared State (sstate)
+ optimizations. Consequently, if a build uses the same object twice
+ in, for example, two different :term:`TMPDIR`
+ directories, the build either loads from an existing sstate cache for
+ that build at the start or builds the object fresh.
+
+Enabling Multiple Configuration Build Dependencies
+--------------------------------------------------
+
+Sometimes dependencies can exist between targets (multiconfigs) in a
+multiple configuration build. For example, suppose that in order to
+build a ``core-image-sato`` image for an "x86" multiconfig, the root
+filesystem of an "arm" multiconfig must exist. This dependency is
+essentially that the
+:ref:`ref-tasks-image` task in the
+``core-image-sato`` recipe depends on the completion of the
+:ref:`ref-tasks-rootfs` task of the
+``core-image-minimal`` recipe.
+
+To enable dependencies in a multiple configuration build, you must
+declare the dependencies in the recipe using the following statement
+form::
+
+ task_or_package[mcdepends] = "mc:from_multiconfig:to_multiconfig:recipe_name:task_on_which_to_depend"
+
+To better show how to use this statement, consider the example scenario
+from the first paragraph of this section. The following statement needs
+to be added to the recipe that builds the ``core-image-sato`` image::
+
+ do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_rootfs"
+
+In this example, the `from_multiconfig` is "x86". The `to_multiconfig` is "arm". The
+task on which the :ref:`ref-tasks-image` task in the recipe depends is the
+:ref:`ref-tasks-rootfs` task from the ``core-image-minimal`` recipe associated
+with the "arm" multiconfig.
+
+Once you set up this dependency, you can build the "x86" multiconfig
+using a BitBake command as follows::
+
+ $ bitbake mc:x86:core-image-sato
+
+This command executes all the tasks needed to create the
+``core-image-sato`` image for the "x86" multiconfig. Because of the
+dependency, BitBake also executes through the :ref:`ref-tasks-rootfs` task for the
+"arm" multiconfig build.
+
+Having a recipe depend on the root filesystem of another build might not
+seem that useful. Consider this change to the statement in the
+``core-image-sato`` recipe::
+
+ do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_image"
+
+In this case, BitBake must
+create the ``core-image-minimal`` image for the "arm" build since the
+"x86" build depends on it.
+
+Because "x86" and "arm" are enabled for multiple configuration builds
+and have separate configuration files, BitBake places the artifacts for
+each build in the respective temporary build directories (i.e.
+:term:`TMPDIR`).
+
+Building an Initial RAM Filesystem (Initramfs) Image
+====================================================
+
+An initial RAM filesystem (:term:`Initramfs`) image provides a temporary root
+filesystem used for early system initialization, typically providing tools and
+loading modules needed to locate and mount the final root filesystem.
+
+Follow these steps to create an :term:`Initramfs` image:
+
+#. *Create the Initramfs Image Recipe:* You can reference the
+ ``core-image-minimal-initramfs.bb`` recipe found in the
+ ``meta/recipes-core`` directory of the :term:`Source Directory`
+ as an example from which to work.
+
+#. *Decide if You Need to Bundle the Initramfs Image Into the Kernel
+ Image:* If you want the :term:`Initramfs` image that is built to be bundled
+ in with the kernel image, set the :term:`INITRAMFS_IMAGE_BUNDLE`
+ variable to ``"1"`` in your ``local.conf`` configuration file and set the
+ :term:`INITRAMFS_IMAGE` variable in the recipe that builds the kernel
+ image (see the configuration sketch after these steps).
+
+ Setting the :term:`INITRAMFS_IMAGE_BUNDLE` flag causes the :term:`Initramfs`
+ image to be unpacked into the ``${B}/usr/`` directory. The unpacked
+ :term:`Initramfs` image is then passed to the kernel's ``Makefile`` using the
+ :term:`CONFIG_INITRAMFS_SOURCE` variable, allowing the :term:`Initramfs`
+ image to be built into the kernel normally.
+
+#. *Optionally Add Items to the Initramfs Image Through the Initramfs
+ Image Recipe:* If you add items to the :term:`Initramfs` image by way of its
+ recipe, you should use :term:`PACKAGE_INSTALL` rather than
+ :term:`IMAGE_INSTALL`. :term:`PACKAGE_INSTALL` gives more direct control of
+ what is added to the image as compared to the defaults you might not
+ necessarily want that are set by the :ref:`ref-classes-image`
+ or :ref:`ref-classes-core-image` classes.
+
+#. *Build the Kernel Image and the Initramfs Image:* Build your kernel
+ image using BitBake. Because the :term:`Initramfs` image recipe is a
+ dependency of the kernel image, the :term:`Initramfs` image is built as well
+ and bundled with the kernel image if you used the
+ :term:`INITRAMFS_IMAGE_BUNDLE` variable described earlier.
+
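+As a minimal sketch of the configuration described in step 2 above (the
+recipe and file names below are only examples), ``local.conf`` would
+contain::
+
+ INITRAMFS_IMAGE_BUNDLE = "1"
+
+and the recipe (or an append file such as ``linux-yocto_%.bbappend``) for
+your kernel would contain::
+
+ INITRAMFS_IMAGE = "core-image-minimal-initramfs"
+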
+Bundling an Initramfs Image From a Separate Multiconfig
+-------------------------------------------------------
+
+There may be a case where we want to build an :term:`Initramfs` image which does not
+inherit the same distro policy as our main image. For example, we may want
+our main image to use ``TCLIBC="glibc"``, but to use ``TCLIBC="musl"`` in our :term:`Initramfs`
+image to keep a smaller footprint. However, by performing the steps mentioned
+above the :term:`Initramfs` image will inherit ``TCLIBC="glibc"`` without allowing us
+to override it.
+
+To achieve this, you need to perform some additional steps:
+
+#. *Create a multiconfig for your Initramfs image:* You can perform the steps
+ on ":ref:`dev-manual/building:building images for multiple targets using multiple configurations`" to create a separate multiconfig.
+ For the sake of simplicity, let's assume the multiconfig file is called ``initramfscfg.conf`` and
+ contains the variables::
+
+ TMPDIR="${TOPDIR}/tmp-initramfscfg"
+ TCLIBC="musl"
+
+#. *Set additional Initramfs variables on your main configuration:*
+ Additionally, on your main configuration (``local.conf``) you need to set the
+ variables::
+
+ INITRAMFS_MULTICONFIG = "initramfscfg"
+ INITRAMFS_DEPLOY_DIR_IMAGE = "${TOPDIR}/tmp-initramfscfg/deploy/images/${MACHINE}"
+
+ The variables :term:`INITRAMFS_MULTICONFIG` and :term:`INITRAMFS_DEPLOY_DIR_IMAGE`
+ are used to create a multiconfig dependency from the kernel to the :term:`INITRAMFS_IMAGE`
+ to be built coming from the ``initramfscfg`` multiconfig, and to let the
+ build system know where the :term:`INITRAMFS_IMAGE` will be located.
+
+ Building a system with such a configuration will build the kernel using the
+ main configuration but the :ref:`ref-tasks-bundle_initramfs` task will grab the
+ selected :term:`INITRAMFS_IMAGE` from :term:`INITRAMFS_DEPLOY_DIR_IMAGE`
+ instead, resulting in a musl based :term:`Initramfs` image bundled in the kernel
+ but a glibc based main image.
+
+ The same is applicable to avoid inheriting :term:`DISTRO_FEATURES` on :term:`INITRAMFS_IMAGE`
+ or to build a different :term:`DISTRO` for it such as ``poky-tiny``.
+
+
+Building a Tiny System
+======================
+
+Very small distributions have some significant advantages such as
+requiring less on-die or in-package memory (cheaper), better performance
+through efficient cache usage, lower power requirements due to less
+memory, faster boot times, and reduced development overhead. Some
+real-world examples where a very small distribution gives you distinct
+advantages are digital cameras, medical devices, and small headless
+systems.
+
+This section presents information that shows you how you can trim your
+distribution to even smaller sizes than the ``poky-tiny`` distribution,
+which is around 5 Mbytes and can be built out-of-the-box using the
+Yocto Project.
+
+Tiny System Overview
+--------------------
+
+The following list presents the overall steps you need to consider and
+perform to create distributions with smaller root filesystems, achieve
+faster boot times, maintain your critical functionality, and avoid
+initial RAM disks:
+
+- :ref:`Determine your goals and guiding principles
+ <dev-manual/building:goals and guiding principles>`
+
+- :ref:`dev-manual/building:understand what contributes to your image size`
+
+- :ref:`Reduce the size of the root filesystem
+ <dev-manual/building:trim the root filesystem>`
+
+- :ref:`Reduce the size of the kernel <dev-manual/building:trim the kernel>`
+
+- :ref:`dev-manual/building:remove package management requirements`
+
+- :ref:`dev-manual/building:look for other ways to minimize size`
+
+- :ref:`dev-manual/building:iterate on the process`
+
+Goals and Guiding Principles
+----------------------------
+
+Before you can reach your destination, you need to know where you are
+going. Here is an example list that you can use as a guide when creating
+very small distributions:
+
+- Determine how much space you need (e.g. a kernel that is 1 Mbyte or
+ less and a root filesystem that is 3 Mbytes or less).
+
+- Find the areas that are currently taking 90% of the space and
+ concentrate on reducing those areas.
+
+- Do not create any difficult "hacks" to achieve your goals.
+
+- Leverage the device-specific options.
+
+- Work in a separate layer so that you keep changes isolated. For
+ information on how to create layers, see the
+ ":ref:`dev-manual/layers:understanding and creating layers`" section.
+
+Understand What Contributes to Your Image Size
+----------------------------------------------
+
+It is easiest to have something to start with when creating your own
+distribution. You can use the Yocto Project out-of-the-box to create the
+``poky-tiny`` distribution. Ultimately, you will want to make changes in
+your own distribution that are likely modeled after ``poky-tiny``.
+
+.. note::
+
+ To use ``poky-tiny`` in your build, set the :term:`DISTRO` variable in your
+ ``local.conf`` file to "poky-tiny" as described in the
+ ":ref:`dev-manual/custom-distribution:creating your own distribution`"
+ section.
+
+Understanding some memory concepts will help you reduce the system size.
+Memory consists of static, dynamic, and temporary memory. Static memory
+is the TEXT (code), DATA (initialized data in the code), and BSS
+(uninitialized data) sections. Dynamic memory represents memory that is
+allocated at runtime: stacks, hash tables, and so forth. Temporary
+memory is recovered after the boot process. This memory consists of
+memory used for decompressing the kernel and for the ``__init``
+functions.
+
+To help you see where you currently are with kernel and root filesystem
+sizes, you can use two tools found in the :term:`Source Directory`
+in the
+``scripts/tiny/`` directory:
+
+- ``ksize.py``: Reports component sizes for the kernel build objects.
+
+- ``dirsize.py``: Reports component sizes for the root filesystem.
+
+This next tool and command help you organize configuration fragments and
+view file dependencies in a human-readable form:
+
+- ``merge_config.sh``: Helps you manage configuration files and
+ fragments within the kernel. With this tool, you can merge individual
+ configuration fragments together. The tool allows you to make
+ overrides and warns you of any missing configuration options. The
+ tool is ideal for allowing you to iterate on configurations, create
+ minimal configurations, and create configuration files for different
+ machines without having to duplicate your process.
+
+ The ``merge_config.sh`` script is part of the Linux Yocto kernel Git
+ repositories (i.e. ``linux-yocto-3.14``, ``linux-yocto-3.10``,
+ ``linux-yocto-3.8``, and so forth) in the ``scripts/kconfig``
+ directory.
+
+ For more information on configuration fragments, see the
+ ":ref:`kernel-dev/common:creating configuration fragments`"
+ section in the Yocto Project Linux Kernel Development Manual.
+
+- ``bitbake -u taskexp -g bitbake_target``: Using the BitBake command
+ with these options brings up a Dependency Explorer from which you can
+ view file dependencies. Understanding these dependencies allows you
+ to make informed decisions when cutting out various pieces of the
+ kernel and root filesystem.
+
+Trim the Root Filesystem
+------------------------
+
+The root filesystem is made up of packages for booting, libraries, and
+applications. To change things, you can configure how the packaging
+happens, which changes the way you build them. You can also modify the
+filesystem itself or select a different filesystem.
+
+First, find out what is hogging your root filesystem by running the
+``dirsize.py`` script from your root directory::
+
+ $ cd root-directory-of-image
+ $ dirsize.py 100000 > dirsize-100k.log
+ $ cat dirsize-100k.log
+
+You can apply a filter to the script to ignore files
+under a certain size. The previous example filters out any files below
+100 Kbytes. The sizes reported by the tool are uncompressed, and thus
+will be smaller by a relatively constant factor in a compressed root
+filesystem. When you examine your log file, you can focus on areas of
+the root filesystem that take up large amounts of memory.
+
+You need to be sure that what you eliminate does not cripple the
+functionality you need. One way to see how packages relate to each other
+is by using the Dependency Explorer UI with the BitBake command::
+
+ $ cd image-directory
+ $ bitbake -u taskexp -g image
+
+Use the interface to
+select potential packages you wish to eliminate and see their dependency
+relationships.
+
+When deciding how to reduce the size, get rid of packages that result in
+minimal impact on the feature set. For example, you might not need a VGA
+display. Or, you might be able to get by with ``devtmpfs`` and ``mdev``
+instead of ``udev``.
+
+Use your ``local.conf`` file to make changes. For example, to eliminate
+``udev`` and ``glib``, set the following in the local configuration
+file::
+
+ VIRTUAL-RUNTIME_dev_manager = ""
+
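+Alternatively, rather than leaving the device manager empty, you might point
+the virtual runtime provider at BusyBox's ``mdev`` as a lighter replacement
+for ``udev``. Here is a sketch, assuming your BusyBox configuration provides
+``mdev``::
+
+ VIRTUAL-RUNTIME_dev_manager = "busybox-mdev"
+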
+Finally, you should consider exactly which type of root filesystem meets
+your needs while also reducing its size. For example,
+consider ``cramfs``, ``squashfs``, ``ubifs``, ``ext2``, or an
+:term:`Initramfs` using ``initramfs``. Be aware that ``ext3`` requires a 1
+Mbyte journal. If you are okay with running read-only, you do not need
+this journal.
+
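+If you do settle on a read-only root filesystem, the corresponding image
+feature can be enabled in your image recipe or ``local.conf`` (a sketch;
+adjust to wherever you configure your image)::
+
+ IMAGE_FEATURES += "read-only-rootfs"
+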
+.. note::
+
+ After each round of elimination, you need to rebuild your system and
+ then use the tools to see the effects of your reductions.
+
+Trim the Kernel
+---------------
+
+The kernel is built by including policies for hardware-independent
+aspects. What subsystems do you enable? For what architecture are you
+building? Which drivers do you build by default?
+
+.. note::
+
+ You can modify the kernel source if you want to help with boot time.
+
+Run the ``ksize.py`` script from the top-level Linux build directory to
+get an idea of what is making up the kernel::
+
+ $ cd top-level-linux-build-directory
+ $ ksize.py > ksize.log
+ $ cat ksize.log
+
+When you examine the log, you will see how much space is taken up with
+the built-in ``.o`` files for drivers, networking, core kernel files,
+filesystem, sound, and so forth. The sizes reported by the tool are
+uncompressed, and thus will be smaller by a relatively constant factor
+in a compressed kernel image. Look to reduce the areas that are large,
+keeping in mind the "90% rule" described earlier.
+
+To examine, or drill down, into any particular area, use the ``-d``
+option with the script::
+
+ $ ksize.py -d > ksize.log
+
+Using this option
+breaks out the individual file information for each area of the kernel
+(e.g. drivers, networking, and so forth).
+
+Use your log file to see what you can eliminate from the kernel based on
+features you can let go. For example, if you are not going to need
+sound, you do not need any drivers that support sound.
+
+After figuring out what to eliminate, you need to reconfigure the kernel
+to reflect those changes during the next build. You could run
+``menuconfig`` and make all your changes at once. However, that makes it
+difficult to see the effects of your individual eliminations and also
+makes it difficult to replicate the changes for perhaps another target
+device. A better method is to start with no configurations using
+``allnoconfig``, create configuration fragments for individual changes,
+and then manage the fragments into a single configuration file using
+``merge_config.sh``. The tool makes it easy for you to iterate using the
+configuration change and build cycle.
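+
+As a rough sketch of this workflow, assuming two hypothetical fragments
+``serial.cfg`` and ``net.cfg`` that each enable one group of options, you
+could run the following from the kernel source tree::
+
+ $ make allnoconfig
+ $ ./scripts/kconfig/merge_config.sh .config serial.cfg net.cfg
+
+``merge_config.sh`` warns about any requested options that are missing from
+the merged ``.config``, which helps you spot dependencies you still need to
+enable in a fragment.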
+
+Each time you make configuration changes, you need to rebuild the kernel
+and check to see what impact your changes had on the overall size.
+
+Remove Package Management Requirements
+--------------------------------------
+
+Packaging requirements add size to the image. One way to reduce the size
+of the image is to remove all the packaging requirements from the image.
+This reduction includes both removing the package manager and its unique
+dependencies as well as removing the package management data itself.
+
+To eliminate all the packaging requirements for an image, be sure that
+"package-management" is not part of your
+:term:`IMAGE_FEATURES`
+statement for the image. When you remove this feature, you are removing
+the package manager as well as its dependencies from the root
+filesystem.
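+
+If the feature is being pulled in by configuration you would rather not
+edit directly, one way to force it out (a sketch using BitBake's removal
+override) is to add the following to your image recipe or ``local.conf``::
+
+ IMAGE_FEATURES:remove = "package-management"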
+
+Look for Other Ways to Minimize Size
+------------------------------------
+
+Depending on your particular circumstances, other areas that you can
+trim likely exist. The key to finding these areas is through tools and
+methods described here combined with experimentation and iteration. Here
+are a couple of areas to experiment with:
+
+- ``glibc``: In general, follow this process:
+
+ #. Remove ``glibc`` features from
+ :term:`DISTRO_FEATURES`
+ that you think you do not need.
+
+ #. Build your distribution.
+
+ #. If the build fails due to missing symbols in a package, determine
+ if you can reconfigure the package to not need those features. For
+ example, change the configuration to not support wide characters,
+ as is done for ``ncurses``. Or, if support for those
+ characters is needed, determine what ``glibc`` features provide
+ the support and restore the configuration.
+
+ #. Rebuild and repeat the process.
+
+- ``busybox``: For BusyBox, use a process similar to the one described for
+ ``glibc``. A difference is that you will need to boot the resulting system
+ to see if you are able to do everything you expect from the running
+ system. You also need to be sure to integrate configuration fragments into
+ BusyBox because BusyBox handles its own core features and then allows
+ you to add configuration fragments on top.
+
+Iterate on the Process
+----------------------
+
+If you have not reached your goals on system size, you need to iterate
+on the process. The process is the same. Use the tools and see just what
+is taking up 90% of the root filesystem and the kernel. Decide what you
+can eliminate without limiting your device beyond what you need.
+
+Depending on your system, a good place to look might be BusyBox, which
+provides a stripped-down version of Unix tools in a single executable
+file. You might be able to drop virtual terminal services or perhaps
+IPv6.
+
+Building Images for More than One Machine
+=========================================
+
+A common scenario developers face is creating images for several
+different machines that use the same software environment. In this
+situation, it is tempting to set the tunings and optimization flags for
+each build specifically for the targeted hardware (i.e. "maxing out" the
+tunings). Doing so can considerably add to build times and package feed
+maintenance collectively for the machines. For example, selecting tunes
+that are extremely specific to a CPU core used in a system might enable
+some micro optimizations in GCC for that particular system but would
+otherwise not gain you much of a performance difference across the other
+systems as compared to using a more general tuning across all the builds
+(e.g. setting :term:`DEFAULTTUNE`
+specifically for each machine's build). Rather than "max out" each
+build's tunings, you can take steps that cause the OpenEmbedded build
+system to reuse software across the various machines where it makes
+sense.
+
+If build speed and package feed maintenance are considerations, you
+should consider the points in this section that can help you optimize
+your tunings to best consider build times and package feed maintenance.
+
+- *Share the :term:`Build Directory`:* If at all possible, share the
+ :term:`TMPDIR` across builds. The Yocto Project supports switching between
+ different :term:`MACHINE` values in the same :term:`TMPDIR`. This practice
+ is well supported and regularly used by developers when building for
+ multiple machines. When you use the same :term:`TMPDIR` for multiple
+ machine builds, the OpenEmbedded build system can reuse the existing native
+ and often cross-recipes for multiple machines. Thus, build time decreases.
+
+ .. note::
+
+ If :term:`DISTRO` settings change or fundamental configuration settings
+ such as the filesystem layout, you need to work with a clean :term:`TMPDIR`.
+ Sharing :term:`TMPDIR` under these circumstances might work but since it is
+ not guaranteed, you should use a clean :term:`TMPDIR`.
+
+- *Enable the Appropriate Package Architecture:* By default, the
+ OpenEmbedded build system enables three levels of package
+ architectures: "all", "tune" or "package", and "machine". Any given
+ recipe usually selects one of these package architectures (types) for
+ its output. Depending on what a given recipe creates packages for,
+ making sure you enable the appropriate package architecture can
+ directly impact the build time.
+
+ A recipe that just generates scripts can enable "all" architecture
+ because there are no binaries to build. To specifically enable "all"
+ architecture, be sure your recipe inherits the
+ :ref:`ref-classes-allarch` class.
+ This class is useful for "all" architectures because it configures
+ many variables so packages can be used across multiple architectures.
+
+ If your recipe needs to generate packages that are machine-specific
+ or when one of the build or runtime dependencies is already
+ machine-architecture dependent, which makes your recipe also
+ machine-architecture dependent, make sure your recipe enables the
+ "machine" package architecture through the
+ :term:`MACHINE_ARCH`
+ variable::
+
+ PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+ When you do not
+ specifically enable a package architecture through the
+ :term:`PACKAGE_ARCH` variable, the
+ OpenEmbedded build system defaults to the
+ :term:`TUNE_PKGARCH` setting::
+
+ PACKAGE_ARCH = "${TUNE_PKGARCH}"
+
+- *Choose a Generic Tuning File if Possible:* Some tunes are more
+ generic and can run on multiple targets (e.g. an ``armv5`` set of
+ packages could run on ``armv6`` and ``armv7`` processors in most
+ cases). Similarly, ``i486`` binaries could work on ``i586`` and
+ higher processors. You should realize, however, that advances on
+ newer processor versions would not be used.
+
+ If you select the same tune for several different machines, the
+ OpenEmbedded build system reuses software previously built, thus
+ speeding up the overall build time. Realize that even though a new
+ sysroot for each machine is generated, the software is not recompiled
+ and only one package feed exists.
+
+- *Manage Granular Level Packaging:* Sometimes there are cases where
+ injecting another level of package architecture beyond the three
+ higher levels noted earlier can be useful. For example, consider how
+ NXP (formerly Freescale) allows for the easy reuse of binary packages
+ in their layer
+ :yocto_git:`meta-freescale </meta-freescale/>`.
+ In this example, the
+ :yocto_git:`fsl-dynamic-packagearch </meta-freescale/tree/classes/fsl-dynamic-packagearch.bbclass>`
+ class shares GPU packages for i.MX53 boards because all boards share
+ the AMD GPU. The i.MX6-based boards can do the same because all
+ boards share the Vivante GPU. This class inspects the BitBake
+ datastore to identify if the package provides or depends on one of
+ the sub-architecture values. If so, the class sets the
+ :term:`PACKAGE_ARCH` value
+ based on the ``MACHINE_SUBARCH`` value. If the package does not
+ provide or depend on one of the sub-architecture values but it
+ matches a value in the machine-specific filter, it sets
+ :term:`MACHINE_ARCH`. This
+ behavior reduces the number of packages built and saves build time by
+ reusing binaries.
+
+- *Use Tools to Debug Issues:* Sometimes you can run into situations
+ where software is being rebuilt when you think it should not be. For
+ example, the OpenEmbedded build system might not be using shared
+ state between machines when you think it should be. These types of
+ situations are usually due to references to machine-specific
+ variables such as :term:`MACHINE`,
+ :term:`SERIAL_CONSOLES`,
+ :term:`XSERVER`,
+ :term:`MACHINE_FEATURES`,
+ and so forth in code that is supposed to only be tune-specific or
+ when the recipe depends
+ (:term:`DEPENDS`,
+ :term:`RDEPENDS`,
+ :term:`RRECOMMENDS`,
+ :term:`RSUGGESTS`, and so forth)
+ on some other recipe that already has
+ :term:`PACKAGE_ARCH` defined
+ as "${MACHINE_ARCH}".
+
+ .. note::
+
+ Patches to fix any issues identified are most welcome as these
+ issues occasionally do occur.
+
+ For such cases, you can use some tools to help you sort out the
+ situation:
+
+ - ``sstate-diff-machines.sh``*:* You can find this tool in the
+ ``scripts`` directory of the Source Repositories. See the comments
+ in the script for information on how to use the tool.
+
+ - *BitBake's "-S printdiff" Option:* Using this option causes
+ BitBake to try to establish the most recent signature match
+ (e.g. in the shared state cache) and then compare matched signatures
+ to determine the stamps and delta where these two stamp trees diverge.
+
+Building Software from an External Source
+=========================================
+
+By default, the OpenEmbedded build system uses the :term:`Build Directory`
+when building source code. The build process involves fetching the source
+files, unpacking them, and then patching them if necessary before the build
+takes place.
+
+There are situations where you might want to build software from source
+files that are external to and thus outside of the OpenEmbedded build
+system. For example, suppose you have a project that includes a new BSP
+with a heavily customized kernel. And, you want to minimize exposing the
+build system to the development team so that they can focus on their
+project and maintain everyone's workflow as much as possible. In this
+case, you want a kernel source directory on the development machine
+where the development occurs. You want the recipe's
+:term:`SRC_URI` variable to point to
+the external directory and use it as is, not copy it.
+
+To build from software that comes from an external source, all you need to do
+is inherit the :ref:`ref-classes-externalsrc` class and then set
+the :term:`EXTERNALSRC` variable to point to your external source code. Here
+are the statements to put in your ``local.conf`` file::
+
+ INHERIT += "externalsrc"
+ EXTERNALSRC:pn-myrecipe = "path-to-your-source-tree"
+
+This next example shows how to accomplish the same thing by setting
+:term:`EXTERNALSRC` in the recipe itself or in the recipe's append file::
+
+ EXTERNALSRC = "path"
+ EXTERNALSRC_BUILD = "path"
+
+.. note::
+
+ In order for these settings to take effect, you must globally or
+ locally inherit the :ref:`ref-classes-externalsrc` class.
+
+By default, :ref:`ref-classes-externalsrc` builds the source code in a
+directory separate from the external source directory as specified by
+:term:`EXTERNALSRC`. If you need
+to have the source built in the same directory in which it resides, or
+some other nominated directory, you can set
+:term:`EXTERNALSRC_BUILD`
+to point to that directory::
+
+ EXTERNALSRC_BUILD:pn-myrecipe = "path-to-your-source-tree"
+
+Replicating a Build Offline
+===========================
+
+It can be useful to take a "snapshot" of upstream sources used in a
+build and then use that "snapshot" later to replicate the build offline.
+To do so, you need to first prepare and populate your downloads
+directory with your "snapshot" of files. Once your downloads directory is
+ready, you can use it at any time and from any machine to replicate your
+build.
+
+Follow these steps to populate your Downloads directory:
+
+#. *Create a Clean Downloads Directory:* Start with an empty downloads
+ directory (:term:`DL_DIR`). You
+ start with an empty downloads directory by either removing the files
+ in the existing directory or by setting :term:`DL_DIR` to point to either
+ an empty location or one that does not yet exist.
+
+#. *Generate Tarballs of the Source Git Repositories:* Edit your
+ ``local.conf`` configuration file as follows::
+
+ DL_DIR = "/home/your-download-dir/"
+ BB_GENERATE_MIRROR_TARBALLS = "1"
+
+ During
+ the fetch process in the next step, BitBake gathers the source files
+ and creates tarballs in the directory pointed to by :term:`DL_DIR`. See
+ the
+ :term:`BB_GENERATE_MIRROR_TARBALLS`
+ variable for more information.
+
+#. *Populate Your Downloads Directory Without Building:* Use BitBake to
+ fetch your sources but inhibit the build::
+
+ $ bitbake target --runonly=fetch
+
+ The downloads directory (i.e. ``${DL_DIR}``) now has
+ a "snapshot" of the source files in the form of tarballs, which can
+ be used for the build.
+
+#. *Optionally Remove Any Git or other SCM Subdirectories From the
+ Downloads Directory:* If you want, you can clean up your downloads
+ directory by removing any Git or other Source Control Management
+ (SCM) subdirectories such as ``${DL_DIR}/git2/*``. The tarballs
+ already contain these subdirectories.
+
+Once your downloads directory has everything it needs regarding source
+files, you can create your "own-mirror" and build your target.
+Understand that you can use the files to build the target offline from
+any machine and at any time.
+
+Follow these steps to build your target using the files in the downloads
+directory:
+
+#. *Using Local Files Only:* Inside your ``local.conf`` file, add the
+ :term:`SOURCE_MIRROR_URL` variable, inherit the
+ :ref:`ref-classes-own-mirrors` class, and set the
+ :term:`BB_NO_NETWORK` variable::
+
+ SOURCE_MIRROR_URL ?= "file:///home/your-download-dir/"
+ INHERIT += "own-mirrors"
+ BB_NO_NETWORK = "1"
+
+ The :term:`SOURCE_MIRROR_URL` and :ref:`ref-classes-own-mirrors`
+ class set up the system to use the downloads directory as your "own
+ mirror". Using the :term:`BB_NO_NETWORK` variable makes sure that
+ BitBake's fetching process in step 3 stays local, which means files
+ from your "own-mirror" are used.
+
+#. *Start With a Clean Build:* You can start with a clean build by
+ removing the ``${``\ :term:`TMPDIR`\ ``}`` directory or using a new
+ :term:`Build Directory`.
+
+#. *Build Your Target:* Use BitBake to build your target::
+
+ $ bitbake target
+
+ The build completes using the known local "snapshot" of source
+ files from your mirror. The resulting tarballs for your "snapshot" of
+ source files are in the downloads directory.
+
+ .. note::
+
+ The offline build does not work if recipes attempt to find the
+ latest version of software by setting
+ :term:`SRCREV` to
+ ``${``\ :term:`AUTOREV`\ ``}``::
+
+ SRCREV = "${AUTOREV}"
+
+ When a recipe sets :term:`SRCREV` to
+ ``${``\ :term:`AUTOREV`\ ``}``, the build system accesses the network in an
+ attempt to determine the latest version of software from the SCM.
+ Typically, recipes that use :term:`AUTOREV` are custom or modified
+ recipes. Recipes that reside in public repositories usually do not
+ use :term:`AUTOREV`.
+
+ If you do have recipes that use :term:`AUTOREV`, you can take steps to
+ still use the recipes in an offline build. Do the following:
+
+ #. Use a configuration generated by enabling :ref:`build
+ history <dev-manual/build-quality:maintaining build output quality>`.
+
+ #. Use the ``buildhistory-collect-srcrevs`` command to collect the
+ stored :term:`SRCREV` values from the build's history. For more
+ information on collecting these values, see the
+ ":ref:`dev-manual/build-quality:build history package information`"
+ section.
+
+ #. Once you have the correct source revisions, you can modify
+ those recipes to set :term:`SRCREV` to specific versions of the
+ software.
+
diff --git a/documentation/dev-manual/common-tasks.rst b/documentation/dev-manual/common-tasks.rst
deleted file mode 100644
index b228c75aab..0000000000
--- a/documentation/dev-manual/common-tasks.rst
+++ /dev/null
@@ -1,11784 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
-
-************
-Common Tasks
-************
-
-This chapter describes fundamental procedures such as creating layers,
-adding new software packages, extending or customizing images, porting
-work to new hardware (adding a new machine), and so forth. You will find
-that the procedures documented here occur often in the development cycle
-using the Yocto Project.
-
-Understanding and Creating Layers
-=================================
-
-The OpenEmbedded build system supports organizing
-:term:`Metadata` into multiple layers.
-Layers allow you to isolate different types of customizations from each
-other. For introductory information on the Yocto Project Layer Model,
-see the
-":ref:`overview-manual/yp-intro:the yocto project layer model`"
-section in the Yocto Project Overview and Concepts Manual.
-
-Creating Your Own Layer
------------------------
-
-.. note::
-
- It is very easy to create your own layers to use with the OpenEmbedded
- build system, as the Yocto Project ships with tools that speed up creating
- layers. This section describes the steps you perform by hand to create
- layers so that you can better understand them. For information about the
- layer-creation tools, see the
- ":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`"
- section in the Yocto Project Board Support Package (BSP) Developer's
- Guide and the ":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
- section further down in this manual.
-
-Follow these general steps to create your layer without using tools:
-
-1. *Check Existing Layers:* Before creating a new layer, you should be
- sure someone has not already created a layer containing the Metadata
- you need. You can see the :oe_layerindex:`OpenEmbedded Metadata Index <>`
- for a list of layers from the OpenEmbedded community that can be used in
- the Yocto Project. You could find a layer that is identical or close
- to what you need.
-
-2. *Create a Directory:* Create the directory for your layer. When you
- create the layer, be sure to create the directory in an area not
- associated with the Yocto Project :term:`Source Directory`
- (e.g. the cloned ``poky`` repository).
-
- While not strictly required, prepend the name of the directory with
- the string "meta-". For example::
-
- meta-mylayer
- meta-GUI_xyz
- meta-mymachine
-
- With rare exceptions, a layer's name follows this form::
-
- meta-root_name
-
- Following this layer naming convention can save
- you trouble later when tools, components, or variables "assume" your
- layer name begins with "meta-". A notable example is in configuration
- files as shown in the following step where layer names without the
- "meta-" string are appended to several variables used in the
- configuration.
-
-3. *Create a Layer Configuration File:* Inside your new layer folder,
- you need to create a ``conf/layer.conf`` file. It is easiest to take
- an existing layer configuration file and copy that to your layer's
- ``conf`` directory and then modify the file as needed.
-
- The ``meta-yocto-bsp/conf/layer.conf`` file in the Yocto Project
- :yocto_git:`Source Repositories </poky/tree/meta-yocto-bsp/conf>`
- demonstrates the required syntax. For your layer, you need to replace
- "yoctobsp" with a unique identifier for your layer (e.g. "machinexyz"
- for a layer named "meta-machinexyz")::
-
- # We have a conf and classes directory, add to BBPATH
- BBPATH .= ":${LAYERDIR}"
-
- # We have recipes-* directories, add to BBFILES
- BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
- ${LAYERDIR}/recipes-*/*/*.bbappend"
-
- BBFILE_COLLECTIONS += "yoctobsp"
- BBFILE_PATTERN_yoctobsp = "^${LAYERDIR}/"
- BBFILE_PRIORITY_yoctobsp = "5"
- LAYERVERSION_yoctobsp = "4"
- LAYERSERIES_COMPAT_yoctobsp = "dunfell"
-
- Following is an explanation of the layer configuration file:
-
- - :term:`BBPATH`: Adds the layer's
- root directory to BitBake's search path. Through the use of the
- :term:`BBPATH` variable, BitBake locates class files (``.bbclass``),
- configuration files, and files that are included with ``include``
- and ``require`` statements. For these cases, BitBake uses the
- first file that matches the name found in :term:`BBPATH`. This is
- similar to the way the ``PATH`` variable is used for binaries. It
- is recommended, therefore, that you use unique class and
- configuration filenames in your custom layer.
-
- - :term:`BBFILES`: Defines the
- location for all recipes in the layer.
-
- - :term:`BBFILE_COLLECTIONS`:
- Establishes the current layer through a unique identifier that is
- used throughout the OpenEmbedded build system to refer to the
- layer. In this example, the identifier "yoctobsp" is the
- representation for the container layer named "meta-yocto-bsp".
-
- - :term:`BBFILE_PATTERN`:
- Expands immediately during parsing to provide the directory of the
- layer.
-
- - :term:`BBFILE_PRIORITY`:
- Establishes a priority to use for recipes in the layer when the
- OpenEmbedded build finds recipes of the same name in different
- layers.
-
- - :term:`LAYERVERSION`:
- Establishes a version number for the layer. You can use this
- version number to specify this exact version of the layer as a
- dependency when using the
- :term:`LAYERDEPENDS`
- variable.
-
- - :term:`LAYERDEPENDS`:
- Lists all layers on which this layer depends (if any).
-
- - :term:`LAYERSERIES_COMPAT`:
- Lists the :yocto_wiki:`Yocto Project </Releases>`
- releases for which the current version is compatible. This
- variable is a good way to indicate if your particular layer is
- current.
-
-4. *Add Content:* Depending on the type of layer, add the content. If
- the layer adds support for a machine, add the machine configuration
- in a ``conf/machine/`` file within the layer. If the layer adds
- distro policy, add the distro configuration in a ``conf/distro/``
- file within the layer. If the layer introduces new recipes, put the
- recipes you need in ``recipes-*`` subdirectories within the layer.
-
- .. note::
-
- For an explanation of layer hierarchy that is compliant with the
- Yocto Project, see the ":ref:`bsp-guide/bsp:example filesystem layout`"
- section in the Yocto Project Board Support Package (BSP) Developer's Guide.
-
-5. *Optionally Test for Compatibility:* If you want permission to use
- the Yocto Project Compatibility logo with your layer or application
- that uses your layer, perform the steps to apply for compatibility.
- See the
- ":ref:`dev-manual/common-tasks:making sure your layer is compatible with yocto project`"
- section for more information.
-
-Following Best Practices When Creating Layers
----------------------------------------------
-
-To create layers that are easier to maintain and that will not impact
-builds for other machines, you should consider the information in the
-following list:
-
-- *Avoid "Overlaying" Entire Recipes from Other Layers in Your
- Configuration:* In other words, do not copy an entire recipe into
- your layer and then modify it. Rather, use an append file
- (``.bbappend``) to override only those parts of the original recipe
- you need to modify.
-
-- *Avoid Duplicating Include Files:* Use append files (``.bbappend``)
- for each recipe that uses an include file. Or, if you are introducing
- a new recipe that requires the included file, use the path relative
- to the original layer directory to refer to the file. For example,
- use ``require recipes-core/``\ `package`\ ``/``\ `file`\ ``.inc`` instead
- of ``require`` `file`\ ``.inc``. If you're finding you have to overlay
- the include file, it could indicate a deficiency in the include file
- in the layer to which it originally belongs. If this is the case, you
- should try to address that deficiency instead of overlaying the
- include file. For example, you could address this by getting the
- maintainer of the include file to add a variable or variables to make
- it easy to override the parts needing to be overridden.
-
-- *Structure Your Layers:* Proper use of overrides within append files
- and placement of machine-specific files within your layer can ensure
- that a build is not using the wrong Metadata and negatively impacting
- a build for a different machine. Following are some examples:
-
- - *Modify Variables to Support a Different Machine:* Suppose you
- have a layer named ``meta-one`` that adds support for building
- machine "one". To do so, you use an append file named
- ``base-files.bbappend`` and create a dependency on "foo" by
- altering the :term:`DEPENDS`
- variable::
-
- DEPENDS = "foo"
-
- The dependency is created during any
- build that includes the layer ``meta-one``. However, you might not
- want this dependency for all machines. For example, suppose you
- are building for machine "two" but your ``bblayers.conf`` file has
- the ``meta-one`` layer included. During the build, the
- ``base-files`` for machine "two" will also have the dependency on
- ``foo``.
-
- To make sure your changes apply only when building machine "one",
- use a machine override with the :term:`DEPENDS` statement::
-
- DEPENDS:one = "foo"
-
- You should follow the same strategy when using ``:append``
- and ``:prepend`` operations::
-
- DEPENDS:append:one = " foo"
- DEPENDS:prepend:one = "foo "
-
- As an actual example, here's a
- snippet from the generic kernel include file ``linux-yocto.inc``,
- wherein the kernel compile and link options are adjusted in the
- case of a subset of the supported architectures::
-
- DEPENDS:append:aarch64 = " libgcc"
- KERNEL_CC:append:aarch64 = " ${TOOLCHAIN_OPTIONS}"
- KERNEL_LD:append:aarch64 = " ${TOOLCHAIN_OPTIONS}"
-
- DEPENDS:append:nios2 = " libgcc"
- KERNEL_CC:append:nios2 = " ${TOOLCHAIN_OPTIONS}"
- KERNEL_LD:append:nios2 = " ${TOOLCHAIN_OPTIONS}"
-
- DEPENDS:append:arc = " libgcc"
- KERNEL_CC:append:arc = " ${TOOLCHAIN_OPTIONS}"
- KERNEL_LD:append:arc = " ${TOOLCHAIN_OPTIONS}"
-
- KERNEL_FEATURES:append:qemuall=" features/debug/printk.scc"
-
- - *Place Machine-Specific Files in Machine-Specific Locations:* When
- you have a base recipe, such as ``base-files.bb``, that contains a
- :term:`SRC_URI` statement to a
- file, you can use an append file to cause the build to use your
- own version of the file. For example, an append file in your layer
- at ``meta-one/recipes-core/base-files/base-files.bbappend`` could
- extend :term:`FILESPATH` using :term:`FILESEXTRAPATHS` as follows::
-
- FILESEXTRAPATHS:prepend := "${THISDIR}/${BPN}:"
-
- The build for machine "one" will pick up your machine-specific file as
- long as you have the file in
- ``meta-one/recipes-core/base-files/base-files/``. However, suppose you
- are building for a different machine, the ``bblayers.conf``
- file includes the ``meta-one`` layer, and the location of your
- machine-specific file is the first location where that file is
- found according to :term:`FILESPATH`. In that case, builds for all
- machines will also use that machine-specific file.
-
- You can make sure that a machine-specific file is used for a
- particular machine by putting the file in a subdirectory specific
- to the machine. For example, rather than placing the file in
- ``meta-one/recipes-core/base-files/base-files/`` as shown above,
- put it in ``meta-one/recipes-core/base-files/base-files/one/``.
- Not only does this make sure the file is used only when building
- for machine "one", but the build process locates the file more
- quickly.
-
- In summary, you need to place all files referenced from
- :term:`SRC_URI` in a machine-specific subdirectory within the layer in
- order to restrict those files to machine-specific builds.
-
-- *Perform Steps to Apply for Yocto Project Compatibility:* If you want
- permission to use the Yocto Project Compatibility logo with your
- layer or application that uses your layer, perform the steps to apply
- for compatibility. See the
- ":ref:`dev-manual/common-tasks:making sure your layer is compatible with yocto project`"
- section for more information.
-
-- *Follow the Layer Naming Convention:* Store custom layers in a Git
- repository that uses the ``meta-layer_name`` format.
-
-- *Group Your Layers Locally:* Clone your repository alongside other
- cloned ``meta`` directories from the :term:`Source Directory`.
-
-Making Sure Your Layer is Compatible With Yocto Project
--------------------------------------------------------
-
-When you create a layer used with the Yocto Project, it is advantageous
-to make sure that the layer interacts well with existing Yocto Project
-layers (i.e. the layer is compatible with the Yocto Project). Ensuring
-compatibility makes the layer easy for others in the Yocto Project
-community to consume and could earn you permission to use the Yocto
-Project Compatible Logo.
-
-.. note::
-
- Only Yocto Project member organizations are permitted to use the
- Yocto Project Compatible Logo. The logo is not available for general
- use. For information on how to become a Yocto Project member
- organization, see the :yocto_home:`Yocto Project Website <>`.
-
-The Yocto Project Compatibility Program consists of a layer application
-process that requests permission to use the Yocto Project Compatibility
-Logo for your layer and application. The process consists of two parts:
-
-1. Successfully passing a script (``yocto-check-layer``) that, when run
- against your layer, tests it against constraints based on experiences
- of how layers have worked in the real world and where pitfalls have
- been found. Getting a "PASS" result from the script is required for
- successful compatibility registration.
-
-2. Completion of an application acceptance form, which you can find at
- :yocto_home:`/webform/yocto-project-compatible-registration`.
-
-To be granted permission to use the logo, you need to satisfy the
-following:
-
-- Be able to check the box indicating that you got a "PASS" when
- running the script against your layer.
-
-- Answer "Yes" to the questions on the form or have an acceptable
- explanation for any questions answered "No".
-
-- Be a Yocto Project Member Organization.
-
-The remainder of this section presents information on the registration
-form and on the ``yocto-check-layer`` script.
-
-Yocto Project Compatible Program Application
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Use the form to apply for your layer's approval. Upon successful
-application, you can use the Yocto Project Compatibility Logo with your
-layer and the application that uses your layer.
-
-To access the form, use this link:
-:yocto_home:`/webform/yocto-project-compatible-registration`.
-Follow the instructions on the form to complete your application.
-
-The application consists of the following sections:
-
-- *Contact Information:* Provide your contact information as the fields
- require. Along with your information, provide the released versions
- of the Yocto Project for which your layer is compatible.
-
-- *Acceptance Criteria:* Provide "Yes" or "No" answers for each of the
- items in the checklist. There is space at the bottom of the form for
- any explanations for items for which you answered "No".
-
-- *Recommendations:* Provide answers for the questions regarding Linux
- kernel use and build success.
-
-``yocto-check-layer`` Script
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The ``yocto-check-layer`` script provides you a way to assess how
-compatible your layer is with the Yocto Project. You should run this
-script prior to using the form to apply for compatibility as described
-in the previous section. You need to achieve a "PASS" result in order to
-have your application form successfully processed.
-
-The script divides tests into three areas: COMMON, BSP, and DISTRO. For
-example, given a distribution layer (DISTRO), the layer must pass both
-the COMMON and DISTRO related tests. Furthermore, if your layer is a BSP
-layer, the layer must pass the COMMON and BSP set of tests.
-
-To execute the script, enter the following commands from your build
-directory::
-
- $ source oe-init-build-env
- $ yocto-check-layer your_layer_directory
-
-Be sure to provide the actual directory for your
-layer as part of the command.
-
-Entering the command causes the script to determine the type of layer
-and then to execute a set of specific tests against the layer. The
-following list overviews the tests:
-
-- ``common.test_readme``: Tests if a ``README`` file exists in the
- layer and the file is not empty.
-
-- ``common.test_parse``: Tests to make sure that BitBake can parse the
- files without error (i.e. ``bitbake -p``).
-
-- ``common.test_show_environment``: Tests that the global or per-recipe
- environment is in order without errors (i.e. ``bitbake -e``).
-
-- ``common.test_world``: Verifies that ``bitbake world`` works.
-
-- ``common.test_signatures``: Tests to be sure that BSP and DISTRO
- layers do not come with recipes that change signatures.
-
-- ``common.test_layerseries_compat``: Verifies layer compatibility is
- set properly.
-
-- ``bsp.test_bsp_defines_machines``: Tests if a BSP layer has machine
- configurations.
-
-- ``bsp.test_bsp_no_set_machine``: Tests to ensure a BSP layer does not
- set the machine when the layer is added.
-
-- ``bsp.test_machine_world``: Verifies that ``bitbake world`` works
- regardless of which machine is selected.
-
-- ``bsp.test_machine_signatures``: Verifies that building for a
- particular machine affects only the signature of tasks specific to
- that machine.
-
-- ``distro.test_distro_defines_distros``: Tests if a DISTRO layer has
- distro configurations.
-
-- ``distro.test_distro_no_set_distros``: Tests to ensure a DISTRO layer
- does not set the distribution when the layer is added.
-
-Enabling Your Layer
--------------------
-
-Before the OpenEmbedded build system can use your new layer, you need to
-enable it. To enable your layer, simply add your layer's path to the
-:term:`BBLAYERS` variable in your ``conf/bblayers.conf`` file, which is
-found in the :term:`Build Directory`.
-The following example shows how to enable your new
-``meta-mylayer`` layer (note how your new layer exists outside of
-the official ``poky`` repository which you would have checked out earlier)::
-
- # POKY_BBLAYERS_CONF_VERSION is increased each time build/conf/bblayers.conf
- # changes incompatibly
- POKY_BBLAYERS_CONF_VERSION = "2"
- BBPATH = "${TOPDIR}"
- BBFILES ?= ""
- BBLAYERS ?= " \
- /home/user/poky/meta \
- /home/user/poky/meta-poky \
- /home/user/poky/meta-yocto-bsp \
- /home/user/mystuff/meta-mylayer \
- "
-
-BitBake parses each ``conf/layer.conf`` file from the top down as
-specified in the :term:`BBLAYERS` variable within the ``conf/bblayers.conf``
-file. During the processing of each ``conf/layer.conf`` file, BitBake
-adds the recipes, classes and configurations contained within the
-particular layer to the source directory.
-
-Appending Other Layers Metadata With Your Layer
------------------------------------------------
-
-A recipe that appends Metadata to another recipe is called a BitBake
-append file. A BitBake append file uses the ``.bbappend`` file type
-suffix, while the corresponding recipe to which Metadata is being
-appended uses the ``.bb`` file type suffix.
-
-You can use a ``.bbappend`` file in your layer to make additions or
-changes to the content of another layer's recipe without having to copy
-the other layer's recipe into your layer. Your ``.bbappend`` file
-resides in your layer, while the main ``.bb`` recipe file to which you
-are appending Metadata resides in a different layer.
-
-Being able to append information to an existing recipe not only avoids
-duplication, but also automatically applies recipe changes from a
-different layer into your layer. If you were copying recipes, you would
-have to manually merge changes as they occur.
-
-When you create an append file, you must use the same root name as the
-corresponding recipe file. For example, the append file
-``someapp_3.1.bbappend`` must apply to ``someapp_3.1.bb``. This
-means the original recipe and append filenames are version
-number-specific. If the corresponding recipe is renamed to update to a
-newer version, you must also rename and possibly update the
-corresponding ``.bbappend``. During the build process, BitBake
-displays an error on starting if it detects a ``.bbappend`` file that
-does not have a corresponding recipe with a matching name. See the
-:term:`BB_DANGLINGAPPENDS_WARNONLY`
-variable for information on how to handle this error.
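-
-If you do not want to tie an append file to one specific version, note
-that BitBake also accepts the "%" wildcard character directly in front
-of the ``.bbappend`` suffix. For example, an append file with the
-following name (``someapp`` is a hypothetical recipe name used here for
-illustration) would apply to any version of ``someapp``::
-
- someapp_%.bbappend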
-
-Overlaying a File Using Your Layer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As an example, consider the main formfactor recipe and a corresponding
-formfactor append file both from the :term:`Source Directory`.
-Here is the main
-formfactor recipe, which is named ``formfactor_0.0.bb`` and located in
-the "meta" layer at ``meta/recipes-bsp/formfactor``::
-
- SUMMARY = "Device formfactor information"
- DESCRIPTION = "A formfactor configuration file provides information about the \
- target hardware for which the image is being built and information that the \
- build system cannot obtain from other sources such as the kernel."
- SECTION = "base"
- LICENSE = "MIT"
- LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
- PR = "r45"
-
- SRC_URI = "file://config file://machconfig"
- S = "${WORKDIR}"
-
- PACKAGE_ARCH = "${MACHINE_ARCH}"
- INHIBIT_DEFAULT_DEPS = "1"
-
- do_install() {
- # Install file only if it has contents
- install -d ${D}${sysconfdir}/formfactor/
- install -m 0644 ${S}/config ${D}${sysconfdir}/formfactor/
- if [ -s "${S}/machconfig" ]; then
- install -m 0644 ${S}/machconfig ${D}${sysconfdir}/formfactor/
- fi
- }
-
-In the main recipe, note the :term:`SRC_URI`
-variable, which tells the OpenEmbedded build system where to find files
-during the build.
-
-Following is the append file, which is named ``formfactor_0.0.bbappend``
-and is from the Raspberry Pi BSP Layer named ``meta-raspberrypi``. The
-file is in the layer at ``recipes-bsp/formfactor``::
-
- FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
-
-By default, the build system uses the
-:term:`FILESPATH` variable to
-locate files. This append file extends the locations by setting the
-:term:`FILESEXTRAPATHS`
-variable. Setting this variable in the ``.bbappend`` file is the most
-reliable and recommended method for adding directories to the search
-path used by the build system to find files.
-
-The statement in this example extends the directories to include
-``${``\ :term:`THISDIR`\ ``}/${``\ :term:`PN`\ ``}``,
-which resolves to a directory named ``formfactor`` in the same directory
-in which the append file resides (i.e.
-``meta-raspberrypi/recipes-bsp/formfactor``). This implies that you must
-have the supporting directory structure set up that will contain any
-files or patches you will be including from the layer.
-
-Using the immediate expansion assignment operator ``:=`` is important
-because of the reference to :term:`THISDIR`. The trailing colon character is
-important as it ensures that items in the list remain colon-separated.
-
-.. note::
-
- BitBake automatically defines the :term:`THISDIR` variable. You should
- never set this variable yourself. Using ":prepend" as part of the
- :term:`FILESEXTRAPATHS` ensures your path will be searched prior to other
- paths in the final list.
-
- Also, not all append files add extra files. Many append files simply
- allow you to add build options (e.g. ``systemd``). For these cases, your
- append file would not even use the :term:`FILESEXTRAPATHS` statement.
-
-The end result of this ``.bbappend`` file is that on a Raspberry Pi, where
-``rpi`` will exist in the list of :term:`OVERRIDES`, the file
-``meta-raspberrypi/recipes-bsp/formfactor/formfactor/rpi/machconfig`` will be
-used during :ref:`ref-tasks-fetch`. The test for a non-zero file size in
-:ref:`ref-tasks-install` then returns true, and the file is installed.
-
-Installing Additional Files Using Your Layer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As another example, consider the main ``xserver-xf86-config`` recipe and a
-corresponding ``xserver-xf86-config`` append file both from the :term:`Source
-Directory`. Here is the main ``xserver-xf86-config`` recipe, which is named
-``xserver-xf86-config_0.1.bb`` and located in the "meta" layer at
-``meta/recipes-graphics/xorg-xserver``::
-
- SUMMARY = "X.Org X server configuration file"
- HOMEPAGE = "http://www.x.org"
- SECTION = "x11/base"
- LICENSE = "MIT"
- LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
- PR = "r33"
-
- SRC_URI = "file://xorg.conf"
-
- S = "${WORKDIR}"
-
- CONFFILES:${PN} = "${sysconfdir}/X11/xorg.conf"
-
- PACKAGE_ARCH = "${MACHINE_ARCH}"
- ALLOW_EMPTY:${PN} = "1"
-
- do_install () {
- if test -s ${WORKDIR}/xorg.conf; then
- install -d ${D}/${sysconfdir}/X11
- install -m 0644 ${WORKDIR}/xorg.conf ${D}/${sysconfdir}/X11/
- fi
- }
-
-Following is the append file, which is named ``xserver-xf86-config_%.bbappend``
-and is from the Raspberry Pi BSP Layer named ``meta-raspberrypi``. The
-file is in the layer at ``recipes-graphics/xorg-xserver``::
-
- FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
-
- SRC_URI:append:rpi = " \
- file://xorg.conf.d/98-pitft.conf \
- file://xorg.conf.d/99-calibration.conf \
- "
- do_install:append:rpi () {
- PITFT="${@bb.utils.contains("MACHINE_FEATURES", "pitft", "1", "0", d)}"
- if [ "${PITFT}" = "1" ]; then
- install -d ${D}/${sysconfdir}/X11/xorg.conf.d/
- install -m 0644 ${WORKDIR}/xorg.conf.d/98-pitft.conf ${D}/${sysconfdir}/X11/xorg.conf.d/
- install -m 0644 ${WORKDIR}/xorg.conf.d/99-calibration.conf ${D}/${sysconfdir}/X11/xorg.conf.d/
- fi
- }
-
- FILES:${PN}:append:rpi = " ${sysconfdir}/X11/xorg.conf.d/*"
-
-Building on the previous example, we are once again setting the
-:term:`FILESEXTRAPATHS` variable. In this case we also use
-:term:`SRC_URI` to list additional source files to use when ``rpi`` is found in
-the list of :term:`OVERRIDES`. The :ref:`ref-tasks-install` task then checks for
-an additional :term:`MACHINE_FEATURES` item ("pitft") and, if it is present,
-installs these additional files. These additional files are listed in
-:term:`FILES` so that they will be packaged.
-
-Prioritizing Your Layer
------------------------
-
-Each layer is assigned a priority value. Priority values control which
-layer takes precedence if there are recipe files with the same name in
-multiple layers. For these cases, the recipe file from the layer with a
-higher priority number takes precedence. Priority values also affect the
-order in which multiple ``.bbappend`` files for the same recipe are
-applied. You can either specify the priority manually, or allow the
-build system to calculate it based on the layer's dependencies.
-
-To specify the layer's priority manually, use the
-:term:`BBFILE_PRIORITY`
-variable and append the layer's root name::
-
- BBFILE_PRIORITY_mylayer = "1"
-
-.. note::
-
- It is possible for a recipe with a lower version number
- :term:`PV` in a layer that has a higher
- priority to take precedence.
-
- Also, the layer priority does not currently affect the precedence
- order of ``.conf`` or ``.bbclass`` files. Future versions of BitBake
- might address this.
-
-Managing Layers
----------------
-
-You can use the BitBake layer management tool ``bitbake-layers`` to
-provide a view into the structure of recipes across a multi-layer
-project. Being able to generate output that reports on configured layers
-with their paths and priorities, and on ``.bbappend`` files and the
-recipes they apply to, can help reveal potential problems.
-
-For help on the BitBake layer management tool, use the following
-command::
-
- $ bitbake-layers --help
- NOTE: Starting bitbake server...
- usage: bitbake-layers [-d] [-q] [-F] [--color COLOR] [-h] <subcommand> ...
-
- BitBake layers utility
-
- optional arguments:
- -d, --debug Enable debug output
- -q, --quiet Print only errors
- -F, --force Force add without recipe parse verification
- --color COLOR Colorize output (where COLOR is auto, always, never)
- -h, --help show this help message and exit
-
- subcommands:
- <subcommand>
- layerindex-fetch Fetches a layer from a layer index along with its
- dependent layers, and adds them to conf/bblayers.conf.
- layerindex-show-depends
- Find layer dependencies from layer index.
- add-layer Add one or more layers to bblayers.conf.
- remove-layer Remove one or more layers from bblayers.conf.
- flatten flatten layer configuration into a separate output
- directory.
- show-layers show current configured layers.
- show-overlayed list overlayed recipes (where the same recipe exists
- in another layer)
- show-recipes list available recipes, showing the layer they are
- provided by
- show-appends list bbappend files and recipe files they apply to
- show-cross-depends Show dependencies between recipes that cross layer
- boundaries.
- create-layer Create a basic layer
-
- Use bitbake-layers <subcommand> --help to get help on a specific command
-
-The following list describes the available commands:
-
-- ``help:`` Displays general help or help on a specified command.
-
-- ``show-layers:`` Shows the current configured layers.
-
-- ``show-overlayed:`` Lists overlayed recipes. A recipe is overlayed
- when a recipe with the same name exists in another layer that has a
- higher layer priority.
-
-- ``show-recipes:`` Lists available recipes and the layers that
- provide them.
-
-- ``show-appends:`` Lists ``.bbappend`` files and the recipe files to
- which they apply.
-
-- ``show-cross-depends:`` Lists dependency relationships between
- recipes that cross layer boundaries.
-
-- ``add-layer:`` Adds a layer to ``bblayers.conf``.
-
-- ``remove-layer:`` Removes a layer from ``bblayers.conf``.
-
-- ``flatten:`` Flattens the layer configuration into a separate
- output directory. Flattening your layer configuration builds a
- "flattened" directory that contains the contents of all layers, with
- any overlayed recipes removed and any ``.bbappend`` files appended to
- the corresponding recipes. You might have to perform some manual
- cleanup of the flattened layer as follows:
-
- - Non-recipe files (such as patches) are overwritten. The flatten
- command shows a warning for these files.
-
- - Anything beyond the normal layer setup has been added to the
- ``layer.conf`` file. Only the lowest priority layer's
- ``layer.conf`` is used.
-
- - Overridden and appended items from ``.bbappend`` files need to be
- cleaned up. The contents of each ``.bbappend`` end up in the
- flattened recipe. However, if there are appended or changed
- variable values, you need to tidy these up yourself. Consider the
- following example. Here, the ``bitbake-layers`` command adds the
- line ``#### bbappended ...`` so that you know where the following
- lines originate::
-
- ...
- DESCRIPTION = "A useful utility"
- ...
- EXTRA_OECONF = "--enable-something"
- ...
-
- #### bbappended from meta-anotherlayer ####
-
- DESCRIPTION = "Customized utility"
- EXTRA_OECONF += "--enable-somethingelse"
-
-
- Ideally, you would tidy these up as follows::
-
- ...
- DESCRIPTION = "Customized utility"
- ...
- EXTRA_OECONF = "--enable-something --enable-somethingelse"
- ...
-
-- ``layerindex-fetch``: Fetches a layer from a layer index, along
- with its dependent layers, and adds the layers to the
- ``conf/bblayers.conf`` file.
-
-- ``layerindex-show-depends``: Finds layer dependencies from the
- layer index.
-
-- ``create-layer``: Creates a basic layer.
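-
-For example, to see which ``.bbappend`` files apply to which recipes in
-your current configuration, you could run the following from your build
-directory::
-
- $ bitbake-layers show-appends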
-
-Creating a General Layer Using the ``bitbake-layers`` Script
-------------------------------------------------------------
-
-The ``bitbake-layers`` script with the ``create-layer`` subcommand
-simplifies creating a new general layer.
-
-.. note::
-
- - For information on BSP layers, see the ":ref:`bsp-guide/bsp:bsp layers`"
- section in the Yocto
- Project Board Support Package (BSP) Developer's Guide.
-
- - In order to use a layer with the OpenEmbedded build system, you
- need to add the layer to your ``bblayers.conf`` configuration
- file. See the ":ref:`dev-manual/common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`"
- section for more information.
-
-The default mode of the script's operation with this subcommand is to
-create a layer with the following:
-
-- A layer priority of 6.
-
-- A ``conf`` subdirectory that contains a ``layer.conf`` file.
-
-- A ``recipes-example`` subdirectory that contains a further
- subdirectory named ``example``, which contains an ``example.bb``
- recipe file.
-
-- A ``COPYING.MIT`` file, which is the license statement for the layer. The
- script assumes you want to use the MIT license, which is typical for
- most layers, for the contents of the layer itself.
-
-- A ``README`` file, which is a file describing the contents of your
- new layer.
-
-In its simplest form, you can use the following command form to create a
-layer. The command creates a layer whose name corresponds to
-"your_layer_name" in the current directory::
-
- $ bitbake-layers create-layer your_layer_name
-
-As an example, the following command creates a layer named ``meta-scottrif``
-in your home directory::
-
- $ cd /usr/home
- $ bitbake-layers create-layer meta-scottrif
- NOTE: Starting bitbake server...
- Add your new layer with 'bitbake-layers add-layer meta-scottrif'
-
-If you want to set the priority of the layer to a value other than the
-default of "6", you can either use the ``--priority`` option or
-edit the
-:term:`BBFILE_PRIORITY` value
-in the ``conf/layer.conf`` file after the script creates it. Furthermore, if
-you want to give the example recipe file some name other than the
-default, you can use the ``--example-recipe-name`` option.
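-
-For example, the following hypothetical command creates the layer with a
-priority of "7" instead of the default::
-
- $ bitbake-layers create-layer --priority 7 meta-scottrif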
-
-The easiest way to see how the ``bitbake-layers create-layer`` command
-works is to experiment with the script. You can also read the usage
-information by entering the following::
-
- $ bitbake-layers create-layer --help
- NOTE: Starting bitbake server...
- usage: bitbake-layers create-layer [-h] [--priority PRIORITY]
- [--example-recipe-name EXAMPLERECIPE]
- layerdir
-
- Create a basic layer
-
- positional arguments:
- layerdir Layer directory to create
-
- optional arguments:
- -h, --help show this help message and exit
- --priority PRIORITY, -p PRIORITY
- Layer directory to create
- --example-recipe-name EXAMPLERECIPE, -e EXAMPLERECIPE
- Filename of the example recipe
-
-Adding a Layer Using the ``bitbake-layers`` Script
---------------------------------------------------
-
-Once you create your general layer, you must add it to your
-``bblayers.conf`` file. Adding the layer to this configuration file
-makes the OpenEmbedded build system aware of your layer so that it can
-search it for metadata.
-
-Add your layer by using the ``bitbake-layers add-layer`` command::
-
- $ bitbake-layers add-layer your_layer_name
-
-Here is an example that adds a
-layer named ``meta-scottrif`` to the configuration file. Following the
-command that adds the layer is another ``bitbake-layers`` command that
-shows the layers that are in your ``bblayers.conf`` file::
-
- $ bitbake-layers add-layer meta-scottrif
- NOTE: Starting bitbake server...
- Parsing recipes: 100% |##########################################################| Time: 0:00:49
- Parsing of 1441 .bb files complete (0 cached, 1441 parsed). 2055 targets, 56 skipped, 0 masked, 0 errors.
- $ bitbake-layers show-layers
- NOTE: Starting bitbake server...
- layer path priority
- ==========================================================================
- meta /home/scottrif/poky/meta 5
- meta-poky /home/scottrif/poky/meta-poky 5
- meta-yocto-bsp /home/scottrif/poky/meta-yocto-bsp 5
- workspace /home/scottrif/poky/build/workspace 99
- meta-scottrif /home/scottrif/poky/build/meta-scottrif 6
-
-
-Adding the layer to this file
-enables the build system to locate the layer during the build.
-
-.. note::
-
- During a build, the OpenEmbedded build system looks in the layers
- from the top of the list down to the bottom in that order.
-
-Customizing Images
-==================
-
-You can customize images to satisfy particular requirements. This
-section describes several methods and provides guidelines for each.
-
-Customizing Images Using ``local.conf``
----------------------------------------
-
-Probably the easiest way to customize an image is to add a package by
-way of the ``local.conf`` configuration file. Because it is limited to
-local use, this method generally only allows you to add packages and is
-not as flexible as creating your own customized image. When you add
-packages using local variables this way, you need to realize that these
-variable changes are in effect for every build and consequently affect
-all images, which might not be what you require.
-
-To add a package to your image using the local configuration file, use
-the :term:`IMAGE_INSTALL` variable with the ``:append`` operator::
-
- IMAGE_INSTALL:append = " strace"
-
-Use of the syntax is important; specifically, the leading space
-after the opening quote and before the package name, which is
-``strace`` in this example. This space is required since the ``:append``
-operator does not add the space.
-
-Furthermore, you must use ``:append`` instead of the ``+=`` operator if
-you want to avoid ordering issues. This is because ``:append``
-unconditionally appends to the variable and avoids ordering problems
-caused by the variable being set in image recipes and ``.bbclass`` files
-with operators like ``?=``. Using ``:append`` ensures the operation
-takes effect.
-
-As shown in its simplest use, ``IMAGE_INSTALL:append`` affects all
-images. It is possible to extend the syntax so that the variable applies
-to a specific image only. Here is an example::
-
- IMAGE_INSTALL:append:pn-core-image-minimal = " strace"
-
-This example adds ``strace`` to the ``core-image-minimal`` image only.
-
-You can add packages using a similar approach through the
-:term:`CORE_IMAGE_EXTRA_INSTALL` variable. If you use this variable, only
-``core-image-*`` images are affected.
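-
-For example, adding the following to your ``local.conf`` file would add
-``strace`` to all ``core-image-*`` images::
-
- CORE_IMAGE_EXTRA_INSTALL += "strace"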
-
-Customizing Images Using Custom ``IMAGE_FEATURES`` and ``EXTRA_IMAGE_FEATURES``
--------------------------------------------------------------------------------
-
-Another method for customizing your image is to enable or disable
-high-level image features by using the
-:term:`IMAGE_FEATURES` and
-:term:`EXTRA_IMAGE_FEATURES`
-variables. Although the functions for both variables are nearly
-equivalent, best practices dictate using :term:`IMAGE_FEATURES` from within
-a recipe and using :term:`EXTRA_IMAGE_FEATURES` from within your
-``local.conf`` file, which is found in the
-:term:`Build Directory`.
-
-To understand how these features work, the best reference is
-:ref:`meta/classes/image.bbclass <ref-classes-image>`.
-This class lists out the available
-:term:`IMAGE_FEATURES` of which most map to package groups while some, such
-as ``debug-tweaks`` and ``read-only-rootfs``, resolve as general
-configuration settings.
-
-In summary, the file looks at the contents of the :term:`IMAGE_FEATURES`
-variable and then maps or configures the feature accordingly. Based on
-this information, the build system automatically adds the appropriate
-packages or configurations to the
-:term:`IMAGE_INSTALL` variable.
-Effectively, you are enabling extra features by extending the class or
-creating a custom class for use with specialized image ``.bb`` files.
-
-Use the :term:`EXTRA_IMAGE_FEATURES` variable from within your local
-configuration file. Using a separate area from which to enable features
-with this variable helps you avoid overwriting the features in the image
-recipe that are enabled with :term:`IMAGE_FEATURES`. The value of
-:term:`EXTRA_IMAGE_FEATURES` is added to :term:`IMAGE_FEATURES` within
-``meta/conf/bitbake.conf``.
-
-To illustrate how you can use these variables to modify your image,
-consider an example that selects the SSH server. The Yocto Project ships
-with two SSH servers you can use with your images: Dropbear and OpenSSH.
-Dropbear is a minimal SSH server appropriate for resource-constrained
-environments, while OpenSSH is a well-known standard SSH server
-implementation. By default, the ``core-image-sato`` image is configured
-to use Dropbear. The ``core-image-full-cmdline`` and ``core-image-lsb``
-images both include OpenSSH. The ``core-image-minimal`` image does not
-contain an SSH server.
-
-You can customize your image and change these defaults. Edit the
-:term:`IMAGE_FEATURES` variable in your recipe or use the
-:term:`EXTRA_IMAGE_FEATURES` in your ``local.conf`` file so that it
-configures the image you are working with to include
-``ssh-server-dropbear`` or ``ssh-server-openssh``.
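-
-For example, a minimal ``local.conf`` addition that selects OpenSSH for
-your images could look like the following::
-
- EXTRA_IMAGE_FEATURES += "ssh-server-openssh"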
-
-.. note::
-
- See the ":ref:`ref-manual/features:image features`" section in the Yocto
- Project Reference Manual for a complete list of image features that ship
- with the Yocto Project.
-
-Customizing Images Using Custom .bb Files
------------------------------------------
-
-You can also customize an image by creating a custom recipe that defines
-additional software as part of the image. The following example shows
-the form for the two lines you need::
-
- IMAGE_INSTALL = "packagegroup-core-x11-base package1 package2"
- inherit core-image
-
-Defining the software using a custom recipe gives you total control over
-the contents of the image. It is important to use the correct names of
-packages in the :term:`IMAGE_INSTALL` variable. You must use the
-OpenEmbedded notation and not the Debian notation for the names (e.g.
-``glibc-dev`` instead of ``libc6-dev``).
-
-The other method for creating a custom image is to base it on an
-existing image. For example, if you want to create an image based on
-``core-image-sato`` but add the additional package ``strace`` to the
-image, copy the ``meta/recipes-sato/images/core-image-sato.bb`` to a new
-``.bb`` and add the following line to the end of the copy::
-
- IMAGE_INSTALL += "strace"
-
-Customizing Images Using Custom Package Groups
-----------------------------------------------
-
-For complex custom images, the best approach for customizing an image is
-to create a custom package group recipe that is used to build the image
-or images. A good example of a package group recipe is
-``meta/recipes-core/packagegroups/packagegroup-base.bb``.
-
-If you examine that recipe, you see that the :term:`PACKAGES` variable lists
-the package group packages to produce. The ``inherit packagegroup``
-statement sets appropriate default values and automatically adds
-``-dev``, ``-dbg``, and ``-ptest`` complementary packages for each
-package specified in the :term:`PACKAGES` statement.
-
-.. note::
-
- The ``inherit packagegroup`` line should be located near the top of the
- recipe, certainly before the :term:`PACKAGES` statement.
-
-For each package you specify in :term:`PACKAGES`, you can use :term:`RDEPENDS`
-and :term:`RRECOMMENDS` entries to provide a list of packages the parent
-task package should contain. You can see examples of these further down
-in the ``packagegroup-base.bb`` recipe.
-
-Here is a short, fabricated example showing the same basic pieces for a
-hypothetical packagegroup defined in ``packagegroup-custom.bb``, where
-the variable :term:`PN` is the standard way to abbreviate the reference to
-the full packagegroup name ``packagegroup-custom``::
-
- DESCRIPTION = "My Custom Package Groups"
-
- inherit packagegroup
-
- PACKAGES = "\
- ${PN}-apps \
- ${PN}-tools \
- "
-
- RDEPENDS:${PN}-apps = "\
- dropbear \
- portmap \
- psplash"
-
- RDEPENDS:${PN}-tools = "\
- oprofile \
- oprofileui-server \
- lttng-tools"
-
- RRECOMMENDS:${PN}-tools = "\
- kernel-module-oprofile"
-
-In the previous example, two package group packages are created with
-their dependencies and their recommended package dependencies listed:
-``packagegroup-custom-apps``, and ``packagegroup-custom-tools``. To
-build an image using these package group packages, you need to add
-``packagegroup-custom-apps`` and/or ``packagegroup-custom-tools`` to
-:term:`IMAGE_INSTALL`. For other forms of image dependencies see the other
-areas of this section.
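-
-For example, you could add the following to an image recipe or append
-file (a minimal sketch using the hypothetical package groups defined
-above)::
-
- IMAGE_INSTALL:append = " packagegroup-custom-apps packagegroup-custom-tools"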
-
-Customizing an Image Hostname
------------------------------
-
-By default, the configured hostname (i.e. ``/etc/hostname``) in an image
-is the same as the machine name. For example, if
-:term:`MACHINE` equals "qemux86", the
-configured hostname written to ``/etc/hostname`` is "qemux86".
-
-You can customize this name by altering the value of the "hostname"
-variable in the ``base-files`` recipe using either an append file or a
-configuration file. Use the following in an append file::
-
- hostname = "myhostname"
-
-Use the following in a configuration file::
-
- hostname:pn-base-files = "myhostname"
-
-Changing the default value of the variable "hostname" can be useful in
-certain situations. For example, suppose you need to do extensive
-testing on an image and you would like to easily identify the image
-under test from existing images with typical default hostnames. In this
-situation, you could change the default hostname to "testme", which
-results in all the images using the name "testme". Once testing is
-complete and you do not need to rebuild the image for test any longer,
-you can easily reset the default hostname.
-
-Another point of interest is that if you unset the variable, the image
-will have no default hostname in the filesystem. Here is an example that
-unsets the variable in a configuration file::
-
- hostname:pn-base-files = ""
-
-Having no default hostname in the filesystem is suitable for
-environments that use dynamic hostnames such as virtual machines.
-
-Writing a New Recipe
-====================
-
-Recipes (``.bb`` files) are fundamental components in the Yocto Project
-environment. Each software component built by the OpenEmbedded build
-system requires a recipe to define the component. This section describes
-how to create, write, and test a new recipe.
-
-.. note::
-
- For information on variables that are useful for recipes and for
- information about recipe naming issues, see the
- ":ref:`ref-manual/varlocality:recipes`" section of the Yocto Project
- Reference Manual.
-
-Overview
---------
-
-The following figure shows the basic process for creating a new recipe.
-The remainder of the section provides details for the steps.
-
-.. image:: figures/recipe-workflow.png
- :align: center
-
-Locate or Automatically Create a Base Recipe
---------------------------------------------
-
-You can always write a recipe from scratch. However, there are three choices
-that can help you quickly get started with a new recipe:
-
-- ``devtool add``: A command that assists in creating a recipe and an
- environment conducive to development.
-
-- ``recipetool create``: A command provided by the Yocto Project that
- automates creation of a base recipe based on the source files.
-
-- *Existing Recipes:* Location and modification of an existing recipe
- that is similar in function to the recipe you need.
-
-.. note::
-
- For information on recipe syntax, see the
- ":ref:`dev-manual/common-tasks:recipe syntax`" section.
-
-Creating the Base Recipe Using ``devtool add``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The ``devtool add`` command uses the same logic for auto-creating the
-recipe as ``recipetool create``, which is listed below. Additionally,
-however, ``devtool add`` sets up an environment that makes it easy for
-you to patch the source and to make changes to the recipe as is often
-necessary when adding a recipe to build a new piece of software to be
-included in a build.
-
-You can find a complete description of the ``devtool add`` command in
-the ":ref:`sdk-manual/extensible:a closer look at \`\`devtool add\`\``" section
-in the Yocto Project Application Development and the Extensible Software
-Development Kit (eSDK) manual.
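-
-As a quick, hypothetical illustration (the URL below is an example
-only), you could create a recipe and a development environment for a
-tarball-based project with a single command::
-
- $ devtool add https://example.com/myapp-1.0.tar.gz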
-
-Creating the Base Recipe Using ``recipetool create``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-``recipetool create`` automates creation of a base recipe given a set of
-source code files. As long as you can extract or point to the source
-files, the tool will construct a recipe and automatically configure all
-pre-build information into the recipe. For example, suppose you have an
-application that builds using Autotools. Creating the base recipe using
-``recipetool`` results in a recipe that has the pre-build dependencies,
-license requirements, and checksums configured.
-
-To run the tool, you just need to be in your
-:term:`Build Directory` and have sourced the
-build environment setup script (i.e.
-:ref:`structure-core-script`).
-To get help on the tool, use the following command::
-
- $ recipetool -h
- NOTE: Starting bitbake server...
- usage: recipetool [-d] [-q] [--color COLOR] [-h] <subcommand> ...
-
- OpenEmbedded recipe tool
-
- options:
- -d, --debug Enable debug output
- -q, --quiet Print only errors
- --color COLOR Colorize output (where COLOR is auto, always, never)
- -h, --help show this help message and exit
-
- subcommands:
- create Create a new recipe
- newappend Create a bbappend for the specified target in the specified
- layer
- setvar Set a variable within a recipe
- appendfile Create/update a bbappend to replace a target file
- appendsrcfiles Create/update a bbappend to add or replace source files
- appendsrcfile Create/update a bbappend to add or replace a source file
- Use recipetool <subcommand> --help to get help on a specific command
-
-Running ``recipetool create -o OUTFILE`` creates the base recipe and
-locates it properly in the layer that contains your source files.
-Following are some syntax examples:
-
- - Use this syntax to generate a recipe based on source. Once generated,
- the recipe resides in the existing source code layer::
-
- recipetool create -o OUTFILE source
-
- - Use this syntax to generate a recipe using code that
- you extract from source. The extracted code is placed in its own layer
- defined by :term:`EXTERNALSRC`.
- ::
-
- recipetool create -o OUTFILE -x EXTERNALSRC source
-
- - Use this syntax to generate a recipe based on source. The options
- direct ``recipetool`` to generate debugging information. Once generated,
- the recipe resides in the existing source code layer::
-
- recipetool create -d -o OUTFILE source
-
-Locating and Using a Similar Recipe
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Before writing a recipe from scratch, it is often useful to discover
-whether someone else has already written one that meets (or comes close
-to meeting) your needs. The Yocto Project and OpenEmbedded communities
-maintain many recipes that might be candidates for what you are doing.
-You can find a good central index of these recipes in the
-:oe_layerindex:`OpenEmbedded Layer Index <>`.
-
-Working from an existing recipe or a skeleton recipe is the best way to
-get started. Here are some points on both methods:
-
-- *Locate and modify a recipe that is close to what you want to do:*
- This method works when you are familiar with the current recipe
- space. The method does not work so well for those new to the Yocto
- Project or writing recipes.
-
- Some risks associated with this method are using a recipe that has
- areas totally unrelated to what you are trying to accomplish with
- your recipe, not recognizing areas of the recipe that you might have
- to add from scratch, and so forth. All these risks stem from
- unfamiliarity with the existing recipe space.
-
-- *Use and modify the following skeleton recipe:* If for some reason
- you do not want to use ``recipetool`` and you cannot find an existing
- recipe that is close to meeting your needs, you can use the following
- structure to provide the fundamental areas of a new recipe.
- ::
-
- DESCRIPTION = ""
- HOMEPAGE = ""
- LICENSE = ""
- SECTION = ""
- DEPENDS = ""
- LIC_FILES_CHKSUM = ""
-
- SRC_URI = ""
-
-Storing and Naming the Recipe
------------------------------
-
-Once you have your base recipe, you should put it in your own layer and
-name it appropriately. Locating it correctly ensures that the
-OpenEmbedded build system can find it when you use BitBake to process
-the recipe.
-
-- *Storing Your Recipe:* The OpenEmbedded build system locates your
- recipe through the layer's ``conf/layer.conf`` file and the
- :term:`BBFILES` variable. This
- variable sets up a path from which the build system can locate
- recipes. Here is the typical use::
-
- BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
- ${LAYERDIR}/recipes-*/*/*.bbappend"
-
- Consequently, you need to be sure you locate your new recipe inside
- your layer such that it can be found.
-
- You can find more information on how layers are structured in the
- ":ref:`dev-manual/common-tasks:understanding and creating layers`" section.
-
-- *Naming Your Recipe:* When you name your recipe, you need to follow
- this naming convention::
-
- basename_version.bb
-
- Use lower-cased characters and do not include the reserved suffixes
- ``-native``, ``-cross``, ``-initial``, or ``-dev`` casually (i.e. do not use
- them as part of your recipe name unless the string applies). Here are some
- examples:
-
- .. code-block:: none
-
- cups_1.7.0.bb
- gawk_4.0.2.bb
- irssi_0.8.16-rc1.bb
-
-Running a Build on the Recipe
------------------------------
-
-Creating a new recipe is usually an iterative process that requires
-using BitBake to process the recipe multiple times in order to
-progressively discover and add information to the recipe file.
-
-Assuming you have sourced the build environment setup script (i.e.
-:ref:`structure-core-script`) and you are in
-the :term:`Build Directory`, use
-BitBake to process your recipe. All you need to provide is the
-``basename`` of the recipe as described in the previous section::
-
- $ bitbake basename
-
-During the build, the OpenEmbedded build system creates a temporary work
-directory for each recipe
-(``${``\ :term:`WORKDIR`\ ``}``)
-where it keeps extracted source files, log files, intermediate
-compilation and packaging files, and so forth.
-
-The path to the per-recipe temporary work directory depends on the
-context in which it is being built. The quickest way to find this path
-is to have BitBake return it by running the following::
-
- $ bitbake -e basename | grep ^WORKDIR=
-
-As an example, assume a Source Directory
-top-level folder named ``poky``, a default Build Directory at
-``poky/build``, and a ``qemux86-poky-linux`` machine target system.
-Furthermore, suppose your recipe is named ``foo_1.3.0.bb``. In this
-case, the work directory the build system uses to build the package
-would be as follows::
-
- poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0
-
-Inside this directory you can find sub-directories such as ``image``,
-``packages-split``, and ``temp``. After the build, you can examine these
-to determine how well the build went.
-
-.. note::
-
- You can find log files for each task in the recipe's ``temp``
- directory (e.g. ``poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0/temp``).
- Log files are named ``log.taskname`` (e.g. ``log.do_configure``,
- ``log.do_fetch``, and ``log.do_compile``).
-
-You can find more information about the build process in
-":doc:`/overview-manual/development-environment`"
-chapter of the Yocto Project Overview and Concepts Manual.
-
-Fetching Code
--------------
-
-The first thing your recipe must do is specify how to fetch the source
-files. Fetching is controlled mainly through the
-:term:`SRC_URI` variable. Your recipe
-must have a :term:`SRC_URI` variable that points to where the source is
-located. For a graphical representation of source locations, see the
-":ref:`overview-manual/concepts:sources`" section in
-the Yocto Project Overview and Concepts Manual.
-
-The :ref:`ref-tasks-fetch` task uses
-the prefix of each entry in the :term:`SRC_URI` variable value to determine
-which :ref:`fetcher <bitbake:bitbake-user-manual/bitbake-user-manual-fetching:fetchers>` to use to get your
-source files. It is the :term:`SRC_URI` variable that triggers the fetcher.
-The :ref:`ref-tasks-patch` task uses
-the variable after source is fetched to apply patches. The OpenEmbedded
-build system uses
-:term:`FILESOVERRIDES` for
-scanning directory locations for local files in :term:`SRC_URI`.
-
-The :term:`SRC_URI` variable in your recipe must define each unique location
-for your source files. It is good practice to not hard-code version
-numbers in a URL used in :term:`SRC_URI`. Rather than hard-code these
-values, use ``${``\ :term:`PV`\ ``}``,
-which causes the fetch process to use the version specified in the
-recipe filename. Specifying the version in this manner means that
-upgrading the recipe to a future version is as simple as renaming the
-recipe to match the new version.
-
-Here is a simple example from the
-``meta/recipes-devtools/strace/strace_5.5.bb`` recipe where the source
-comes from a single tarball. Notice the use of the
-:term:`PV` variable::
-
- SRC_URI = "https://strace.io/files/${PV}/strace-${PV}.tar.xz"
-
-Files mentioned in :term:`SRC_URI` whose names end in a typical archive
-extension (e.g. ``.tar``, ``.tar.gz``, ``.tar.bz2``, ``.zip``, and so
-forth), are automatically extracted during the
-:ref:`ref-tasks-unpack` task. For
-another example that specifies these types of files, see the
-":ref:`dev-manual/common-tasks:autotooled package`" section.
-
-Another way of specifying source is from an SCM. For Git repositories,
-you must specify :term:`SRCREV` and you should specify :term:`PV` to include
-the revision with :term:`SRCPV`. Here is an example from the recipe
-``meta/recipes-core/musl/gcompat_git.bb``::
-
- SRC_URI = "git://git.adelielinux.org/adelie/gcompat.git;protocol=https;branch=current"
-
- PV = "1.0.0+1.1+git${SRCPV}"
- SRCREV = "af5a49e489fdc04b9cf02547650d7aeaccd43793"
-
-If your :term:`SRC_URI` statement includes URLs pointing to individual files
-fetched from a remote server other than a version control system,
-BitBake attempts to verify the files against checksums defined in your
-recipe to ensure they have not been tampered with or otherwise modified
-since the recipe was written. Two checksums are used:
-``SRC_URI[md5sum]`` and ``SRC_URI[sha256sum]``.
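-
-For a recipe that fetches a single remote file, you set these checksums
-directly. Here is the general form, with placeholder values shown as
-"xxx"::
-
- SRC_URI[md5sum] = "xxx"
- SRC_URI[sha256sum] = "xxx"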
-
-If your :term:`SRC_URI` variable points to more than a single URL (excluding
-SCM URLs), you need to provide the ``md5`` and ``sha256`` checksums for
-each URL. For these cases, you provide a name for each URL as part of
-the :term:`SRC_URI` and then reference that name in the subsequent checksum
-statements. Here is an example combining lines from the files
-``git.inc`` and ``git_2.24.1.bb``::
-
- SRC_URI = "${KERNELORG_MIRROR}/software/scm/git/git-${PV}.tar.gz;name=tarball \
- ${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages"
-
- SRC_URI[tarball.md5sum] = "166bde96adbbc11c8843d4f8f4f9811b"
- SRC_URI[tarball.sha256sum] = "ad5334956301c86841eb1e5b1bb20884a6bad89a10a6762c958220c7cf64da02"
- SRC_URI[manpages.md5sum] = "31c2272a8979022497ba3d4202df145d"
- SRC_URI[manpages.sha256sum] = "9a7ae3a093bea39770eb96ca3e5b40bff7af0b9f6123f089d7821d0e5b8e1230"
-
-Proper values for ``md5`` and ``sha256`` checksums might be available
-with other signatures on the download page for the upstream source (e.g.
-``md5``, ``sha1``, ``sha256``, ``GPG``, and so forth). Because the
-OpenEmbedded build system only deals with ``sha256sum`` and ``md5sum``,
-you should verify all the signatures you find by hand.
-
-If no :term:`SRC_URI` checksums are specified when you attempt to build the
-recipe, or you provide an incorrect checksum, the build will produce an
-error for each missing or incorrect checksum. As part of the error
-message, the build system provides the checksum string corresponding to
-the fetched file. Once you have the correct checksums, you can copy and
-paste them into your recipe and then run the build again to continue.
-
-.. note::
-
- As mentioned, if the upstream source provides signatures for
- verifying the downloaded source code, you should verify those
- manually before setting the checksum values in the recipe and
- continuing with the build.
-
-This final example is a bit more complicated and is from the
-``meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.20.bb`` recipe. The
-example's :term:`SRC_URI` statement identifies multiple files as the source
-files for the recipe: a tarball, a patch file, a desktop file, and an
-icon.
-::
-
- SRC_URI = "http://dist.schmorp.de/rxvt-unicode/Attic/rxvt-unicode-${PV}.tar.bz2 \
- file://xwc.patch \
- file://rxvt.desktop \
- file://rxvt.png"
-
-When you specify local files using the ``file://`` URI protocol, the
-build system fetches files from the local machine. The path is relative
-to the :term:`FILESPATH` variable
-and searches specific directories in a certain order:
-``${``\ :term:`BP`\ ``}``,
-``${``\ :term:`BPN`\ ``}``, and
-``files``. The directories are assumed to be subdirectories of the
-directory in which the recipe or append file resides. For another
-example that specifies these types of files, see the
-":ref:`dev-manual/common-tasks:single .c file package (hello world!)`" section.
-
-The previous example also specifies a patch file. Patch files are files
-whose names usually end in ``.patch`` or ``.diff`` but can end with
-compressed suffixes such as ``diff.gz`` and ``patch.bz2``, for example.
-The build system automatically applies patches as described in the
-":ref:`dev-manual/common-tasks:patching code`" section.
-
-Fetching Code Through Firewalls
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Some users are behind firewalls and need to fetch code through a proxy.
-See the ":doc:`/ref-manual/faq`" chapter for advice.
-
-Limiting the Number of Parallel Connections
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Some users are behind firewalls or use servers where the number of parallel
-connections is limited. In such cases, you can limit the number of fetch
-tasks being run in parallel by adding the following to your ``local.conf``
-file::
-
- do_fetch[number_threads] = "4"
-
-Unpacking Code
---------------
-
-During the build, the
-:ref:`ref-tasks-unpack` task unpacks
-the source with ``${``\ :term:`S`\ ``}``
-pointing to where it is unpacked.
-
-If you are fetching your source files from an upstream source archived
-tarball and the tarball's internal structure matches the common
-convention of a top-level subdirectory named
-``${``\ :term:`BPN`\ ``}-${``\ :term:`PV`\ ``}``,
-then you do not need to set :term:`S`. However, if :term:`SRC_URI` specifies to
-fetch source from an archive that does not use this convention, or from
-an SCM like Git or Subversion, your recipe needs to define :term:`S`.
-
-If processing your recipe using BitBake successfully unpacks the source
-files, you need to be sure that the directory pointed to by ``${S}``
-matches the structure of the source.
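-
-For example, when fetching from Git the source is unpacked into a
-``git`` subdirectory of the work directory, so such a recipe typically
-sets (shown here as the common convention, not a requirement)::
-
- S = "${WORKDIR}/git"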
-
-Patching Code
--------------
-
-Sometimes it is necessary to patch code after it has been fetched. Any
-files mentioned in :term:`SRC_URI` whose names end in ``.patch`` or
-``.diff``, or in compressed versions of these suffixes (e.g. ``diff.gz``), are
-treated as patches. The
-:ref:`ref-tasks-patch` task
-automatically applies these patches.
-
-The build system should be able to apply patches with the "-p1" option
-(i.e. one directory level in the path will be stripped off). If your
-patch needs to have more directory levels stripped off, specify the
-number of levels using the "striplevel" option in the :term:`SRC_URI` entry
-for the patch. Alternatively, if your patch needs to be applied in a
-specific subdirectory that is not specified in the patch file, use the
-"patchdir" option in the entry.
-
-As with all local files referenced in
-:term:`SRC_URI` using ``file://``,
-you should place patch files in a directory next to the recipe either
-named the same as the base name of the recipe
-(:term:`BP` and
-:term:`BPN`) or "files".
-
-Licensing
----------
-
-Your recipe needs to have both the
-:term:`LICENSE` and
-:term:`LIC_FILES_CHKSUM`
-variables:
-
-- :term:`LICENSE`: This variable specifies the license for the software.
- If you do not know the license under which the software you are
- building is distributed, you should go to the source code and look
- for that information. Typical files containing this information
- include ``COPYING``, ``LICENSE``, and ``README`` files. You could
- also find the information near the top of a source file. For example,
- given a piece of software licensed under the GNU General Public
- License version 2, you would set :term:`LICENSE` as follows::
-
- LICENSE = "GPL-2.0-only"
-
- The licenses you specify within :term:`LICENSE` can have any name as long
- as you do not use spaces, since spaces are used as separators between
- license names. For standard licenses, use the names of the files in
- ``meta/files/common-licenses/`` or the :term:`SPDXLICENSEMAP` flag names
- defined in ``meta/conf/licenses.conf``.
-
-- :term:`LIC_FILES_CHKSUM`: The OpenEmbedded build system uses this
- variable to make sure the license text has not changed. If it has,
- the build produces an error and it affords you the chance to figure
- it out and correct the problem.
-
- You need to specify all applicable licensing files for the software.
- At the end of the configuration step, the build process will compare
- the checksums of the files to be sure the text has not changed. Any
- differences result in an error with the message containing the
- current checksum. For more explanation and examples of how to set the
- :term:`LIC_FILES_CHKSUM` variable, see the
- ":ref:`dev-manual/common-tasks:tracking license changes`" section.
-
- To determine the correct checksum string, you can list the
- appropriate files in the :term:`LIC_FILES_CHKSUM` variable with incorrect
- md5 strings, attempt to build the software, and then note the
- resulting error messages that will report the correct md5 strings.
- See the ":ref:`dev-manual/common-tasks:fetching code`" section for
- additional information.
-
- Here is an example that assumes the software has a ``COPYING`` file::
-
- LIC_FILES_CHKSUM = "file://COPYING;md5=xxx"
-
- When you try to build the
- software, the build system will produce an error and give you the
- correct string that you can substitute into the recipe file for a
- subsequent build.
-
-Dependencies
-------------
-
-Most software packages have a short list of other packages that they
-require, which are called dependencies. These dependencies fall into two
-main categories: build-time dependencies, which are required when the
-software is built; and runtime dependencies, which are required to be
-installed on the target in order for the software to run.
-
-Within a recipe, you specify build-time dependencies using the
-:term:`DEPENDS` variable. Although there are nuances,
-items specified in :term:`DEPENDS` should be names of other
-recipes. It is important that you specify all build-time dependencies
-explicitly.
-
-Another consideration is that configure scripts might automatically
-check for optional dependencies and enable corresponding functionality
-if those dependencies are found. If you wish to make a recipe that is
-more generally useful (e.g. publish the recipe in a layer for others to
-use), instead of hard-disabling the functionality, you can use the
-:term:`PACKAGECONFIG` variable to allow functionality and the
-corresponding dependencies to be enabled and disabled easily by other
-users of the recipe.
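-
-As a brief sketch, a recipe might expose an optional feature (a
-hypothetical "gnutls" option is shown here) as follows::
-
- PACKAGECONFIG ??= ""
- PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls"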
-
-Similar to build-time dependencies, you specify runtime dependencies
-through a variable -
-:term:`RDEPENDS`, which is
-package-specific. All variables that are package-specific need to have
-the name of the package added to the end as an override. Since the main
-package for a recipe has the same name as the recipe, and the recipe's
-name can be found through the
-``${``\ :term:`PN`\ ``}`` variable, then
-you specify the dependencies for the main package by setting
-``RDEPENDS:${PN}``. If the package were named ``${PN}-tools``, then you
-would set ``RDEPENDS:${PN}-tools``, and so forth.
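-
-For instance, a recipe might declare its dependencies as follows (the
-``zlib`` and ``bash`` names are purely illustrative)::
-
- DEPENDS = "zlib"
- RDEPENDS:${PN} = "bash"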
-
-Some runtime dependencies will be set automatically at packaging time.
-These dependencies include any shared library dependencies (i.e. if a
-package "example" contains "libexample" and another package "mypackage"
-contains a binary that links to "libexample" then the OpenEmbedded build
-system will automatically add a runtime dependency to "mypackage" on
-"example"). See the
-":ref:`overview-manual/concepts:automatically added runtime dependencies`"
-section in the Yocto Project Overview and Concepts Manual for further
-details.
-
-Configuring the Recipe
-----------------------
-
-Most software provides some means of setting build-time configuration
-options before compilation. Typically, setting these options is
-accomplished by running a configure script with options, or by modifying
-a build configuration file.
-
-.. note::
-
- As of Yocto Project Release 1.7, some of the core recipes that
- package binary configuration scripts now disable the scripts due to
- the scripts previously requiring error-prone path substitution. The
- OpenEmbedded build system uses ``pkg-config`` now, which is much more
- robust. You can find a list of the ``*-config`` scripts that are disabled
- in the ":ref:`migration-1.7-binary-configuration-scripts-disabled`" section
- in the Yocto Project Reference Manual.
-
-A major part of build-time configuration is about checking for
-build-time dependencies and possibly enabling optional functionality as
-a result. You need to specify any build-time dependencies for the
-software you are building in your recipe's
-:term:`DEPENDS` value, in terms of
-other recipes that satisfy those dependencies. You can often find
-build-time or runtime dependencies described in the software's
-documentation.
-
-The following list provides configuration items of note based on how
-your software is built:
-
-- *Autotools:* If your source files have a ``configure.ac`` file, then
- your software is built using Autotools. If this is the case, you just
- need to modify the configuration.
-
- When using Autotools, your recipe needs to inherit the
- :ref:`autotools <ref-classes-autotools>` class
- and your recipe does not have to contain a
- :ref:`ref-tasks-configure` task.
- However, you might still want to make some adjustments. For example,
- you can set
- :term:`EXTRA_OECONF` or
- :term:`PACKAGECONFIG_CONFARGS`
- to pass any needed configure options that are specific to the recipe.
-
-- *CMake:* If your source files have a ``CMakeLists.txt`` file, then
- your software is built using CMake. If this is the case, you just
- need to modify the configuration.
-
- When you use CMake, your recipe needs to inherit the
- :ref:`cmake <ref-classes-cmake>` class and your
- recipe does not have to contain a
- :ref:`ref-tasks-configure` task.
- You can make some adjustments by setting
- :term:`EXTRA_OECMAKE` to
- pass any needed configure options that are specific to the recipe.
-
- .. note::
-
- If you need to install one or more custom CMake toolchain files
- that are supplied by the application you are building, install the
- files to ``${D}${datadir}/cmake/Modules`` during ``do_install``.
-
-- *Other:* If your source files do not have a ``configure.ac`` or
- ``CMakeLists.txt`` file, then your software is built using some
- method other than Autotools or CMake. If this is the case, you
- normally need to provide a
- :ref:`ref-tasks-configure` task
- in your recipe unless, of course, there is nothing to configure.
-
- Even if your software is not being built by Autotools or CMake, you
- still might not need to deal with any configuration issues. You need
- to determine if configuration is even a required step. You might need
- to modify a Makefile or some configuration file used for the build to
- specify necessary build options. Or, perhaps you might need to run a
- provided, custom configure script with the appropriate options.
-
- For the case involving a custom configure script, you would run
- ``./configure --help`` and look for the options you need to set.
-
-Once configuration succeeds, it is always good practice to look at the
-``log.do_configure`` file to ensure that the appropriate options have
-been enabled and no additional build-time dependencies need to be added
-to :term:`DEPENDS`. For example, if the configure script reports that it
-found something not mentioned in :term:`DEPENDS`, or that it did not find
-something that it needed for some desired optional functionality, then
-you would need to add those to :term:`DEPENDS`. Looking at the log might
-also reveal items being checked for, enabled, or both that you do not
-want, or items not being found that are in :term:`DEPENDS`, in which case
-you would need to look at passing extra options to the configure script
-as needed. For reference information on configure options specific to
-the software you are building, you can consult the output of the
-``./configure --help`` command within ``${S}`` or consult the software's
-upstream documentation.
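-
-For example, to pass a hypothetical ``--disable-foo`` option discovered
-through ``./configure --help``, an Autotools-based recipe could append
-it to :term:`EXTRA_OECONF`::
-
- EXTRA_OECONF += "--disable-foo"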
-
-Using Headers to Interface with Devices
----------------------------------------
-
-If your recipe builds an application that needs to communicate with some
-device or needs an API into a custom kernel, you will need to provide
-appropriate header files. Under no circumstances should you ever modify
-the existing
-``meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc`` file.
-These headers are used to build ``libc`` and must not be compromised
-with custom or machine-specific header information. If you customize
-``libc`` through modified headers, all other applications that use
-``libc`` are affected as well.
-
-.. note::
-
- Never copy and customize the ``libc`` header file (i.e.
- ``meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc``).
-
-The correct way to interface to a device or custom kernel is to use a
-separate package that provides the additional headers for the driver or
-other unique interfaces. When doing so, your application also becomes
-responsible for creating a dependency on that specific provider.
-
-Consider the following:
-
-- Never modify ``linux-libc-headers.inc``. Consider that file to be
- part of the ``libc`` system, and not something you use to access the
- kernel directly. You should access ``libc`` through specific ``libc``
- calls.
-
-- Applications that must talk directly to devices should either provide
- necessary headers themselves, or establish a dependency on a special
- headers package that is specific to that driver.
-
-For example, suppose you want to modify an existing header that adds I/O
-control or network support. If the modifications are used by a small
-number of programs, providing a unique version of a header is easy and has
-little impact. When doing so, bear in mind the guidelines in the
-previous list.
-
-.. note::
-
- If for some reason your changes need to modify the behavior of the ``libc``,
- and consequently all other applications on the system, use a ``.bbappend``
- to modify the ``linux-libc-headers.inc`` file. However, take care to not
- make the changes machine specific.
-
-Consider a case where your kernel is older and you need an older
-``libc`` ABI. The headers installed by your recipe should still be those
-of a standard mainline kernel, not your own custom one.
-
-When you use custom kernel headers you need to get them from
-:term:`STAGING_KERNEL_DIR`,
-which is the directory with kernel headers that are required to build
-out-of-tree modules. Your recipe will also need the following::
-
- do_configure[depends] += "virtual/kernel:do_shared_workdir"
-
-Compilation
------------
-
-During a build, the ``do_compile`` task happens after source is fetched,
-unpacked, and configured. If the recipe passes through ``do_compile``
-successfully, nothing needs to be done.
-
-However, if the compile step fails, you need to diagnose the failure.
-Here are some common issues that cause failures.
-
-.. note::
-
- For cases where improper paths are detected for configuration files
- or for when libraries/headers cannot be found, be sure you are using
- the more robust ``pkg-config``. See the note in section
- ":ref:`dev-manual/common-tasks:Configuring the Recipe`" for additional information.
-
-- *Parallel build failures:* These failures manifest themselves as
- intermittent errors, or errors reporting that a file or directory
- that should be created by some other part of the build process could
- not be found. This type of failure can occur even if, upon
- inspection, the file or directory does exist after the build has
- failed, because that part of the build process happened in the wrong
- order.
-
- To fix the problem, you need to either satisfy the missing dependency
- in the Makefile or whatever script produced the Makefile, or (as a
- workaround) set :term:`PARALLEL_MAKE` to an empty string::
-
- PARALLEL_MAKE = ""
-
- For information on parallel Makefile issues, see the
- ":ref:`dev-manual/common-tasks:debugging parallel make races`" section.
-
-- *Improper host path usage:* This failure applies to recipes building
- for the target or ``nativesdk`` only. The failure occurs when the
- compilation process uses improper headers, libraries, or other files
- from the host system when cross-compiling for the target.
-
- To fix the problem, examine the ``log.do_compile`` file to identify
- the host paths being used (e.g. ``/usr/include``, ``/usr/lib``, and
- so forth) and then either add configure options, apply a patch, or do
- both.
-
-- *Failure to find required libraries/headers:* If a build-time
- dependency is missing because it has not been declared in
- :term:`DEPENDS`, or because the
- dependency exists but the path used by the build process to find the
- file is incorrect and the configure step did not detect it, the
- compilation process could fail. For either of these failures, the
- compilation process notes that files could not be found. In these
- cases, you need to go back and add additional options to the
- configure script as well as possibly add additional build-time
- dependencies to :term:`DEPENDS`.
-
- Occasionally, it is necessary to apply a patch to the source to
- ensure the correct paths are used. If you need to specify paths to
- find files staged into the sysroot from other recipes, use the
- variables that the OpenEmbedded build system provides (e.g.
- :term:`STAGING_BINDIR`, :term:`STAGING_INCDIR`, :term:`STAGING_DATADIR`, and so
- forth).
-
-Installing
-----------
-
-During ``do_install``, the task copies the built files along with their
-hierarchy to locations that would mirror their locations on the target
-device. The installation process copies files from the
-``${``\ :term:`S`\ ``}``,
-``${``\ :term:`B`\ ``}``, and
-``${``\ :term:`WORKDIR`\ ``}``
-directories to the ``${``\ :term:`D`\ ``}``
-directory to create the structure as it should appear on the target
-system.
-
-How your software is built affects what you must do to be sure your
-software is installed correctly. The following list describes what you
-must do for installation depending on the type of build system used by
-the software being built:
-
-- *Autotools and CMake:* If the software your recipe is building uses
- Autotools or CMake, the OpenEmbedded build system understands how to
- install the software. Consequently, you do not have to have a
- ``do_install`` task as part of your recipe. You just need to make
- sure the install portion of the build completes with no issues.
- However, if you wish to install additional files not already being
- installed by ``make install``, you should do this using a
- ``do_install:append`` function using the install command as described
- in the "Manual" bulleted item later in this list.
-
-- *Other (using* ``make install``\ *)*: You need to define a ``do_install``
- function in your recipe. The function should call
- ``oe_runmake install`` and will likely need to pass in the
- destination directory as well. How you pass that path is dependent on
- how the ``Makefile`` being run is written (e.g. ``DESTDIR=${D}``,
- ``PREFIX=${D}``, ``INSTALLROOT=${D}``, and so forth).
-
- For an example recipe using ``make install``, see the
- ":ref:`dev-manual/common-tasks:makefile-based package`" section.
-
-- *Manual:* You need to define a ``do_install`` function in your
- recipe. The function must first use ``install -d`` to create the
- directories under
- ``${``\ :term:`D`\ ``}``. Once the
- directories exist, your function can use ``install`` to manually
- install the built software into the directories.
-
- You can find more information on ``install`` at
- https://www.gnu.org/software/coreutils/manual/html_node/install-invocation.html.
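-
-A minimal manual ``do_install`` might look like the following sketch,
-which assumes a hypothetical ``myprogram`` binary built in ``${S}``::
-
- do_install() {
- install -d ${D}${bindir}
- install -m 0755 ${S}/myprogram ${D}${bindir}
- }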
-
-For the scenarios that do not use Autotools or CMake, you need to track
-the installation and diagnose and fix any issues until everything
-installs correctly. You need to look in the default location of
-``${D}``, which is ``${WORKDIR}/image``, to be sure your files have been
-installed correctly.
-
-.. note::
-
- - During the installation process, you might need to modify some of
- the installed files to suit the target layout. For example, you
- might need to replace hard-coded paths in an initscript with
- values of variables provided by the build system, such as
- replacing ``/usr/bin/`` with ``${bindir}``. If you do perform such
- modifications during ``do_install``, be sure to modify the
- destination file after copying rather than before copying.
- Modifying after copying ensures that the build system can
- re-execute ``do_install`` if needed.
-
- - ``oe_runmake install``, which can be run directly or can be run
- indirectly by the
- :ref:`autotools <ref-classes-autotools>` and
- :ref:`cmake <ref-classes-cmake>` classes,
- runs ``make install`` in parallel. Sometimes, a Makefile can have
- missing dependencies between targets that can result in race
- conditions. If you experience intermittent failures during
- ``do_install``, you might be able to work around them by disabling
- parallel Makefile installs by adding the following to the recipe::
-
- PARALLEL_MAKEINST = ""
-
- See :term:`PARALLEL_MAKEINST` for additional information.
-
- - If you need to install one or more custom CMake toolchain files
- that are supplied by the application you are building, install the
- files to ``${D}${datadir}/cmake/Modules`` during
- :ref:`ref-tasks-install`.
-
-Enabling System Services
-------------------------
-
-If you want to install a service, which is a process that usually starts
-on boot and runs in the background, then you must include some
-additional definitions in your recipe.
-
-If you are adding services and the service initialization script or the
-service file itself is not installed, you must provide for that
-installation in your recipe using a ``do_install:append`` function. If
-your recipe already has a ``do_install`` function, update the function
-near its end rather than adding an additional ``do_install:append``
-function.
-
-When you create the installation for your services, you need to
-accomplish what is normally done by ``make install``. In other words,
-make sure your installation arranges the output similar to how it is
-arranged on the target system.
-
-The OpenEmbedded build system provides support for starting services two
-different ways:
-
-- *SysVinit:* SysVinit is a system and service manager that manages the
- init system used to control the very basic functions of your system.
- The init program is the first program started by the Linux kernel
- when the system boots. Init then controls the startup, running and
- shutdown of all other programs.
-
- To enable a service using SysVinit, your recipe needs to inherit the
- :ref:`update-rc.d <ref-classes-update-rc.d>`
- class. The class helps facilitate safely installing the package on
- the target.
-
- You will need to set the
- :term:`INITSCRIPT_PACKAGES`,
- :term:`INITSCRIPT_NAME`,
- and
- :term:`INITSCRIPT_PARAMS`
- variables within your recipe.
-
-- *systemd:* System Management Daemon (systemd) was designed to replace
- SysVinit and to provide enhanced management of services. For more
- information on systemd, see the systemd homepage at
- https://freedesktop.org/wiki/Software/systemd/.
-
- To enable a service using systemd, your recipe needs to inherit the
- :ref:`systemd <ref-classes-systemd>` class. See
- the ``systemd.bbclass`` file located in your :term:`Source Directory`
- for more information.
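-
-As an illustrative sketch, a recipe installing a hypothetical
-``myservice`` SysVinit script could inherit the class and set the
-variables like this::
-
- inherit update-rc.d
-
- INITSCRIPT_PACKAGES = "${PN}"
- INITSCRIPT_NAME = "myservice"
- INITSCRIPT_PARAMS = "defaults 90"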
-
-Packaging
----------
-
-Successful packaging is a combination of automated processes performed
-by the OpenEmbedded build system and some specific steps you need to
-take. The following list describes the process:
-
-- *Splitting Files*: The ``do_package`` task splits the files produced
- by the recipe into logical components. Even software that produces a
- single binary might still have debug symbols, documentation, and
- other logical components that should be split out. The ``do_package``
- task ensures that files are split up and packaged correctly.
-
-- *Running QA Checks*: The
- :ref:`insane <ref-classes-insane>` class adds a
- step to the package generation process so that output quality
- assurance checks are generated by the OpenEmbedded build system. This
- step performs a range of checks to be sure the build's output is free
- of common problems that show up during runtime. For information on
- these checks, see the
- :ref:`insane <ref-classes-insane>` class and
- the ":ref:`ref-manual/qa-checks:qa error and warning messages`"
- chapter in the Yocto Project Reference Manual.
-
-- *Hand-Checking Your Packages*: After you build your software, you
- need to be sure your packages are correct. Examine the
- ``${``\ :term:`WORKDIR`\ ``}/packages-split``
- directory and make sure files are where you expect them to be. If you
- discover problems, you can set
- :term:`PACKAGES`,
- :term:`FILES`,
- ``do_install(:append)``, and so forth as needed.
-
-- *Splitting an Application into Multiple Packages*: If you need to
- split an application into several packages, see the
- ":ref:`dev-manual/common-tasks:splitting an application into multiple packages`"
- section for an example.
-
-- *Installing a Post-Installation Script*: For an example showing how
- to install a post-installation script, see the
- ":ref:`dev-manual/common-tasks:post-installation scripts`" section.
-
-- *Marking Package Architecture*: Depending on what your recipe is
- building and how it is configured, it might be important to mark the
- packages produced as being specific to a particular machine, or to
- mark them as not being specific to a particular machine or
- architecture at all.
-
- By default, packages apply to any machine with the same architecture
- as the target machine. When a recipe produces packages that are
- machine-specific (e.g. the
- :term:`MACHINE` value is passed
- into the configure script or a patch is applied only for a particular
- machine), you should mark them as such by adding the following to the
- recipe::
-
- PACKAGE_ARCH = "${MACHINE_ARCH}"
-
- On the other hand, if the recipe produces packages that do not
- contain anything specific to the target machine or architecture at
- all (e.g. recipes that simply package script files or configuration
- files), you should use the
- :ref:`allarch <ref-classes-allarch>` class to
- do this for you by adding this to your recipe::
-
- inherit allarch
-
- Ensuring that the package architecture is correct is not critical
- while you are doing the first few builds of your recipe. However, it
- is important in order to ensure that your recipe rebuilds (or does
- not rebuild) appropriately in response to changes in configuration,
- and to ensure that you get the appropriate packages installed on the
- target machine, particularly if you run separate builds for more than
- one target machine.
-
-Sharing Files Between Recipes
------------------------------
-
-Recipes often need to use files provided by other recipes on the build
-host. For example, an application linking to a common library needs
-access to the library itself and its associated headers. The way this
-access is accomplished is by populating a sysroot with files. Each
-recipe has two sysroots in its work directory, one for target files
-(``recipe-sysroot``) and one for files that are native to the build host
-(``recipe-sysroot-native``).
-
-.. note::
-
- You might find the term "staging" used within the Yocto Project
- regarding files populating sysroots (e.g. the :term:`STAGING_DIR`
- variable).
-
-Recipes should never populate the sysroot directly (i.e. write files
-into sysroot). Instead, files should be installed into standard
-locations during the
-:ref:`ref-tasks-install` task within
-the ``${``\ :term:`D`\ ``}`` directory. The
-reason for this limitation is that almost all files that populate the
-sysroot are cataloged in manifests in order to ensure the files can be
-removed later when a recipe is either modified or removed. Thus, the
-sysroot is able to remain free from stale files.
-
-A subset of the files installed by the :ref:`ref-tasks-install` task are
-used by the :ref:`ref-tasks-populate_sysroot` task as defined by the
-:term:`SYSROOT_DIRS` variable to automatically populate the sysroot. It
-is possible to modify the list of directories that populate the sysroot.
-The following example shows how you could add the ``/opt`` directory to
-the list of directories within a recipe::
-
- SYSROOT_DIRS += "/opt"
-
-.. note::
-
- The ``/sysroot-only`` directory is to be used by recipes that generate artifacts
- that are not included in the target filesystem, allowing them to share
- these artifacts without needing to use the :term:`DEPLOY_DIR`.
-
-For a more complete description of the :ref:`ref-tasks-populate_sysroot`
-task and its associated functions, see the
-:ref:`staging <ref-classes-staging>` class.
-
-Using Virtual Providers
------------------------
-
-Prior to a build, if you know that several different recipes provide the
-same functionality, you can use a virtual provider (i.e. ``virtual/*``)
-as a placeholder for the actual provider. The actual provider is
-determined at build-time.
-
-A common scenario where a virtual provider is used would be for the
-kernel recipe. Suppose you have three kernel recipes whose
-:term:`PN` values map to ``kernel-big``,
-``kernel-mid``, and ``kernel-small``. Furthermore, each of these recipes
-in some way uses a :term:`PROVIDES`
-statement that essentially identifies itself as being able to provide
-``virtual/kernel``. Here is one way through the
-:ref:`kernel <ref-classes-kernel>` class::
-
- PROVIDES += "virtual/kernel"
-
-Any recipe that inherits the :ref:`kernel <ref-classes-kernel>` class is
-going to utilize a :term:`PROVIDES` statement that identifies that recipe as
-being able to provide the ``virtual/kernel`` item.
-
-Now comes the time to actually build an image and you need a kernel
-recipe, but which one? You can configure your build to call out the
-kernel recipe you want by using the :term:`PREFERRED_PROVIDER` variable. As
-an example, consider the :yocto_git:`x86-base.inc
-</poky/tree/meta/conf/machine/include/x86/x86-base.inc>` include file, which is a
-machine (i.e. :term:`MACHINE`) configuration file. This include file is the
-reason all x86-based machines use the ``linux-yocto`` kernel. Here are the
-relevant lines from the include file::
-
- PREFERRED_PROVIDER_virtual/kernel ??= "linux-yocto"
- PREFERRED_VERSION_linux-yocto ??= "4.15%"
-
-When you use a virtual provider, you do not have to "hard code" a recipe
-name as a build dependency. You can use the
-:term:`DEPENDS` variable to state the
-build is dependent on ``virtual/kernel`` for example::
-
- DEPENDS = "virtual/kernel"
-
-During the build, the OpenEmbedded build system picks
-the correct recipe needed for the ``virtual/kernel`` dependency based on
-the :term:`PREFERRED_PROVIDER` variable. If you want to use the small kernel
-mentioned at the beginning of this section, configure your build as
-follows::
-
- PREFERRED_PROVIDER_virtual/kernel ??= "kernel-small"
-
-.. note::
-
- Any recipe that :term:`PROVIDES` a ``virtual/*`` item that is ultimately not
- selected through :term:`PREFERRED_PROVIDER` does not get built. Preventing these
- recipes from building is usually the desired behavior since this mechanism's
- purpose is to select between mutually exclusive alternative providers.
-
-The following lists specific examples of virtual providers:
-
-- ``virtual/kernel``: Provides the name of the kernel recipe to use
- when building a kernel image.
-
-- ``virtual/bootloader``: Provides the name of the bootloader to use
- when building an image.
-
-- ``virtual/libgbm``: Provides ``gbm.pc``.
-
-- ``virtual/egl``: Provides ``egl.pc`` and possibly ``wayland-egl.pc``.
-
-- ``virtual/libgl``: Provides ``gl.pc`` (i.e. libGL).
-
-- ``virtual/libgles1``: Provides ``glesv1_cm.pc`` (i.e. libGLESv1_CM).
-
-- ``virtual/libgles2``: Provides ``glesv2.pc`` (i.e. libGLESv2).
-
-.. note::
-
- Virtual providers only apply to build time dependencies specified with
- :term:`PROVIDES` and :term:`DEPENDS`. They do not apply to runtime
- dependencies specified with :term:`RPROVIDES` and :term:`RDEPENDS`.
-
-Properly Versioning Pre-Release Recipes
----------------------------------------
-
-Sometimes the name of a recipe can lead to versioning problems when the
-recipe is upgraded to a final release. For example, consider the
-``irssi_0.8.16-rc1.bb`` recipe file in the list of example recipes in
-the ":ref:`dev-manual/common-tasks:storing and naming the recipe`" section.
-This recipe is at a release candidate stage (i.e. "rc1"). When the recipe is
-released, the recipe filename becomes ``irssi_0.8.16.bb``. The version
-change from ``0.8.16-rc1`` to ``0.8.16`` is seen as a decrease by the
-build system and package managers, so the resulting packages will not
-correctly trigger an upgrade.
-
-In order to ensure the versions compare properly, the recommended
-convention is to set :term:`PV` within the
-recipe to "previous_version+current_version". You can use an additional
-variable so that you can use the current version elsewhere. Here is an
-example::
-
- REALPV = "0.8.16-rc1"
- PV = "0.8.15+${REALPV}"
-
-Post-Installation Scripts
--------------------------
-
-Post-installation scripts run immediately after installing a package on
-the target or during image creation when a package is included in an
-image. To add a post-installation script to a package, add a
-``pkg_postinst:``\ `PACKAGENAME`\ ``()`` function to the recipe file
-(``.bb``) and replace `PACKAGENAME` with the name of the package you want
-to attach to the ``postinst`` script. To apply the post-installation
-script to the main package for the recipe, which is usually what is
-required, specify
-``${``\ :term:`PN`\ ``}`` in place of
-PACKAGENAME.
-
-A post-installation function has the following structure::
-
- pkg_postinst:PACKAGENAME() {
- # Commands to carry out
- }
-
-The script defined in the post-installation function is called when the
-root filesystem is created. If the script succeeds, the package is
-marked as installed.
-
-.. note::
-
- Any RPM post-installation script that runs on the target should
- return a 0 exit code. RPM does not allow non-zero exit codes for
- these scripts; if a script returns a non-zero exit code, the RPM
- package manager marks the package installation as failed on the target.
-
-Sometimes it is necessary for the execution of a post-installation
-script to be delayed until the first boot. For example, the script might
-need to be executed on the device itself. To delay script execution
-until boot time, you must explicitly mark post installs to defer to the
-target. You can use ``pkg_postinst_ontarget()`` or call
-``postinst_intercept delay_to_first_boot`` from ``pkg_postinst()``. Any
-failure of a ``pkg_postinst()`` script (including exit 1) triggers an
-error during the
-:ref:`ref-tasks-rootfs` task.
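-
-Here is a minimal sketch of a deferred post-installation function (the
-command shown is purely illustrative)::
-
- pkg_postinst_ontarget:${PN}() {
- # Commands that must run on the device at first boot
- myservice-setup --init # hypothetical command
- }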
-
-If you have recipes that use a ``pkg_postinst`` function and they require
-the use of non-standard native tools that have dependencies during
-root filesystem construction, you need to use the
-:term:`PACKAGE_WRITE_DEPS`
-variable in your recipe to list these tools. If you do not use this
-variable, the tools might be missing and execution of the
-post-installation script is deferred until first boot. Deferring the
-script to the first boot is undesirable and impossible for read-only
-root filesystems.
-
-.. note::
-
- There is equivalent support for pre-install, pre-uninstall, and post-uninstall
- scripts by way of ``pkg_preinst``, ``pkg_prerm``, and ``pkg_postrm``,
- respectively. These scripts work in exactly the same way as
- ``pkg_postinst`` with the exception that they run at different times. Also,
- because of when they run, they cannot be run at image
- creation time like ``pkg_postinst`` can.
-
-Testing
--------
-
-The final step for completing your recipe is to be sure that the
-software you built runs correctly. To accomplish runtime testing, add
-the build's output packages to your image and test them on the target.
-
-For information on how to customize your image by adding specific
-packages, see ":ref:`dev-manual/common-tasks:customizing images`" section.
-
-Examples
---------
-
-To help summarize how to write a recipe, this section provides some
-examples given various scenarios:
-
-- Recipes that use local files
-
-- Using an Autotooled package
-
-- Using a Makefile-based package
-
-- Splitting an application into multiple packages
-
-- Adding binaries to an image
-
-Single .c File Package (Hello World!)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Building an application from a single file that is stored locally (e.g.
-under ``files``) requires a recipe that has the file listed in the
-:term:`SRC_URI` variable. Additionally, you need to manually write the
-``do_compile`` and ``do_install`` tasks. The :term:`S` variable defines the
-directory containing the source code, which is set to
-:term:`WORKDIR` in this case - the
-directory BitBake uses for the build.
-::
-
- SUMMARY = "Simple helloworld application"
- SECTION = "examples"
- LICENSE = "MIT"
- LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
-
- SRC_URI = "file://helloworld.c"
-
- S = "${WORKDIR}"
-
- do_compile() {
- ${CC} ${LDFLAGS} helloworld.c -o helloworld
- }
-
- do_install() {
- install -d ${D}${bindir}
- install -m 0755 helloworld ${D}${bindir}
- }
-
-By default, the ``helloworld``, ``helloworld-dbg``, and
-``helloworld-dev`` packages are built. For information on how to
-customize the packaging process, see the
-":ref:`dev-manual/common-tasks:splitting an application into multiple packages`"
-section.
-
-Autotooled Package
-~~~~~~~~~~~~~~~~~~
-
-Applications that use Autotools such as ``autoconf`` and ``automake``
-require a recipe that has a source archive listed in :term:`SRC_URI` and
-also inherit the
-:ref:`autotools <ref-classes-autotools>` class,
-which contains the definitions of all the steps needed to build an
-Autotool-based application. The result of the build is automatically
-packaged. And, if the application uses NLS for localization, packages
-with locale information are generated (one package per language).
-Following is one example (``hello_2.3.bb``)
-::
-
- SUMMARY = "GNU Helloworld application"
- SECTION = "examples"
- LICENSE = "GPL-2.0-or-later"
- LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe"
-
- SRC_URI = "${GNU_MIRROR}/hello/hello-${PV}.tar.gz"
-
- inherit autotools gettext
-
-The variable :term:`LIC_FILES_CHKSUM` is used to track source license
-changes as described in the
-":ref:`dev-manual/common-tasks:tracking license changes`" section in
-the Yocto Project Overview and Concepts Manual. You can quickly create
-Autotool-based recipes in a manner similar to the previous example.
-
-Makefile-Based Package
-~~~~~~~~~~~~~~~~~~~~~~
-
-Applications that use GNU ``make`` also require a recipe that has the
-source archive listed in :term:`SRC_URI`. You do not need to add a
-``do_compile`` step since by default BitBake starts the ``make`` command
-to compile the application. If you need additional ``make`` options, you
-should store them in the
-:term:`EXTRA_OEMAKE` or
-:term:`PACKAGECONFIG_CONFARGS`
-variables. BitBake passes these options into the GNU ``make``
-invocation. Note that a ``do_install`` task is still required.
-Otherwise, BitBake runs an empty ``do_install`` task by default.
-
-Some applications might require extra parameters to be passed to the
-compiler. For example, the application might need an additional header
-path. You can accomplish this by adding to the :term:`CFLAGS` variable. The
-following example shows this::
-
- CFLAGS:prepend = "-I ${S}/include "
-
-In the following example, ``lz4`` is a makefile-based package::
-
- SUMMARY = "Extremely Fast Compression algorithm"
- DESCRIPTION = "LZ4 is a very fast lossless compression algorithm, providing compression speed at 400 MB/s per core, scalable with multi-cores CPU. It also features an extremely fast decoder, with speed in multiple GB/s per core, typically reaching RAM speed limits on multi-core systems."
- HOMEPAGE = "https://github.com/lz4/lz4"
-
- LICENSE = "BSD-2-Clause | GPL-2.0-only"
- LIC_FILES_CHKSUM = "file://lib/LICENSE;md5=ebc2ea4814a64de7708f1571904b32cc \
- file://programs/COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
- file://LICENSE;md5=d57c0d21cb917fb4e0af2454aa48b956 \
- "
-
- PE = "1"
-
- SRCREV = "d44371841a2f1728a3f36839fd4b7e872d0927d3"
-
- SRC_URI = "git://github.com/lz4/lz4.git;branch=release;protocol=https \
- file://CVE-2021-3520.patch \
- "
- UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>.*)"
-
- S = "${WORKDIR}/git"
-
- # Fixed in r118, which is larger than the current version.
- CVE_CHECK_IGNORE += "CVE-2014-4715"
-
- EXTRA_OEMAKE = "PREFIX=${prefix} CC='${CC}' CFLAGS='${CFLAGS}' DESTDIR=${D} LIBDIR=${libdir} INCLUDEDIR=${includedir} BUILD_STATIC=no"
-
- do_install() {
- oe_runmake install
- }
-
- BBCLASSEXTEND = "native nativesdk"
-
-Splitting an Application into Multiple Packages
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use the variables :term:`PACKAGES` and :term:`FILES` to split an
-application into multiple packages.
-
-Following is an example that uses the ``libxpm`` recipe. By default,
-this recipe generates a single package that contains the library along
-with a few binaries. You can modify the recipe to split the binaries
-into separate packages::
-
- require xorg-lib-common.inc
-
- SUMMARY = "Xpm: X Pixmap extension library"
- LICENSE = "MIT"
- LIC_FILES_CHKSUM = "file://COPYING;md5=51f4270b012ecd4ab1a164f5f4ed6cf7"
- DEPENDS += "libxext libsm libxt"
- PE = "1"
-
- XORG_PN = "libXpm"
-
- PACKAGES =+ "sxpm cxpm"
- FILES:cxpm = "${bindir}/cxpm"
- FILES:sxpm = "${bindir}/sxpm"
-
-In the previous example, we want to ship the ``sxpm`` and ``cxpm``
-binaries in separate packages. Since ``bindir`` would be packaged into
-the main :term:`PN` package by default, we prepend the :term:`PACKAGES` variable
-so additional package names are added to the start of the list. This results
-in the extra ``FILES:*`` variables then containing information that
-define which files and directories go into which packages. Files
-included by earlier packages are skipped by later packages. Thus, the
-main :term:`PN` package does not include the above listed files.
-
-Packaging Externally Produced Binaries
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Sometimes, you need to add pre-compiled binaries to an image. For
-example, suppose that there are binaries for proprietary code,
-created by a particular division of a company. Your part of the company
-needs to use those binaries as part of an image that you are building
-using the OpenEmbedded build system. Since you only have the binaries
-and not the source code, you cannot use a typical recipe that expects to
-fetch the source specified in
-:term:`SRC_URI` and then compile it.
-
-One method is to package the binaries and then install them as part of
-the image. Generally, it is not a good idea to package binaries since,
-among other things, it can hinder the ability to reproduce builds and
-could lead to compatibility problems with ABI in the future. However,
-sometimes you have no choice.
-
-The easiest solution is to create a recipe that uses the
-:ref:`bin_package <ref-classes-bin-package>` class
-and to be sure that you are using default locations for build artifacts.
-In most cases, the :ref:`bin_package <ref-classes-bin-package>` class handles "skipping" the
-configure and compile steps as well as sets things up to grab packages
-from the appropriate area. In particular, this class sets ``noexec`` on
-both the :ref:`ref-tasks-configure`
-and :ref:`ref-tasks-compile` tasks,
-sets ``FILES:${PN}`` to "/" so that it picks up all files, and sets up a
-:ref:`ref-tasks-install` task, which
-effectively copies all files from ``${S}`` to ``${D}``. The
-:ref:`bin_package <ref-classes-bin-package>` class works well when the files extracted into ``${S}``
-are already laid out in the way they should be laid out on the target.
-For more information on these variables, see the
-:term:`FILES`,
-:term:`PN`,
-:term:`S`, and
-:term:`D` variables in the Yocto Project
-Reference Manual's variable glossary.
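-
-A minimal sketch of such a recipe, assuming a hypothetical tarball of
-prebuilt files already laid out as they should appear on the target,
-could look like this::
-
- SUMMARY = "Prebuilt proprietary example binaries"
- LICENSE = "CLOSED"
-
- SRC_URI = "file://example-prebuilt.tar.gz"
-
- inherit bin_package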
-
-.. note::
-
- - Using :term:`DEPENDS` is a good
- idea even for components distributed in binary form, and is often
- necessary for shared libraries. For a shared library, listing the
- library dependencies in :term:`DEPENDS` makes sure that the libraries
- are available in the staging sysroot when other recipes link
- against the library, which might be necessary for successful
- linking.
-
- - Using :term:`DEPENDS` also allows runtime dependencies between
- packages to be added automatically. See the
- ":ref:`overview-manual/concepts:automatically added runtime dependencies`"
- section in the Yocto Project Overview and Concepts Manual for more
- information.
-
-If you cannot use the :ref:`bin_package <ref-classes-bin-package>` class, you need to be sure you are
-doing the following:
-
-- Create a recipe where the
- :ref:`ref-tasks-configure` and
- :ref:`ref-tasks-compile` tasks do
- nothing: It is usually sufficient to just not define these tasks in
- the recipe, because the default implementations do nothing unless a
- Makefile is found in
- ``${``\ :term:`S`\ ``}``.
-
- If ``${S}`` might contain a Makefile, or if you inherit some class
- that replaces ``do_configure`` and ``do_compile`` with custom
- versions, then you can use the
- ``[``\ :ref:`noexec <bitbake-user-manual/bitbake-user-manual-metadata:variable flags>`\ ``]``
- flag to turn the tasks into no-ops, as follows::
-
- do_configure[noexec] = "1"
- do_compile[noexec] = "1"
-
- Unlike
- :ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:deleting a task`,
- using the flag preserves the dependency chain from the
- :ref:`ref-tasks-fetch`,
- :ref:`ref-tasks-unpack`, and
- :ref:`ref-tasks-patch` tasks to the
- :ref:`ref-tasks-install` task.
-
-- Make sure your ``do_install`` task installs the binaries
- appropriately.
-
-- Ensure that you set up :term:`FILES`
- (usually
- ``FILES:${``\ :term:`PN`\ ``}``) to
- point to the files you have installed, which of course depends on
- where you have installed them and whether those files are in
- different locations than the defaults.
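-
-For example, if the binaries were installed into ``${bindir}`` and
-``${libdir}`` (the file names shown are hypothetical), you might set::
-
- FILES:${PN} = "${bindir}/myprog ${libdir}/libmylib.so.*"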
-
-Following Recipe Style Guidelines
----------------------------------
-
-When writing recipes, it is good to conform to existing style
-guidelines. The :oe_wiki:`OpenEmbedded Styleguide </Styleguide>` wiki page
-provides rough guidelines for preferred recipe style.
-
-It is common for existing recipes to deviate a bit from this style.
-However, aiming for at least a consistent style is a good idea. Some
-practices, such as omitting spaces around ``=`` operators in assignments
-or ordering recipe components in an erratic way, are widely seen as poor
-style.
-
-Recipe Syntax
--------------
-
-Understanding recipe file syntax is important for writing recipes. The
-following list overviews the basic items that make up a BitBake recipe
-file. For more complete BitBake syntax descriptions, see the
-":doc:`bitbake-user-manual/bitbake-user-manual-metadata`"
-chapter of the BitBake User Manual.
-
-- *Variable Assignments and Manipulations:* Variable assignments allow
- a value to be assigned to a variable. The assignment can be static
- text or might include the contents of other variables. In addition to
- the assignment, appending and prepending operations are also
- supported.
-
- The following example shows some of the ways you can use variables in
- recipes::
-
- S = "${WORKDIR}/postfix-${PV}"
- CFLAGS += "-DNO_ASM"
- SRC_URI:append = " file://fixup.patch"
-
-- *Functions:* Functions provide a series of actions to be performed.
- You usually use functions to override the default implementation of a
- task function or to complement a default function (i.e. append or
- prepend to an existing function). Standard functions use ``sh`` shell
- syntax, although access to OpenEmbedded variables and internal
- methods is also available.
-
- Here is an example function from the ``sed`` recipe::
-
- do_install () {
- autotools_do_install
- install -d ${D}${base_bindir}
- mv ${D}${bindir}/sed ${D}${base_bindir}/sed
- rmdir ${D}${bindir}/
- }
-
- It is
- also possible to implement new functions that are called between
- existing tasks as long as the new functions are not replacing or
- complementing the default functions. You can implement functions in
- Python instead of shell. Neither of these options is seen in the
- majority of recipes.
-
-- *Keywords:* BitBake recipes use only a few keywords. You use keywords
- to include common functions (``inherit``), load parts of a recipe
- from other files (``include`` and ``require``) and export variables
- to the environment (``export``).
-
- The following example shows the use of some of these keywords::
-
- export POSTCONF = "${STAGING_BINDIR}/postconf"
- inherit autoconf
- require otherfile.inc
-
-- *Comments (#):* Any lines that begin with the hash character (``#``)
- are treated as comment lines and are ignored::
-
- # This is a comment
-
-This next list summarizes the most important and most commonly used
-parts of the recipe syntax. For more information on these parts of the
-syntax, you can reference the
-:doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata` chapter
-in the BitBake User Manual.
-
-- *Line Continuation (\\):* Use the backward slash (``\``) character to
- split a statement over multiple lines. Place the slash character at
- the end of the line that is to be continued on the next line::
-
- VAR = "A really long \
- line"
-
- .. note::
-
- You cannot have any characters including spaces or tabs after the
- slash character.
-
-- *Using Variables (${VARNAME}):* Use the ``${VARNAME}`` syntax to
- access the contents of a variable::
-
- SRC_URI = "${SOURCEFORGE_MIRROR}/libpng/zlib-${PV}.tar.gz"
-
- .. note::
-
- It is important to understand that the value of a variable
- expressed in this form does not get substituted automatically. The
- expansion of these expressions happens on-demand later (e.g.
- usually when a function that makes reference to the variable
- executes). This behavior ensures that the values are most
- appropriate for the context in which they are finally used. On the
- rare occasion that you do need the variable expression to be
- expanded immediately, you can use the ``:=`` operator instead of
- ``=`` when you make the assignment, but this is not generally needed.
-
-- *Quote All Assignments ("value"):* Use double quotes around values in
- all variable assignments (e.g. ``"value"``). Following is an example::
-
- VAR1 = "${OTHERVAR}"
- VAR2 = "The version is ${PV}"
-
-- *Conditional Assignment (?=):* Conditional assignment is used to
- assign a value to a variable, but only when the variable is currently
- unset. Use the question mark followed by the equal sign (``?=``) to
- make a "soft" assignment used for conditional assignment. Typically,
- "soft" assignments are used in the ``local.conf`` file for variables
- that are allowed to come through from the external environment.
-
- Here is an example where ``VAR1`` is set to "New value" if it is
- currently empty. However, if ``VAR1`` has already been set, it
- remains unchanged::
-
- VAR1 ?= "New value"
-
- In this next example, ``VAR1`` is left with the value "Original value"::
-
- VAR1 = "Original value"
- VAR1 ?= "New value"
-
-- *Appending (+=):* Use the plus character followed by the equals sign
- (``+=``) to append values to existing variables.
-
- .. note::
-
- This operator adds a space between the existing content of the
- variable and the new content.
-
- Here is an example::
-
- SRC_URI += "file://fix-makefile.patch"
-
-- *Prepending (=+):* Use the equals sign followed by the plus character
- (``=+``) to prepend values to existing variables.
-
- .. note::
-
- This operator adds a space between the new content and the
- existing content of the variable.
-
- Here is an example::
-
- VAR =+ "Starts"
-
-- *Appending (:append):* Use the ``:append`` operator to append values
- to existing variables. This operator does not add any additional
- space. Also, the operator is applied after all the ``+=``, and ``=+``
- operators have been applied and after all ``=`` assignments have
- occurred.
-
- The following example shows the space being explicitly added to the
- start to ensure the appended value is not merged with the existing
- value::
-
- SRC_URI:append = " file://fix-makefile.patch"
-
- You can also use
- the ``:append`` operator with overrides, which results in the actions
- only being performed for the specified target or machine::
-
- SRC_URI:append:sh4 = " file://fix-makefile.patch"
-
-- *Prepending (:prepend):* Use the ``:prepend`` operator to prepend
- values to existing variables. This operator does not add any
- additional space. Also, the operator is applied after all the ``+=``,
- and ``=+`` operators have been applied and after all ``=``
- assignments have occurred.
-
- The following example shows the space being explicitly added to the
- end to ensure the prepended value is not merged with the existing
- value::
-
- CFLAGS:prepend = "-I${S}/myincludes "
-
- You can also use the
- ``:prepend`` operator with overrides, which results in the actions
- only being performed for the specified target or machine::
-
- CFLAGS:prepend:sh4 = "-I${S}/myincludes "
-
-- *Overrides:* You can use overrides to set a value conditionally,
- typically based on how the recipe is being built. For example, to set
- the :term:`KBRANCH` variable's
- value to "standard/base" for any target
- :term:`MACHINE`, except for
- qemuarm where it should be set to "standard/arm-versatile-926ejs",
- you would do the following::
-
- KBRANCH = "standard/base"
- KBRANCH:qemuarm = "standard/arm-versatile-926ejs"
-
- Overrides are also used to separate
- alternate values of a variable in other situations. For example, when
- setting variables such as
- :term:`FILES` and
- :term:`RDEPENDS` that are
- specific to individual packages produced by a recipe, you should
- always use an override that specifies the name of the package.
-
-- *Indentation:* Use spaces for indentation rather than tabs. For
- shell functions, both currently work. However, it is a policy
- decision of the Yocto Project to use tabs in shell functions. Realize
- that some layers have a policy to use spaces for all indentation.
-
-- *Using Python for Complex Operations:* For more advanced processing,
- it is possible to use Python code during variable assignments (e.g.
- search and replacement on a variable).
-
- You indicate Python code using the ``${@python_code}`` syntax for the
- variable assignment::
-
- SRC_URI = "ftp://ftp.info-zip.org/pub/infozip/src/zip${@d.getVar('PV',1).replace('.', '')}.tgz
-
-- *Shell Function Syntax:* Write shell functions as if you were writing
- a shell script when you describe a list of actions to take. You
- should ensure that your script works with a generic ``sh`` and that
- it does not require any ``bash`` or other shell-specific
- functionality. The same considerations apply to various system
- utilities (e.g. ``sed``, ``grep``, ``awk``, and so forth) that you
- might wish to use. If in doubt, you should check with multiple
- implementations - including those from BusyBox.
-
-Adding a New Machine
-====================
-
-Adding a new machine to the Yocto Project is a straightforward process.
-This section describes how to add machines that are similar to those
-that the Yocto Project already supports.
-
-.. note::
-
- Although well within the capabilities of the Yocto Project, adding a
- totally new architecture might require changes to ``gcc``/``glibc``
- and to the site information, which is beyond the scope of this
- manual.
-
-For a complete example that shows how to add a new machine, see the
-":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`"
-section in the Yocto Project Board Support Package (BSP) Developer's
-Guide.
-
-Adding the Machine Configuration File
--------------------------------------
-
-To add a new machine, you need to add a new machine configuration file
-to the layer's ``conf/machine`` directory. This configuration file
-provides details about the device you are adding.
-
-The OpenEmbedded build system uses the root name of the machine
-configuration file to reference the new machine. For example, given a
-machine configuration file named ``crownbay.conf``, the build system
-recognizes the machine as "crownbay".
-
-The most important variables you must set in your machine configuration
-file or include from a lower-level configuration file are as follows:
-
-- :term:`TARGET_ARCH` (e.g. "arm")
-
-- ``PREFERRED_PROVIDER_virtual/kernel``
-
-- :term:`MACHINE_FEATURES` (e.g. "apm screen wifi")
-
-You might also need these variables:
-
-- :term:`SERIAL_CONSOLES` (e.g. "115200;ttyS0 115200;ttyS1")
-
-- :term:`KERNEL_IMAGETYPE` (e.g. "zImage")
-
-- :term:`IMAGE_FSTYPES` (e.g. "tar.gz jffs2")
-
-You can find full details on these variables in the reference section.
-You can leverage existing machine ``.conf`` files from
-``meta-yocto-bsp/conf/machine/``.
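-
-Pulling these together, a minimal machine configuration sketch (using a
-hypothetical "mymachine" and example values only) might resemble::
-
- # conf/machine/mymachine.conf -- illustrative values only
- TARGET_ARCH = "arm"
- PREFERRED_PROVIDER_virtual/kernel ??= "linux-yocto"
- MACHINE_FEATURES = "apm screen wifi"
- SERIAL_CONSOLES = "115200;ttyS0"
- KERNEL_IMAGETYPE = "zImage"
- IMAGE_FSTYPES = "tar.gz jffs2"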
-
-Adding a Kernel for the Machine
--------------------------------
-
-The OpenEmbedded build system needs to be able to build a kernel for the
-machine. You need to either create a new kernel recipe for this machine,
-or extend an existing kernel recipe. You can find several kernel recipe
-examples in the Source Directory at ``meta/recipes-kernel/linux`` that
-you can use as references.
-
-If you are creating a new kernel recipe, normal recipe-writing rules
-apply for setting up a :term:`SRC_URI`. Thus, you need to specify any
-necessary patches and set :term:`S` to point at the source code. You need to
-create a ``do_configure`` task that configures the unpacked kernel with
-a ``defconfig`` file. You can do this by using a ``make defconfig``
-command or, more commonly, by copying in a suitable ``defconfig`` file
-and then running ``make oldconfig``. By making use of ``inherit kernel``
-and potentially some of the ``linux-*.inc`` files, most other
-functionality is centralized and the defaults of the class normally work
-well.
-
-If you are extending an existing kernel recipe, it is usually a matter
-of adding a suitable ``defconfig`` file. The file needs to be added into
-a location similar to ``defconfig`` files used for other machines in a
-given kernel recipe. A possible way to do this is by listing the file in
-the :term:`SRC_URI` and adding the machine to the expression in
-:term:`COMPATIBLE_MACHINE`::
-
- COMPATIBLE_MACHINE = '(qemux86|qemumips)'
-
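-As a hedged sketch only (the added machine name "mymachine" is
-hypothetical), a ``.bbappend`` for an existing kernel recipe that adds a
-machine-specific ``defconfig`` might look like this::
-
- FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
- SRC_URI += "file://defconfig"
- COMPATIBLE_MACHINE = "(qemux86|qemumips|mymachine)"
-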
-For more information on ``defconfig`` files, see the
-":ref:`kernel-dev/common:changing the configuration`"
-section in the Yocto Project Linux Kernel Development Manual.
-
-Adding a Formfactor Configuration File
---------------------------------------
-
-A formfactor configuration file provides information about the target
-hardware for which the image is being built and information that the
-build system cannot obtain from other sources such as the kernel. Some
-examples of information contained in a formfactor configuration file
-include framebuffer orientation, whether or not the system has a
-keyboard, the positioning of the keyboard in relation to the screen, and
-the screen resolution.
-
-The build system uses reasonable defaults in most cases. However, if
-customization is necessary, you need to create a ``machconfig`` file in
-the ``meta/recipes-bsp/formfactor/files`` directory. This directory
-contains directories for specific machines such as ``qemuarm`` and
-``qemux86``. For information about the settings available and the
-defaults, see the ``meta/recipes-bsp/formfactor/files/config`` file
-found in the same area.
-
-Following is an example for the "qemuarm" machine::
-
- HAVE_TOUCHSCREEN=1
- HAVE_KEYBOARD=1
- DISPLAY_CAN_ROTATE=0
- DISPLAY_ORIENTATION=0
- #DISPLAY_WIDTH_PIXELS=640
- #DISPLAY_HEIGHT_PIXELS=480
- #DISPLAY_BPP=16
- DISPLAY_DPI=150
- DISPLAY_SUBPIXEL_ORDER=vrgb
-
-Upgrading Recipes
-=================
-
-Over time, upstream developers publish new versions for software built
-by layer recipes. It is recommended to keep recipes up-to-date with
-upstream version releases.
-
-While there are several methods to upgrade a recipe, you might
-consider checking on the upgrade status of a recipe first. You can do so
-using the ``devtool check-upgrade-status`` command. See the
-":ref:`devtool-checking-on-the-upgrade-status-of-a-recipe`"
-section in the Yocto Project Reference Manual for more information.
-
-The remainder of this section describes three ways you can upgrade a
-recipe. You can use the Automated Upgrade Helper (AUH) to set up
-automatic version upgrades. Alternatively, you can use
-``devtool upgrade`` to set up semi-automatic version upgrades. Finally,
-you can manually upgrade a recipe by editing the recipe itself.
-
-Using the Auto Upgrade Helper (AUH)
------------------------------------
-
-The AUH utility works in conjunction with the OpenEmbedded build system
-in order to automatically generate upgrades for recipes based on new
-versions being published upstream. Use AUH when you want to create a
-service that performs the upgrades automatically and optionally sends
-you an email with the results.
-
-AUH allows you to update several recipes in a single run. You can also
-optionally perform build and integration tests using images, save the
-results to your hard drive, and have result emails sent to recipe
-maintainers. Finally, AUH creates Git commits with appropriate
-commit messages in the layer's tree for the changes made to recipes.
-
-.. note::
-
- In some conditions, you should not use AUH to upgrade recipes
- and should instead use either ``devtool upgrade`` or upgrade your
- recipes manually:
-
- - When AUH cannot complete the upgrade sequence. This situation
- usually results because custom patches carried by the recipe
- cannot be automatically rebased to the new version. In this case,
- ``devtool upgrade`` allows you to manually resolve conflicts.
-
- - When for any reason you want fuller control over the upgrade
- process. For example, when you want special arrangements for
- testing.
-
-The following steps describe how to set up the AUH utility:
-
-1. *Be Sure the Development Host is Set Up:* You need to be sure that
- your development host is set up to use the Yocto Project. For
- information on how to set up your host, see the
- ":ref:`dev-manual/start:Preparing the Build Host`" section.
-
-2. *Make Sure Git is Configured:* The AUH utility requires Git to be
- configured because AUH uses Git to save upgrades. Thus, you must have
- your Git user name and email configured. The following command shows
- your current configuration::
-
- $ git config --list
-
- If you do not have the user and
- email configured, you can use the following commands to do so::
-
- $ git config --global user.name some_name
- $ git config --global user.email username@domain.com
-
-3. *Clone the AUH Repository:* To use AUH, you must clone the repository
- onto your development host. The following command uses Git to create
- a local copy of the repository on your system::
-
- $ git clone git://git.yoctoproject.org/auto-upgrade-helper
- Cloning into 'auto-upgrade-helper'...
- remote: Counting objects: 768, done.
- remote: Compressing objects: 100% (300/300), done.
- remote: Total 768 (delta 499), reused 703 (delta 434)
- Receiving objects: 100% (768/768), 191.47 KiB | 98.00 KiB/s, done.
- Resolving deltas: 100% (499/499), done.
- Checking connectivity... done.
-
- AUH is not part of the :term:`OpenEmbedded-Core (OE-Core)` or
- :term:`Poky` repositories.
-
-4. *Create a Dedicated Build Directory:* Run the
- :ref:`structure-core-script`
- script to create a fresh build directory that you use exclusively for
- running the AUH utility::
-
- $ cd poky
- $ source oe-init-build-env your_AUH_build_directory
-
- Re-using an existing build directory and its configurations is not
- recommended as existing settings could cause AUH to fail or behave
- undesirably.
-
-5. *Make Configurations in Your Local Configuration File:* Several
- settings are needed in the ``local.conf`` file in the build
- directory you just created for AUH. Make the following settings:
-
- - If you want to enable :ref:`Build
- History <dev-manual/common-tasks:maintaining build output quality>`,
- which is optional, you need the following lines in the
- ``conf/local.conf`` file::
-
- INHERIT =+ "buildhistory"
- BUILDHISTORY_COMMIT = "1"
-
- With this configuration and a successful
- upgrade, a build history "diff" file appears in the
- ``upgrade-helper/work/recipe/buildhistory-diff.txt`` file found in
- your build directory.
-
- - If you want to enable testing through the
- :ref:`testimage <ref-classes-testimage*>`
- class, which is optional, you need to have the following set in
- your ``conf/local.conf`` file::
-
- INHERIT += "testimage"
-
- .. note::
-
- If your distro does not enable ptest by default (Poky does),
- you need the following in your ``local.conf`` file::
-
- DISTRO_FEATURES:append = " ptest"
-
-
-6. *Optionally Start a vncserver:* If you are running on a server
- without an X11 session, you need to start a vncserver::
-
- $ vncserver :1
- $ export DISPLAY=:1
-
-7. *Create and Edit an AUH Configuration File:* You need to have the
- ``upgrade-helper/upgrade-helper.conf`` configuration file in your
- build directory. You can find a sample configuration file in the
- :yocto_git:`AUH source repository </auto-upgrade-helper/tree/>`.
-
- Read through the sample file and make configurations as needed. For
- example, if you enabled build history in your ``local.conf`` as
- described earlier, you must enable it in ``upgrade-helper.conf``.
-
- Also, if you are using the default ``maintainers.inc`` file supplied
- with Poky and located in ``meta-yocto`` and you do not set a
- "maintainers_whitelist" or "global_maintainer_override" in the
- ``upgrade-helper.conf`` configuration, and you specify "-e all" on
- the AUH command-line, the utility automatically sends out emails to
- all the default maintainers. Please avoid this.
-
-This next set of examples describes how to use the AUH:
-
-- *Upgrading a Specific Recipe:* To upgrade a specific recipe, use the
- following form::
-
- $ upgrade-helper.py recipe_name
-
- For example, this command upgrades the ``xmodmap`` recipe::
-
- $ upgrade-helper.py xmodmap
-
-- *Upgrading a Specific Recipe to a Particular Version:* To upgrade a
- specific recipe to a particular version, use the following form::
-
- $ upgrade-helper.py recipe_name -t version
-
- For example, this command upgrades the ``xmodmap`` recipe to version 1.2.3::
-
- $ upgrade-helper.py xmodmap -t 1.2.3
-
-- *Upgrading all Recipes to the Latest Versions and Suppressing Email
- Notifications:* To upgrade all recipes to their most recent versions
- and suppress the email notifications, use the following command::
-
- $ upgrade-helper.py all
-
-- *Upgrading all Recipes to the Latest Versions and Sending Email
- Notifications:* To upgrade all recipes to their most recent versions
- and send email messages to maintainers for each attempted recipe as
- well as a status email, use the following command::
-
- $ upgrade-helper.py -e all
-
-Once you have run the AUH utility, you can find the results in the AUH
-build directory::
-
- ${BUILDDIR}/upgrade-helper/timestamp
-
-The AUH utility
-also creates recipe update commits from successful upgrade attempts in
-the layer tree.
-
-You can easily set up the AUH utility to run on a regular basis by using
-a cron job. See the
-:yocto_git:`weeklyjob.sh </auto-upgrade-helper/tree/weeklyjob.sh>`
-file distributed with the utility for an example.
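-
-For instance, a crontab entry along the following lines (the schedule
-and the installation path are illustrative only) runs the shipped
-weekly job every Sunday morning::
-
- # m h dom mon dow command
- 30 2 * * sun /home/auh/auto-upgrade-helper/weeklyjob.sh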
-
-Using ``devtool upgrade``
--------------------------
-
-As mentioned earlier, an alternative method for upgrading recipes to
-newer versions is to use
-:doc:`devtool upgrade </ref-manual/devtool-reference>`.
-You can read about ``devtool upgrade`` in general in the
-":ref:`sdk-manual/extensible:use \`\`devtool upgrade\`\` to create a version of the recipe that supports a newer version of the software`"
-section in the Yocto Project Application Development and the Extensible
-Software Development Kit (eSDK) Manual.
-
-To see all the command-line options available with ``devtool upgrade``,
-use the following help command::
-
- $ devtool upgrade -h
-
-If you want to find out what version a recipe is currently at upstream
-without any attempt to upgrade your local version of the recipe, you can
-use the following command::
-
- $ devtool latest-version recipe_name
-
-As mentioned in the previous section describing AUH, ``devtool upgrade``
-works in a less-automated manner than AUH. Specifically,
-``devtool upgrade`` only works on a single recipe that you name on the
-command line, cannot perform build and integration testing using images,
-and does not automatically generate commits for changes in the source
-tree. Despite all these "limitations", ``devtool upgrade`` updates the
-recipe file to the new upstream version and attempts to rebase custom
-patches contained by the recipe as needed.
-
-.. note::
-
- AUH uses much of ``devtool upgrade`` behind the scenes making AUH somewhat
- of a "wrapper" application for ``devtool upgrade``.
-
-A typical scenario involves an upstream layer repository that you have
-cloned with Git and use during build operations. Because you have built the
-recipe in the past, the layer is likely already added to your
-configuration. If, for some reason, the layer is not added, you
-can add it easily using the
-":ref:`bitbake-layers <bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script>`"
-script. For example, suppose you use the ``nano.bb`` recipe from the
-``meta-oe`` layer in the ``meta-openembedded`` repository. For this
-example, assume that the layer has been cloned into the following area::
-
- /home/scottrif/meta-openembedded
-
-The following command from your
-:term:`Build Directory` adds the layer to
-your build configuration (i.e. ``${BUILDDIR}/conf/bblayers.conf``)::
-
- $ bitbake-layers add-layer /home/scottrif/meta-openembedded/meta-oe
- NOTE: Starting bitbake server...
- Parsing recipes: 100% |##########################################| Time: 0:00:55
- Parsing of 1431 .bb files complete (0 cached, 1431 parsed). 2040 targets, 56 skipped, 0 masked, 0 errors.
- Removing 12 recipes from the x86_64 sysroot: 100% |##############| Time: 0:00:00
- Removing 1 recipes from the x86_64_i586 sysroot: 100% |##########| Time: 0:00:00
- Removing 5 recipes from the i586 sysroot: 100% |#################| Time: 0:00:00
- Removing 5 recipes from the qemux86 sysroot: 100% |##############| Time: 0:00:00
-
-For this example, assume that the ``nano.bb`` recipe that
-is upstream has a 2.9.3 version number. However, the version in the
-local repository is 2.7.4. The following command from your build
-directory automatically upgrades the recipe for you:
-
-.. note::
-
- Using the ``-V`` option is not necessary. Omitting the version number causes
- ``devtool upgrade`` to upgrade the recipe to the most recent version.
-
-::
-
- $ devtool upgrade nano -V 2.9.3
- NOTE: Starting bitbake server...
- NOTE: Creating workspace layer in /home/scottrif/poky/build/workspace
- Parsing recipes: 100% |##########################################| Time: 0:00:46
- Parsing of 1431 .bb files complete (0 cached, 1431 parsed). 2040 targets, 56 skipped, 0 masked, 0 errors.
- NOTE: Extracting current version source...
- NOTE: Resolving any missing task queue dependencies
- .
- .
- .
- NOTE: Executing SetScene Tasks
- NOTE: Executing RunQueue Tasks
- NOTE: Tasks Summary: Attempted 74 tasks of which 72 didn't need to be rerun and all succeeded.
- Adding changed files: 100% |#####################################| Time: 0:00:00
- NOTE: Upgraded source extracted to /home/scottrif/poky/build/workspace/sources/nano
- NOTE: New recipe is /home/scottrif/poky/build/workspace/recipes/nano/nano_2.9.3.bb
-
-Continuing with this example, you can use ``devtool build`` to build the
-newly upgraded recipe::
-
- $ devtool build nano
- NOTE: Starting bitbake server...
- Loading cache: 100% |################################################################################################| Time: 0:00:01
- Loaded 2040 entries from dependency cache.
- Parsing recipes: 100% |##############################################################################################| Time: 0:00:00
- Parsing of 1432 .bb files complete (1431 cached, 1 parsed). 2041 targets, 56 skipped, 0 masked, 0 errors.
- NOTE: Resolving any missing task queue dependencies
- .
- .
- .
- NOTE: Executing SetScene Tasks
- NOTE: Executing RunQueue Tasks
- NOTE: nano: compiling from external source tree /home/scottrif/poky/build/workspace/sources/nano
- NOTE: Tasks Summary: Attempted 520 tasks of which 304 didn't need to be rerun and all succeeded.
-
-Within the ``devtool upgrade`` workflow, you can
-deploy and test your rebuilt software. For this example,
-however, the final step is running ``devtool finish``, which cleans up
-the workspace once the source tree in your workspace is clean. This
-usually means using Git to stage and commit the changes generated by the
-upgrade process.
-
-Once the tree is clean, you can finish up in this example with the
-following command from the ``${BUILDDIR}/workspace/sources/nano``
-directory::
-
- $ devtool finish nano meta-oe
- NOTE: Starting bitbake server...
- Loading cache: 100% |################################################################################################| Time: 0:00:00
- Loaded 2040 entries from dependency cache.
- Parsing recipes: 100% |##############################################################################################| Time: 0:00:01
- Parsing of 1432 .bb files complete (1431 cached, 1 parsed). 2041 targets, 56 skipped, 0 masked, 0 errors.
- NOTE: Adding new patch 0001-nano.bb-Stuff-I-changed-when-upgrading-nano.bb.patch
- NOTE: Updating recipe nano_2.9.3.bb
- NOTE: Removing file /home/scottrif/meta-openembedded/meta-oe/recipes-support/nano/nano_2.7.4.bb
- NOTE: Moving recipe file to /home/scottrif/meta-openembedded/meta-oe/recipes-support/nano
- NOTE: Leaving source tree /home/scottrif/poky/build/workspace/sources/nano as-is; if you no longer need it then please delete it manually
-
-
-Using the ``devtool finish`` command cleans up the workspace and creates a patch
-file based on your commits. The tool puts all patch files back into the
-source directory in a sub-directory named ``nano`` in this case.
-
-Manually Upgrading a Recipe
----------------------------
-
-If for some reason you choose not to upgrade recipes using
-:ref:`dev-manual/common-tasks:Using the Auto Upgrade Helper (AUH)` or
-by :ref:`dev-manual/common-tasks:Using \`\`devtool upgrade\`\``,
-you can manually edit the recipe files to upgrade the versions.
-
-.. note::
-
- Manually updating multiple recipes scales poorly and involves many
- steps. The recommended way to upgrade recipe versions is to use AUH
- or ``devtool upgrade``, both of which automate some of the steps and
- provide guidance for the others needed in the manual process.
-
-To manually upgrade recipe versions, follow these general steps:
-
-1. *Change the Version:* Rename the recipe such that the version (i.e.
- the :term:`PV` part of the recipe name)
- changes appropriately. If the version is not part of the recipe name,
- change the value as it is set for :term:`PV` within the recipe itself.
-
-2. *Update* :term:`SRCREV` *if Needed*: If the source code your recipe builds
- is fetched from Git or some other version control system, update
- :term:`SRCREV` to point to the commit hash that matches the new
- version (see the sketch after this list).
-
-3. *Build the Software:* Try to build the recipe using BitBake. Typical
- build failures include the following:
-
- - License statements were updated for the new version. For this
- case, you need to review any changes to the license and update the
- values of :term:`LICENSE` and
- :term:`LIC_FILES_CHKSUM`
- as needed.
-
- .. note::
-
- License changes are often inconsequential. For example, the
- license text's copyright year might have changed.
-
- - Custom patches carried by the older version of the recipe might
- fail to apply to the new version. For these cases, you need to
- review the failures. Patches might not be necessary for the new
- version of the software if the upgraded version has fixed those
- issues. If a patch is necessary and failing, you need to rebase it
- into the new version.
-
-4. *Optionally Attempt to Build for Several Architectures:* Once you
- successfully build the new software for a given architecture, you
- could test the build for other architectures by changing the
- :term:`MACHINE` variable and
- rebuilding the software. This optional step is especially important
- if the recipe is to be released publicly.
-
-5. *Check the Upstream Change Log or Release Notes:* Checking both these
- reveals if there are new features that could break
- backwards-compatibility. If so, you need to take steps to mitigate or
- eliminate that situation.
-
-6. *Optionally Create a Bootable Image and Test:* If you want, you can
- test the new software by booting it onto actual hardware.
-
-7. *Create a Commit with the Change in the Layer Repository:* After all
- builds work and any testing is successful, you can create commits for
- any changes in the layer holding your upgraded recipe.
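-
-The following is a hedged illustration of steps 1 and 2 for a
-hypothetical Git-based recipe (the recipe name, versions, and commit
-hash are placeholders only)::
-
- $ git mv foo_1.3.0.bb foo_1.4.0.bb
-
- # then, inside foo_1.4.0.bb, point SRCREV at the new release:
- SRCREV = "0123456789abcdef0123456789abcdef01234567"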
-
-Finding Temporary Source Code
-=============================
-
-You might find it helpful during development to modify the temporary
-source code used by recipes to build packages. For example, suppose you
-are developing a patch and you need to experiment a bit to figure out
-your solution. After you have initially built the package, you can
-iteratively tweak the source code, which is located in the
-:term:`Build Directory`, and then you can
-force a re-compile and quickly test your altered code. Once you settle
-on a solution, you can then preserve your changes in the form of
-patches.
-
-During a build, the unpacked temporary source code used by recipes to
-build packages is available in the Build Directory as defined by the
-:term:`S` variable. Below is the default
-value for the :term:`S` variable as defined in the
-``meta/conf/bitbake.conf`` configuration file in the
-:term:`Source Directory`::
-
- S = "${WORKDIR}/${BP}"
-
-You should be aware that many recipes override the
-:term:`S` variable. For example, recipes that fetch their source from Git
-usually set :term:`S` to ``${WORKDIR}/git``.
-
-.. note::
-
- The :term:`BP` represents the base recipe name, which consists of the name
- and version::
-
- BP = "${BPN}-${PV}"
-
-
-The path to the work directory for the recipe
-(:term:`WORKDIR`) is defined as
-follows::
-
- ${TMPDIR}/work/${MULTIMACH_TARGET_SYS}/${PN}/${EXTENDPE}${PV}-${PR}
-
-The actual directory depends on several things:
-
-- :term:`TMPDIR`: The top-level build
- output directory.
-
-- :term:`MULTIMACH_TARGET_SYS`:
- The target system identifier.
-
-- :term:`PN`: The recipe name.
-
-- :term:`EXTENDPE`: The epoch (if
- :term:`PE` is not specified, which is
- usually the case for most recipes, :term:`EXTENDPE` is blank).
-
-- :term:`PV`: The recipe version.
-
-- :term:`PR`: The recipe revision.
-
-As an example, assume a Source Directory top-level folder named
-``poky``, a default Build Directory at ``poky/build``, and a
-``qemux86-poky-linux`` machine target system. Furthermore, suppose your
-recipe is named ``foo_1.3.0.bb``. In this case, the work directory the
-build system uses to build the package would be as follows::
-
- poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0
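-
-If you prefer to query the build system rather than construct this path
-by hand, one convenient (if unofficial) approach is to ask BitBake for
-the expanded :term:`WORKDIR` value of the recipe::
-
- $ bitbake -e foo | grep "^WORKDIR="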
-
-Using Quilt in Your Workflow
-============================
-
-`Quilt <https://savannah.nongnu.org/projects/quilt>`__ is a powerful tool
-that allows you to capture source code changes without having a clean
-source tree. This section outlines the typical workflow you can use to
-modify source code, test changes, and then preserve the changes in the
-form of a patch all using Quilt.
-
-.. note::
-
- With regard to preserving changes to source files, if you clean a
- recipe or have ``rm_work`` enabled, the
- :ref:`devtool workflow <sdk-manual/extensible:using \`\`devtool\`\` in your sdk workflow>`
- as described in the Yocto Project Application Development and the
- Extensible Software Development Kit (eSDK) manual is a safer
- development flow than the flow that uses Quilt.
-
-Follow these general steps:
-
-1. *Find the Source Code:* Temporary source code used by the
- OpenEmbedded build system is kept in the
- :term:`Build Directory`. See the
- ":ref:`dev-manual/common-tasks:finding temporary source code`" section to
- learn how to locate the directory that has the temporary source code for a
- particular package.
-
-2. *Change Your Working Directory:* You need to be in the directory that
- has the temporary source code. That directory is defined by the
- :term:`S` variable.
-
-3. *Create a New Patch:* Before modifying source code, you need to
- create a new patch. To create a new patch file, use ``quilt new`` as
- below::
-
- $ quilt new my_changes.patch
-
-4. *Notify Quilt and Add Files:* After creating the patch, you need to
- notify Quilt about the files you plan to edit. You notify Quilt by
- adding the files to the patch you just created::
-
- $ quilt add file1.c file2.c file3.c
-
-5. *Edit the Files:* Make your changes in the source code to the files
- you added to the patch.
-
-6. *Test Your Changes:* Once you have modified the source code, the
- easiest way to test your changes is by calling the ``do_compile``
- task as shown in the following example::
-
- $ bitbake -c compile -f package
-
- The ``-f`` or ``--force`` option forces the specified task to
- execute. If you find problems with your code, you can just keep
- editing and re-testing iteratively until things work as expected.
-
- .. note::
-
- All the modifications you make to the temporary source code disappear
- once you run the ``do_clean`` or ``do_cleanall`` tasks using BitBake
- (i.e. ``bitbake -c clean package`` and ``bitbake -c cleanall package``).
- Modifications will also disappear if you use the ``rm_work`` feature as
- described in the
- ":ref:`dev-manual/common-tasks:conserving disk space during builds`"
- section.
-
-7. *Generate the Patch:* Once your changes work as expected, you need to
- use Quilt to generate the final patch that contains all your
- modifications.
- ::
-
- $ quilt refresh
-
- At this point, the
- ``my_changes.patch`` file has all your edits made to the ``file1.c``,
- ``file2.c``, and ``file3.c`` files.
-
- You can find the resulting patch file in the ``patches/``
- subdirectory of the source (:term:`S`) directory.
-
-8. *Copy the Patch File:* For simplicity, copy the patch file into a
- directory named ``files``, which you can create in the same directory
- that holds the recipe (``.bb``) file or the append (``.bbappend``)
- file. Placing the patch here guarantees that the OpenEmbedded build
- system will find the patch. Next, add the patch into the :term:`SRC_URI`
- of the recipe. Here is an example::
-
- SRC_URI += "file://my_changes.patch"
-
-Using a Development Shell
-=========================
-
-When debugging certain commands or even when just editing packages,
-``devshell`` can be a useful tool. When you invoke ``devshell``, all
-tasks up to and including
-:ref:`ref-tasks-patch` are run for the
-specified target. Then, a new terminal is opened and you are placed in
-``${``\ :term:`S`\ ``}``, the source
-directory. In the new terminal, all the OpenEmbedded build-related
-environment variables are still defined so you can use commands such as
-``configure`` and ``make``. The commands execute just as if the
-OpenEmbedded build system were executing them. Consequently, working
-this way can be helpful when debugging a build or preparing software to
-be used with the OpenEmbedded build system.
-
-Following is an example that uses ``devshell`` on a target named
-``matchbox-desktop``::
-
- $ bitbake matchbox-desktop -c devshell
-
-This command spawns a terminal with a shell prompt within the
-OpenEmbedded build environment. The
-:term:`OE_TERMINAL` variable
-controls what type of shell is opened.
-
-For spawned terminals, the following occurs:
-
-- The ``PATH`` variable includes the cross-toolchain.
-
-- The ``pkgconfig`` variables find the correct ``.pc`` files.
-
-- The ``configure`` command finds the Yocto Project site files as well
- as any other necessary files.
-
-Within this environment, you can run configure or compile commands as if
-they were being run by the OpenEmbedded build system itself. As noted
-earlier, the working directory also automatically changes to the Source
-Directory (:term:`S`).
-
-To manually run a specific task using ``devshell``, run the
-corresponding ``run.*`` script in the
-``${``\ :term:`WORKDIR`\ ``}/temp``
-directory (e.g., ``run.do_configure.``\ `pid`). If a task's script does
-not exist, which would be the case if the task was skipped by way of the
-sstate cache, you can create the task by first running it outside of the
-``devshell``::
-
- $ bitbake -c task
-
-.. note::
-
- - Execution of a task's ``run.*`` script and BitBake's execution of
- a task are identical. In other words, running the script re-runs
- the task just as it would be run using the ``bitbake -c`` command.
-
- - Any ``run.*`` file that does not have a ``.pid`` extension is a
- symbolic link (symlink) to the most recent version of that file.
-
-Remember that ``devshell`` is a mechanism that allows you to get
-into the BitBake task execution environment. As such, all commands
-must be called just as BitBake would call them. That means you need to
-provide the appropriate options for cross-compilation and so forth as
-applicable.
-
-When you are finished using ``devshell``, exit the shell or close the
-terminal window.
-
-.. note::
-
- - It is worth remembering that when using ``devshell`` you need to
- use the full compiler name such as ``arm-poky-linux-gnueabi-gcc``
- instead of just using ``gcc``. The same applies to other
- applications such as ``binutils``, ``libtool`` and so forth.
- BitBake sets up environment variables such as :term:`CC` to assist
- applications, such as ``make``, in finding the correct tools.
-
- - It is also worth noting that ``devshell`` still works over X11
- forwarding and similar situations.
-
-Using a Python Development Shell
-================================
-
-Similar to working within a development shell as described in the
-previous section, you can also spawn and work within an interactive
-Python development shell. When debugging certain commands or even when
-just editing packages, ``pydevshell`` can be a useful tool. When you
-invoke the ``pydevshell`` task, all tasks up to and including
-:ref:`ref-tasks-patch` are run for the
-specified target. Then a new terminal is opened. Additionally, key
-Python objects and code are available in the same way they are to
-BitBake tasks, in particular, the data store 'd'. So, commands such as
-the following are useful when exploring the data store and running
-functions::
-
- pydevshell> d.getVar("STAGING_DIR")
- '/media/build1/poky/build/tmp/sysroots'
- pydevshell> d.getVar("STAGING_DIR", False)
- '${TMPDIR}/sysroots'
- pydevshell> d.setVar("FOO", "bar")
- pydevshell> d.getVar("FOO")
- 'bar'
- pydevshell> d.delVar("FOO")
- pydevshell> d.getVar("FOO")
- pydevshell> bb.build.exec_func("do_unpack", d)
- pydevshell>
-
-The commands execute just as if the OpenEmbedded build
-system were executing them. Consequently, working this way can be
-helpful when debugging a build or preparing software to be used with the
-OpenEmbedded build system.
-
-Following is an example that uses ``pydevshell`` on a target named
-``matchbox-desktop``::
-
- $ bitbake matchbox-desktop -c pydevshell
-
-This command spawns a terminal and places you in an interactive Python
-interpreter within the OpenEmbedded build environment. The
-:term:`OE_TERMINAL` variable
-controls what type of shell is opened.
-
-When you are finished using ``pydevshell``, you can exit the shell
-either by using Ctrl+d or closing the terminal window.
-
-Building
-========
-
-This section describes various build procedures, such as the steps
-needed for a simple build, building a target for multiple configurations,
-generating an image for more than one machine, and so forth.
-
-Building a Simple Image
------------------------
-
-In the development environment, you need to build an image whenever you
-change hardware support, add or change system libraries, or add or
-change services that have dependencies. There are several methods that allow
-you to build an image within the Yocto Project. This section presents
-the basic steps you need to build a simple image using BitBake from a
-build host running Linux.
-
-.. note::
-
- - For information on how to build an image using
- :term:`Toaster`, see the
- :doc:`/toaster-manual/index`.
-
- - For information on how to use ``devtool`` to build images, see the
- ":ref:`sdk-manual/extensible:using \`\`devtool\`\` in your sdk workflow`"
- section in the Yocto Project Application Development and the
- Extensible Software Development Kit (eSDK) manual.
-
- - For a quick example on how to build an image using the
- OpenEmbedded build system, see the
- :doc:`/brief-yoctoprojectqs/index` document.
-
-The build process creates an entire Linux distribution from source and
-places it in your :term:`Build Directory` under
-``tmp/deploy/images``. For detailed information on the build process
-using BitBake, see the ":ref:`overview-manual/concepts:images`" section in the
-Yocto Project Overview and Concepts Manual.
-
-The following figure and list provide an overview of the build process:
-
-.. image:: figures/bitbake-build-flow.png
- :align: center
-
-1. *Set up Your Host Development System to Support Development Using the
- Yocto Project*: See the ":doc:`start`" section for options on how to get a
- build host ready to use the Yocto Project.
-
-2. *Initialize the Build Environment:* Initialize the build environment
- by sourcing the build environment script (i.e.
- :ref:`structure-core-script`)::
-
- $ source oe-init-build-env [build_dir]
-
- When you use the initialization script, the OpenEmbedded build system
- uses ``build`` as the default :term:`Build Directory` in your current work
- directory. You can use a `build_dir` argument with the script to
- specify a different build directory.
-
- .. note::
-
- A common practice is to use a different Build Directory for
- different targets; for example, ``~/build/x86`` for a ``qemux86``
- target, and ``~/build/arm`` for a ``qemuarm`` target. In any
- event, it's typically cleaner to locate the build directory
- somewhere outside of your source directory.
-
-3. *Make Sure Your* ``local.conf`` *File is Correct*: Ensure the
- ``conf/local.conf`` configuration file, which is found in the Build
- Directory, is set up how you want it. This file defines many aspects
- of the build environment including the target machine architecture
- through the :term:`MACHINE` variable, the packaging format used during
- the build
- (:term:`PACKAGE_CLASSES`),
- and a centralized tarball download directory through the
- :term:`DL_DIR` variable (see the sketch after this list).
-
-4. *Build the Image:* Build the image using the ``bitbake`` command::
-
- $ bitbake target
-
- .. note::
-
- For information on BitBake, see the :doc:`bitbake:index`.
-
- The target is the name of the recipe you want to build. Common
- targets are the images in ``meta/recipes-core/images``,
- ``meta/recipes-sato/images``, and so forth all found in the
- :term:`Source Directory`. Alternatively, the target
- can be the name of a recipe for a specific piece of software such as
- BusyBox. For more details about the images the OpenEmbedded build
- system supports, see the
- ":ref:`ref-manual/images:Images`" chapter in the Yocto
- Project Reference Manual.
-
- As an example, the following command builds the
- ``core-image-minimal`` image::
-
- $ bitbake core-image-minimal
-
- Once an
- image has been built, it often needs to be installed. The images and
- kernels built by the OpenEmbedded build system are placed in the
- Build Directory in ``tmp/deploy/images``. For information on how to
- run pre-built images such as ``qemux86`` and ``qemuarm``, see the
- :doc:`/sdk-manual/index` manual. For
- information about how to install these images, see the documentation
- for your particular board or machine.
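-
-As an illustrative sketch of the ``local.conf`` settings mentioned in
-step 3 (the values shown are common examples, not requirements), the
-relevant lines might look like this::
-
- MACHINE ?= "qemux86-64"
- PACKAGE_CLASSES ?= "package_rpm"
- DL_DIR ?= "${TOPDIR}/downloads"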
-
-Building Images for Multiple Targets Using Multiple Configurations
-------------------------------------------------------------------
-
-You can use a single ``bitbake`` command to build multiple images or
-packages for different targets where each image or package requires a
-different configuration (multiple configuration builds). The builds, in
-this scenario, are sometimes referred to as "multiconfigs", and this
-section uses that term throughout.
-
-This section describes how to set up for multiple configuration builds
-and how to account for cross-build dependencies between the
-multiconfigs.
-
-Setting Up and Running a Multiple Configuration Build
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To accomplish a multiple configuration build, you must define each
-target's configuration separately using a parallel configuration file in
-the :term:`Build Directory`, and you
-must follow a required file hierarchy. Additionally, you must enable the
-multiple configuration builds in your ``local.conf`` file.
-
-Follow these steps to set up and execute multiple configuration builds:
-
-- *Create Separate Configuration Files*: You need to create a single
- configuration file for each build target (each multiconfig).
- Minimally, each configuration file must define the machine and the
- temporary directory BitBake uses for the build. Suggested practice
- dictates that you do not overlap the temporary directories used
- during the builds. However, it is possible that you can share the
- temporary directory
- (:term:`TMPDIR`). For example,
- consider a scenario with two different multiconfigs for the same
- :term:`MACHINE`: "qemux86" built
- for two distributions such as "poky" and "poky-lsb". In this case,
- you might want to use the same :term:`TMPDIR`.
-
- Here is an example showing the minimal statements needed in a
- configuration file for a "qemux86" target whose temporary build
- directory is ``tmpmultix86``::
-
- MACHINE = "qemux86"
- TMPDIR = "${TOPDIR}/tmpmultix86"
-
- The location for these multiconfig configuration files is specific.
- They must reside in the current build directory in a sub-directory of
- ``conf`` named ``multiconfig``. Following is an example that defines
- two configuration files for the "x86" and "arm" multiconfigs:
-
- .. image:: figures/multiconfig_files.png
- :align: center
-
- The reason for this required file hierarchy is that the :term:`BBPATH`
- variable is not constructed until the layers are parsed.
- Consequently, using the configuration file as a pre-configuration
- file is not possible unless it is located in the current working
- directory.
-
-- *Add the BitBake Multi-configuration Variable to the Local
- Configuration File*: Use the
- :term:`BBMULTICONFIG`
- variable in your ``conf/local.conf`` configuration file to specify
- each multiconfig. Continuing with the example from the previous
- figure, the :term:`BBMULTICONFIG` variable needs to enable two
- multiconfigs: "x86" and "arm" by specifying each configuration file::
-
- BBMULTICONFIG = "x86 arm"
-
- .. note::
-
- A "default" configuration already exists by definition. This
- configuration is named: "" (i.e. empty string) and is defined by
- the variables coming from your ``local.conf``
- file. Consequently, the previous example actually adds two
- additional configurations to your build: "arm" and "x86" along
- with "".
-
-- *Launch BitBake*: Use the following BitBake command form to launch
- the multiple configuration build::
-
- $ bitbake [mc:multiconfigname:]target [[[mc:multiconfigname:]target] ... ]
-
- For the example in this section, the following command applies::
-
- $ bitbake mc:x86:core-image-minimal mc:arm:core-image-sato mc::core-image-base
-
- The previous BitBake command builds a ``core-image-minimal`` image
- that is configured through the ``x86.conf`` configuration file, a
- ``core-image-sato`` image that is configured through the ``arm.conf``
- configuration file and a ``core-image-base`` that is configured
- through your ``local.conf`` configuration file.
-
-.. note::
-
- Support for multiple configuration builds in the Yocto Project &DISTRO;
- (&DISTRO_NAME;) Release does not include Shared State (sstate)
- optimizations. Consequently, if a build uses the same object twice
- in, for example, two different :term:`TMPDIR`
- directories, the build either loads from an existing sstate cache for
- that build at the start or builds the object fresh.
-
-Enabling Multiple Configuration Build Dependencies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Sometimes dependencies can exist between targets (multiconfigs) in a
-multiple configuration build. For example, suppose that in order to
-build a ``core-image-sato`` image for an "x86" multiconfig, the root
-filesystem of an "arm" multiconfig must exist. This dependency is
-essentially that the
-:ref:`ref-tasks-image` task in the
-``core-image-sato`` recipe depends on the completion of the
-:ref:`ref-tasks-rootfs` task of the
-``core-image-minimal`` recipe.
-
-To enable dependencies in a multiple configuration build, you must
-declare the dependencies in the recipe using the following statement
-form::
-
- task_or_package[mcdepends] = "mc:from_multiconfig:to_multiconfig:recipe_name:task_on_which_to_depend"
-
-To better show how to use this statement, consider the example scenario
-from the first paragraph of this section. The following statement needs
-to be added to the recipe that builds the ``core-image-sato`` image::
-
- do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_rootfs"
-
-In this example, the `from_multiconfig` is "x86". The `to_multiconfig` is "arm". The
-task on which the ``do_image`` task in the recipe depends is the
-``do_rootfs`` task from the ``core-image-minimal`` recipe associated
-with the "arm" multiconfig.
-
-Once you set up this dependency, you can build the "x86" multiconfig
-using a BitBake command as follows::
-
- $ bitbake mc:x86:core-image-sato
-
-This command executes all the tasks needed to create the
-``core-image-sato`` image for the "x86" multiconfig. Because of the
-dependency, BitBake also executes through the ``do_rootfs`` task for the
-"arm" multiconfig build.
-
-Having a recipe depend on the root filesystem of another build might not
-seem that useful. Consider this change to the statement in the
-``core-image-sato`` recipe::
-
- do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_image"
-
-In this case, BitBake must
-create the ``core-image-minimal`` image for the "arm" build since the
-"x86" build depends on it.
-
-Because "x86" and "arm" are enabled for multiple configuration builds
-and have separate configuration files, BitBake places the artifacts for
-each build in the respective temporary build directories (i.e.
-:term:`TMPDIR`).
-
-Building an Initial RAM Filesystem (initramfs) Image
-----------------------------------------------------
-
-An initial RAM filesystem (initramfs) image provides a temporary root
-filesystem used for early system initialization (e.g. loading of modules
-needed to locate and mount the "real" root filesystem).
-
-.. note::
-
- The initramfs image is the successor of initial RAM disk (initrd). It
- is a "copy in and out" (cpio) archive of the initial filesystem that
- gets loaded into memory during the Linux startup process. Because
- Linux uses the contents of the archive during initialization, the
- initramfs image needs to contain all of the device drivers and tools
- needed to mount the final root filesystem.
-
-Follow these steps to create an initramfs image:
-
-1. *Create the initramfs Image Recipe:* You can reference the
- ``core-image-minimal-initramfs.bb`` recipe found in the
- ``meta/recipes-core`` directory of the :term:`Source Directory`
- as an example
- from which to work.
-
-2. *Decide if You Need to Bundle the initramfs Image Into the Kernel
- Image:* If you want the initramfs image that is built to be bundled
- in with the kernel image, set the
- :term:`INITRAMFS_IMAGE_BUNDLE`
- variable to "1" in your ``local.conf`` configuration file and set the
- :term:`INITRAMFS_IMAGE`
- variable in the recipe that builds the kernel image (see the sketch
- after this list).
-
- .. note::
-
- It is recommended that you bundle the initramfs image with the
- kernel image to avoid circular dependencies between the kernel
- recipe and the initramfs recipe should the initramfs image include
- kernel modules.
-
- Setting the :term:`INITRAMFS_IMAGE_BUNDLE` flag causes the initramfs
- image to be unpacked into the ``${B}/usr/`` directory. The unpacked
- initramfs image is then passed to the kernel's ``Makefile`` using the
- :term:`CONFIG_INITRAMFS_SOURCE`
- variable, allowing the initramfs image to be built into the kernel
- normally.
-
- .. note::
-
- Bundling the initramfs with the kernel conflates the code in the initramfs
- with the GPLv2 licensed Linux kernel binary. Thus only GPLv2 compatible
- software may be part of a bundled initramfs.
-
- .. note::
-
- If you choose to not bundle the initramfs image with the kernel
- image, you are essentially using an
- `Initial RAM Disk (initrd) <https://en.wikipedia.org/wiki/Initrd>`__.
- Creating an initrd is handled primarily through the :term:`INITRD_IMAGE`,
- ``INITRD_LIVE``, and ``INITRD_IMAGE_LIVE`` variables. For more
- information, see the :ref:`ref-classes-image-live` file.
-
-3. *Optionally Add Items to the initramfs Image Through the initramfs
- Image Recipe:* If you add items to the initramfs image by way of its
- recipe, you should use
- :term:`PACKAGE_INSTALL`
- rather than
- :term:`IMAGE_INSTALL`.
- :term:`PACKAGE_INSTALL` gives more direct control of what is added to the
- image as compared to the defaults you might not necessarily want that
- are set by the :ref:`image <ref-classes-image>`
- or :ref:`core-image <ref-classes-core-image>`
- classes.
-
-4. *Build the Kernel Image and the initramfs Image:* Build your kernel
- image using BitBake. Because the initramfs image recipe is a
- dependency of the kernel image, the initramfs image is built as well
- and bundled with the kernel image if you used the
- :term:`INITRAMFS_IMAGE_BUNDLE`
- variable described earlier.
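-
-As a hedged sketch of step 2 above (``core-image-minimal-initramfs`` is
-used purely as an example image name), the bundling can be requested
-with the following lines, with :term:`INITRAMFS_IMAGE_BUNDLE` in
-``conf/local.conf`` and :term:`INITRAMFS_IMAGE` set either globally or
-in the kernel recipe as described above::
-
- INITRAMFS_IMAGE_BUNDLE = "1"
- INITRAMFS_IMAGE = "core-image-minimal-initramfs"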
-
-Bundling an Initramfs Image From a Separate Multiconfig
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There may be a case where you want to build an initramfs image that does not
-inherit the same distro policy as your main image. For example, you may want
-your main image to use ``TCLIBC="glibc"`` but your initramfs
-image to use ``TCLIBC="musl"`` to keep a smaller footprint. However, by
-performing the steps mentioned above, the initramfs image inherits
-``TCLIBC="glibc"`` without allowing you to override it.
-
-To achieve this, you need to perform some additional steps:
-
-1. *Create a multiconfig for your initramfs image:* You can perform the steps
- in ":ref:`dev-manual/common-tasks:building images for multiple targets using multiple configurations`" to create a separate multiconfig.
- For the sake of simplicity, assume the multiconfig file is called ``initramfscfg.conf`` and
- contains the variables::
-
- TMPDIR="${TOPDIR}/tmp-initramfscfg"
- TCLIBC="musl"
-
-2. *Set additional initramfs variables on your main configuration:*
- Additionally, on your main configuration (``local.conf``) you need to set the
- variables::
-
- INITRAMFS_MULTICONFIG = "initramfscfg"
- INITRAMFS_DEPLOY_DIR_IMAGE = "${TOPDIR}/tmp-initramfscfg/deploy/images/${MACHINE}"
-
- The :term:`INITRAMFS_MULTICONFIG` and :term:`INITRAMFS_DEPLOY_DIR_IMAGE`
- variables create a multiconfig dependency from the kernel to the
- :term:`INITRAMFS_IMAGE` built by the ``initramfscfg`` multiconfig, and let the
- build system know where the :term:`INITRAMFS_IMAGE` will be located.
-
- Building a system with such a configuration builds the kernel using the
- main configuration, but the ``do_bundle_initramfs`` task grabs the
- selected :term:`INITRAMFS_IMAGE` from :term:`INITRAMFS_DEPLOY_DIR_IMAGE`
- instead, resulting in a musl-based initramfs image bundled in the kernel
- and a glibc-based main image.
-
- The same approach can be used to avoid inheriting :term:`DISTRO_FEATURES` in the :term:`INITRAMFS_IMAGE`,
- or to build it with a different :term:`DISTRO`, such as ``poky-tiny``.
-
-
-Building a Tiny System
-----------------------
-
-Very small distributions have some significant advantages such as
-requiring less on-die or in-package memory (cheaper), better performance
-through efficient cache usage, lower power requirements due to less
-memory, faster boot times, and reduced development overhead. Some
-real-world examples where a very small distribution gives you distinct
-advantages are digital cameras, medical devices, and small headless
-systems.
-
-This section shows you how you can trim your distribution to an even
-smaller size than the ``poky-tiny`` distribution, which is around
-5 Mbytes and can be built out-of-the-box using the Yocto Project.
-
-Tiny System Overview
-~~~~~~~~~~~~~~~~~~~~
-
-The following list presents the overall steps you need to consider and
-perform to create distributions with smaller root filesystems, achieve
-faster boot times, maintain your critical functionality, and avoid
-initial RAM disks:
-
-- :ref:`Determine your goals and guiding principles
- <dev-manual/common-tasks:goals and guiding principles>`
-
-- :ref:`dev-manual/common-tasks:understand what contributes to your image size`
-
-- :ref:`Reduce the size of the root filesystem
- <dev-manual/common-tasks:trim the root filesystem>`
-
-- :ref:`Reduce the size of the kernel <dev-manual/common-tasks:trim the kernel>`
-
-- :ref:`dev-manual/common-tasks:remove package management requirements`
-
-- :ref:`dev-manual/common-tasks:look for other ways to minimize size`
-
-- :ref:`dev-manual/common-tasks:iterate on the process`
-
-Goals and Guiding Principles
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Before you can reach your destination, you need to know where you are
-going. Here is an example list that you can use as a guide when creating
-very small distributions:
-
-- Determine how much space you need (e.g. a kernel that is 1 Mbyte or
- less and a root filesystem that is 3 Mbytes or less).
-
-- Find the areas that are currently taking 90% of the space and
- concentrate on reducing those areas.
-
-- Do not create any difficult "hacks" to achieve your goals.
-
-- Leverage the device-specific options.
-
-- Work in a separate layer so that you keep changes isolated. For
- information on how to create layers, see the
- ":ref:`dev-manual/common-tasks:understanding and creating layers`" section.
-
-Understand What Contributes to Your Image Size
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-It is easiest to have something to start with when creating your own
-distribution. You can use the Yocto Project out-of-the-box to create the
-``poky-tiny`` distribution. Ultimately, you will want to make changes in
-your own distribution that are likely modeled after ``poky-tiny``.
-
-.. note::
-
- To use ``poky-tiny`` in your build, set the :term:`DISTRO` variable in your
- ``local.conf`` file to "poky-tiny" as described in the
- ":ref:`dev-manual/common-tasks:creating your own distribution`"
- section.
-
-Understanding some memory concepts will help you reduce the system size.
-Memory consists of static, dynamic, and temporary memory. Static memory
-is the TEXT (code), DATA (initialized data in the code), and BSS
-(uninitialized data) sections. Dynamic memory represents memory that is
-allocated at runtime: stacks, hash tables, and so forth. Temporary
-memory is recovered after the boot process. This memory consists of
-memory used for decompressing the kernel and for the ``__init``
-functions.
-
-To help you see where you currently are with kernel and root filesystem
-sizes, you can use two tools found in the :term:`Source Directory`
-in the
-``scripts/tiny/`` directory:
-
-- ``ksize.py``: Reports component sizes for the kernel build objects.
-
-- ``dirsize.py``: Reports component sizes for the root filesystem.
-
-This next tool and command help you organize configuration fragments and
-view file dependencies in a human-readable form:
-
-- ``merge_config.sh``: Helps you manage configuration files and
- fragments within the kernel. With this tool, you can merge individual
- configuration fragments together. The tool allows you to make
- overrides and warns you of any missing configuration options. The
- tool is ideal for allowing you to iterate on configurations, create
- minimal configurations, and create configuration files for different
- machines without having to duplicate your process.
-
- The ``merge_config.sh`` script is part of the Linux Yocto kernel Git
- repositories (i.e. ``linux-yocto-3.14``, ``linux-yocto-3.10``,
- ``linux-yocto-3.8``, and so forth) in the ``scripts/kconfig``
- directory.
-
- For more information on configuration fragments, see the
- ":ref:`kernel-dev/common:creating configuration fragments`"
- section in the Yocto Project Linux Kernel Development Manual.
-
-- ``bitbake -u taskexp -g bitbake_target``: Using the BitBake command
- with these options brings up a Dependency Explorer from which you can
- view file dependencies. Understanding these dependencies allows you
- to make informed decisions when cutting out various pieces of the
- kernel and root filesystem.
-
-Trim the Root Filesystem
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-The root filesystem is made up of packages for booting, libraries, and
-applications. To change things, you can configure how the packaging
-happens, which changes the way you build them. You can also modify the
-filesystem itself or select a different filesystem.
-
-First, find out what is hogging your root filesystem by running the
-``dirsize.py`` script from your root directory::
-
- $ cd root-directory-of-image
- $ dirsize.py 100000 > dirsize-100k.log
- $ cat dirsize-100k.log
-
-You can apply a filter to the script to ignore files
-under a certain size. The previous example filters out any files below
-100 Kbytes. The sizes reported by the tool are uncompressed, and thus
-will be smaller by a relatively constant factor in a compressed root
-filesystem. When you examine your log file, you can focus on areas of
-the root filesystem that take up large amounts of memory.
-
-You need to be sure that what you eliminate does not cripple the
-functionality you need. One way to see how packages relate to each other
-is by using the Dependency Explorer UI with the BitBake command::
-
- $ cd image-directory
- $ bitbake -u taskexp -g image
-
-Use the interface to
-select potential packages you wish to eliminate and see their dependency
-relationships.
-
-When deciding how to reduce the size, get rid of packages that result in
-minimal impact on the feature set. For example, you might not need a VGA
-display. Or, you might be able to get by with ``devtmpfs`` and ``mdev``
-instead of ``udev``.
-
-Use your ``local.conf`` file to make changes. For example, to eliminate
-``udev`` and ``glib``, set the following in the local configuration
-file::
-
- VIRTUAL-RUNTIME_dev_manager = ""
-
-Finally, you should consider exactly the type of root filesystem you
-need to meet your needs while also reducing its size. For example,
-consider ``cramfs``, ``squashfs``, ``ubifs``, ``ext2``, or an
-``initramfs`` used as the root filesystem. Be aware that ``ext3``
-requires a 1 Mbyte journal. If you are okay with running read-only, you
-do not need this journal.
-
-.. note::
-
- After each round of elimination, you need to rebuild your system and
- then use the tools to see the effects of your reductions.
-
-Trim the Kernel
-~~~~~~~~~~~~~~~
-
-The kernel is built by including policies for hardware-independent
-aspects. What subsystems do you enable? For what architecture are you
-building? Which drivers do you build by default?
-
-.. note::
-
- You can modify the kernel source if you want to help with boot time.
-
-Run the ``ksize.py`` script from the top-level Linux build directory to
-get an idea of what is making up the kernel::
-
- $ cd top-level-linux-build-directory
- $ ksize.py > ksize.log
- $ cat ksize.log
-
-When you examine the log, you will see how much space is taken up with
-the built-in ``.o`` files for drivers, networking, core kernel files,
-filesystem, sound, and so forth. The sizes reported by the tool are
-uncompressed, and thus will be smaller by a relatively constant factor
-in a compressed kernel image. Look to reduce the areas that are large,
-keeping the "90% rule" in mind.
-
-To examine, or drill down, into any particular area, use the ``-d``
-option with the script::
-
- $ ksize.py -d > ksize.log
-
-Using this option
-breaks out the individual file information for each area of the kernel
-(e.g. drivers, networking, and so forth).
-
-Use your log file to see what you can eliminate from the kernel based on
-features you can let go. For example, if you are not going to need
-sound, you do not need any drivers that support sound.
-
-After figuring out what to eliminate, you need to reconfigure the kernel
-to reflect those changes during the next build. You could run
-``menuconfig`` and make all your changes at once. However, that makes it
-difficult to see the effects of your individual eliminations and also
-makes it difficult to replicate the changes for perhaps another target
-device. A better method is to start with no configurations using
-``allnoconfig``, create configuration fragments for individual changes,
-and then manage the fragments into a single configuration file using
-``merge_config.sh``. The tool makes it easy for you to iterate using the
-configuration change and build cycle.
-
-Each time you make configuration changes, you need to rebuild the kernel
-and check to see what impact your changes had on the overall size.
-
-Remove Package Management Requirements
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Packaging requirements add size to the image. One way to reduce the size
-of the image is to remove all the packaging requirements from the image.
-This reduction includes both removing the package manager and its unique
-dependencies as well as removing the package management data itself.
-
-To eliminate all the packaging requirements for an image, be sure that
-"package-management" is not part of your
-:term:`IMAGE_FEATURES`
-statement for the image. When you remove this feature, you are removing
-the package manager as well as its dependencies from the root
-filesystem.
-
-Look for Other Ways to Minimize Size
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Depending on your particular circumstances, other areas that you can
-trim likely exist. The key to finding these areas is through tools and
-methods described here combined with experimentation and iteration. Here
-are a couple of areas to experiment with:
-
-- ``glibc``: In general, follow this process:
-
- 1. Remove ``glibc`` features from
- :term:`DISTRO_FEATURES`
- that you think you do not need (see the sketch after this list).
-
- 2. Build your distribution.
-
- 3. If the build fails due to missing symbols in a package, determine
- if you can reconfigure the package to not need those features. For
- example, change the configuration to disable wide character support,
- as is done for ``ncurses``. Or, if support for those characters is
- needed, determine what ``glibc`` features provide the support and
- restore the configuration.
-
- 4. Rebuild and repeat the process.
-
-- ``busybox``: For BusyBox, use a process similar to the one described
- for ``glibc``. One difference is that you need to boot the resulting
- system to see whether you can do everything you expect from the
- running system. Be sure to integrate configuration fragments into
- BusyBox because BusyBox handles its own core features and then allows
- you to add configuration fragments on top.
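-
-Here is a minimal sketch of the first ``glibc`` step, assuming ``ipv6``
-and ``nls`` are features you have decided you can live without (adjust
-the list to your own needs)::
-
- DISTRO_FEATURES:remove = "ipv6 nls"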
-
-Iterate on the Process
-~~~~~~~~~~~~~~~~~~~~~~
-
-If you have not reached your goals on system size, you need to iterate
-on the process. The process is the same. Use the tools and see just what
-is taking up 90% of the root filesystem and the kernel. Decide what you
-can eliminate without limiting your device beyond what you need.
-
-Depending on your system, a good place to look might be BusyBox, which
-provides a stripped-down version of Unix tools in a single executable
-file. You might be able to drop virtual terminal services or perhaps
-IPv6.
-
-Building Images for More than One Machine
------------------------------------------
-
-A common scenario developers face is creating images for several
-different machines that use the same software environment. In this
-situation, it is tempting to set the tunings and optimization flags for
-each build specifically for the targeted hardware (i.e. "maxing out" the
-tunings). Doing so can considerably add to build times and package feed
-maintenance collectively for the machines. For example, setting
-:term:`DEFAULTTUNE` to a tune that is extremely specific to the CPU
-core used in one system might enable some GCC micro-optimizations for
-that particular system, but it usually gains you little performance
-compared to using a more general tuning shared across all the builds.
-Rather than "max out" each
-build's tunings, you can take steps that cause the OpenEmbedded build
-system to reuse software across the various machines where it makes
-sense.
-
-If build speed and package feed maintenance are considerations, the
-points in this section can help you choose tunings that balance build
-times against package feed maintenance.
-
-- *Share the Build Directory:* If at all possible, share the
- :term:`TMPDIR` across builds. The
- Yocto Project supports switching between different
- :term:`MACHINE` values in the same
- :term:`TMPDIR`. This practice is well supported and regularly used by
- developers when building for multiple machines. When you use the same
- :term:`TMPDIR` for multiple machine builds, the OpenEmbedded build system
- can reuse the existing native and often cross-recipes for multiple
- machines. Thus, build time decreases.
-
- .. note::
-
- If :term:`DISTRO` settings change or fundamental configuration settings
- such as the filesystem layout, you need to work with a clean :term:`TMPDIR`.
- Sharing :term:`TMPDIR` under these circumstances might work but since it is
- not guaranteed, you should use a clean :term:`TMPDIR`.
-
-- *Enable the Appropriate Package Architecture:* By default, the
- OpenEmbedded build system enables three levels of package
- architectures: "all", "tune" or "package", and "machine". Any given
- recipe usually selects one of these package architectures (types) for
- its output. Depending on what a given recipe creates packages for,
- making sure you enable the appropriate package architecture can
- directly impact the build time.
-
- A recipe that just generates scripts can enable "all" architecture
- because there are no binaries to build. To specifically enable "all"
- architecture, be sure your recipe inherits the
- :ref:`allarch <ref-classes-allarch>` class.
- This class is useful for "all" architectures because it configures
- many variables so packages can be used across multiple architectures.
-
- If your recipe needs to generate packages that are machine-specific
- or when one of the build or runtime dependencies is already
- machine-architecture dependent, which makes your recipe also
- machine-architecture dependent, make sure your recipe enables the
- "machine" package architecture by setting :term:`PACKAGE_ARCH` to the
- :term:`MACHINE_ARCH`
- value::
-
- PACKAGE_ARCH = "${MACHINE_ARCH}"
-
- When you do not
- specifically enable a package architecture through the
- :term:`PACKAGE_ARCH` variable, the
- OpenEmbedded build system defaults to the
- :term:`TUNE_PKGARCH` setting::
-
- PACKAGE_ARCH = "${TUNE_PKGARCH}"
-
-- *Choose a Generic Tuning File if Possible:* Some tunes are more
- generic and can run on multiple targets (e.g. an ``armv5`` set of
- packages could run on ``armv6`` and ``armv7`` processors in most
- cases). Similarly, ``i486`` binaries could work on ``i586`` and
- higher processors. You should realize, however, that advances on
- newer processor versions would not be used.
-
- If you select the same tune for several different machines, the
- OpenEmbedded build system reuses software previously built, thus
- speeding up the overall build time. Realize that even though a new
- sysroot for each machine is generated, the software is not recompiled
- and only one package feed exists.
-
-- *Manage Granular Level Packaging:* Sometimes there are cases where
- injecting another level of package architecture beyond the three
- higher levels noted earlier can be useful. For example, consider how
- NXP (formerly Freescale) allows for the easy reuse of binary packages
- in their layer
- :yocto_git:`meta-freescale </meta-freescale/>`.
- In this example, the
- :yocto_git:`fsl-dynamic-packagearch </meta-freescale/tree/classes/fsl-dynamic-packagearch.bbclass>`
- class shares GPU packages for i.MX53 boards because all boards share
- the AMD GPU. The i.MX6-based boards can do the same because all
- boards share the Vivante GPU. This class inspects the BitBake
- datastore to identify if the package provides or depends on one of
- the sub-architecture values. If so, the class sets the
- :term:`PACKAGE_ARCH` value
- based on the ``MACHINE_SUBARCH`` value. If the package does not
- provide or depend on one of the sub-architecture values but it
- matches a value in the machine-specific filter, it sets
- :term:`MACHINE_ARCH`. This
- behavior reduces the number of packages built and saves build time by
- reusing binaries.
-
-- *Use Tools to Debug Issues:* Sometimes you can run into situations
- where software is being rebuilt when you think it should not be. For
- example, the OpenEmbedded build system might not be using shared
- state between machines when you think it should be. These types of
- situations are usually due to references to machine-specific
- variables such as :term:`MACHINE`,
- :term:`SERIAL_CONSOLES`,
- :term:`XSERVER`,
- :term:`MACHINE_FEATURES`,
- and so forth in code that is supposed to only be tune-specific or
- when the recipe depends
- (:term:`DEPENDS`,
- :term:`RDEPENDS`,
- :term:`RRECOMMENDS`,
- :term:`RSUGGESTS`, and so forth)
- on some other recipe that already has
- :term:`PACKAGE_ARCH` defined
- as "${MACHINE_ARCH}".
-
- .. note::
-
- Patches to fix any issues identified are most welcome as these
- issues occasionally do occur.
-
- For such cases, you can use some tools to help you sort out the
- situation:
-
- - ``state-diff-machines.sh``: You can find this tool in the
- ``scripts`` directory of the Source Repositories. See the comments
- in the script for information on how to use the tool.
-
- - *BitBake's "-S printdiff" Option:* Using this option causes
- BitBake to try to establish the closest signature match it can
- (e.g. in the shared state cache) and then run ``bitbake-diffsigs``
- over the matches to determine the stamps and delta where these two
- stamp trees diverge.
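-
- As an illustration, here is a sketch of the second approach (the
- image name is only an example)::
-
- $ bitbake core-image-minimal -S printdiff
-
- BitBake then reports, for each task, the closest matching signature
- it could find (for example, in the shared state cache) and the
- differences between the two, which usually point to the variable or
- dependency causing the unexpected rebuild.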
-
-Building Software from an External Source
------------------------------------------
-
-By default, the OpenEmbedded build system uses the
-:term:`Build Directory` when building source
-code. The build process involves fetching the source files, unpacking
-them, and then patching them if necessary before the build takes place.
-
-There are situations where you might want to build software from source
-files that are external to and thus outside of the OpenEmbedded build
-system. For example, suppose you have a project that includes a new BSP
-with a heavily customized kernel. And, you want to minimize exposing the
-build system to the development team so that they can focus on their
-project and maintain everyone's workflow as much as possible. In this
-case, you want a kernel source directory on the development machine
-where the development occurs. You want the recipe's
-:term:`SRC_URI` variable to point to
-the external directory and use it as is, not copy it.
-
-To build from software that comes from an external source, all you need
-to do is inherit the
-:ref:`externalsrc <ref-classes-externalsrc>` class
-and then set the
-:term:`EXTERNALSRC` variable to
-point to your external source code. Here are the statements to put in
-your ``local.conf`` file::
-
- INHERIT += "externalsrc"
- EXTERNALSRC:pn-myrecipe = "path-to-your-source-tree"
-
-This next example shows how to accomplish the same thing by setting
-:term:`EXTERNALSRC` in the recipe itself or in the recipe's append file::
-
- EXTERNALSRC = "path"
- EXTERNALSRC_BUILD = "path"
-
-.. note::
-
- In order for these settings to take effect, you must globally or
- locally inherit the :ref:`externalsrc <ref-classes-externalsrc>`
- class.
-
-By default, :ref:`ref-classes-externalsrc` builds the source code in a
-directory separate from the external source directory as specified by
-:term:`EXTERNALSRC`. If you need
-to have the source built in the same directory in which it resides, or
-some other nominated directory, you can set
-:term:`EXTERNALSRC_BUILD`
-to point to that directory::
-
- EXTERNALSRC_BUILD:pn-myrecipe = "path-to-your-source-tree"
-
-Replicating a Build Offline
----------------------------
-
-It can be useful to take a "snapshot" of upstream sources used in a
-build and then use that "snapshot" later to replicate the build offline.
-To do so, you first need to prepare and populate your downloads
-directory with your "snapshot" of files. Once your downloads directory is
-ready, you can use it at any time and from any machine to replicate your
-build.
-
-Follow these steps to populate your Downloads directory:
-
-1. *Create a Clean Downloads Directory:* Start with an empty downloads
- directory (:term:`DL_DIR`). You
- start with an empty downloads directory by either removing the files
- in the existing directory or by setting :term:`DL_DIR` to point to either
- an empty location or one that does not yet exist.
-
-2. *Generate Tarballs of the Source Git Repositories:* Edit your
- ``local.conf`` configuration file as follows::
-
- DL_DIR = "/home/your-download-dir/"
- BB_GENERATE_MIRROR_TARBALLS = "1"
-
- During
- the fetch process in the next step, BitBake gathers the source files
- and creates tarballs in the directory pointed to by :term:`DL_DIR`. See
- the
- :term:`BB_GENERATE_MIRROR_TARBALLS`
- variable for more information.
-
-3. *Populate Your Downloads Directory Without Building:* Use BitBake to
- fetch your sources but inhibit the build::
-
- $ bitbake target --runonly=fetch
-
- The downloads directory (i.e. ``${DL_DIR}``) now has
- a "snapshot" of the source files in the form of tarballs, which can
- be used for the build.
-
-4. *Optionally Remove Any Git or other SCM Subdirectories From the
- Downloads Directory:* If you want, you can clean up your downloads
- directory by removing any Git or other Source Control Management
- (SCM) subdirectories such as ``${DL_DIR}/git2/*``. The tarballs
- already contain these subdirectories.
-
-Once your downloads directory has everything it needs regarding source
-files, you can create your "own-mirror" and build your target.
-Understand that you can use the files to build the target offline from
-any machine and at any time.
-
-Follow these steps to build your target using the files in the downloads
-directory:
-
-1. *Using Local Files Only:* Inside your ``local.conf`` file, add the
- :term:`SOURCE_MIRROR_URL` variable, inherit the
- :ref:`own-mirrors <ref-classes-own-mirrors>` class, and set the
- :term:`BB_NO_NETWORK` variable::
-
- SOURCE_MIRROR_URL ?= "file:///home/your-download-dir/"
- INHERIT += "own-mirrors"
- BB_NO_NETWORK = "1"
-
- The :term:`SOURCE_MIRROR_URL` and :ref:`own-mirrors <ref-classes-own-mirrors>`
- class set up the system to use the downloads directory as your "own
- mirror". Using the :term:`BB_NO_NETWORK` variable makes sure that
- BitBake's fetching process in step 3 stays local, which means files
- from your "own-mirror" are used.
-
-2. *Start With a Clean Build:* You can start with a clean build by
- removing the
- ``${``\ :term:`TMPDIR`\ ``}``
- directory or using a new :term:`Build Directory`.
-
-3. *Build Your Target:* Use BitBake to build your target::
-
- $ bitbake target
-
- The build completes using the known local "snapshot" of source
- files from your mirror. The resulting tarballs for your "snapshot" of
- source files are in the downloads directory.
-
- .. note::
-
- The offline build does not work if recipes attempt to find the
- latest version of software by setting
- :term:`SRCREV` to
- ``${``\ :term:`AUTOREV`\ ``}``::
-
- SRCREV = "${AUTOREV}"
-
- When a recipe sets :term:`SRCREV` to
- ``${``\ :term:`AUTOREV`\ ``}``, the build system accesses the network in an
- attempt to determine the latest version of software from the SCM.
- Typically, recipes that use :term:`AUTOREV` are custom or modified
- recipes. Recipes that reside in public repositories usually do not
- use :term:`AUTOREV`.
-
- If you do have recipes that use :term:`AUTOREV`, you can take steps to
- still use the recipes in an offline build. Do the following:
-
- 1. Use a configuration generated by enabling :ref:`build
- history <dev-manual/common-tasks:maintaining build output quality>`.
-
- 2. Use the ``buildhistory-collect-srcrevs`` command to collect the
- stored :term:`SRCREV` values from the build's history. For more
- information on collecting these values, see the
- ":ref:`dev-manual/common-tasks:build history package information`"
- section.
-
- 3. Once you have the correct source revisions, you can modify
- those recipes to set :term:`SRCREV` to specific versions of the
- software.
-
-Speeding Up a Build
-===================
-
-Build time can be an issue. By default, the build system uses simple
-controls to try to maximize build efficiency. In general, the default
-settings for all the following variables result in the most efficient
-build times when dealing with single-socket systems (i.e. a single CPU).
-If you have multiple CPUs, you might try increasing the default values
-to gain more speed. See the descriptions in the glossary for each
-variable for more information:
-
-- :term:`BB_NUMBER_THREADS`:
- The maximum number of threads BitBake simultaneously executes.
-
-- :term:`BB_NUMBER_PARSE_THREADS`:
- The number of threads BitBake uses during parsing.
-
-- :term:`PARALLEL_MAKE`: Extra
- options passed to the ``make`` command during the
- :ref:`ref-tasks-compile` task in
- order to specify parallel compilation on the local build host.
-
-- :term:`PARALLEL_MAKEINST`:
- Extra options passed to the ``make`` command during the
- :ref:`ref-tasks-install` task in
- order to specify parallel installation on the local build host.
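-
-For example, here is an illustrative ``local.conf`` sketch for a
-16-core build host (the values shown are examples only, and the
-defaults already scale to the number of available cores)::
-
- BB_NUMBER_THREADS = "16"
- BB_NUMBER_PARSE_THREADS = "16"
- PARALLEL_MAKE = "-j 16"
- PARALLEL_MAKEINST = "-j 16"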
-
-As mentioned, these variables all scale to the number of processor cores
-available on the build system. For single socket systems, this
-auto-scaling ensures that the build system fundamentally takes advantage
-of potential parallel operations during the build based on the build
-machine's capabilities.
-
-Following are additional factors that can affect build speed:
-
-- File system type: The file system type that the build is being
- performed on can also influence performance. Using ``ext4`` is
- recommended over ``ext2`` and ``ext3`` due to ``ext4``'s improved
- features, such as extents.
-
-- Disabling the updating of access time using ``noatime``: The
- ``noatime`` mount option prevents the build system from updating file
- and directory access times.
-
-- Setting a longer commit interval: Using the "commit=" mount option
- increases the interval in seconds between disk cache writes. Changing
- this interval from the five-second default to something longer
- increases the risk of data loss but decreases the need to write to
- the disk, thus increasing the build performance. An example ``fstab``
- entry combining this option with ``noatime`` follows this list.
-
-- Choosing the packaging backend: Of the available packaging backends,
- IPK is the fastest. Additionally, selecting a singular packaging
- backend also helps.
-
-- Using ``tmpfs`` for :term:`TMPDIR`
- as a temporary file system: While this can help speed up the build,
- the benefits are limited due to the compiler using ``-pipe``. The
- build system goes to some lengths to avoid ``sync()`` calls into the
- file system on the principle that if there was a significant failure,
- the :term:`Build Directory`
- contents could easily be rebuilt.
-
-- Inheriting the
- :ref:`rm_work <ref-classes-rm-work>` class:
- Inheriting this class has been shown to speed up builds due to
- significantly lower amounts of data stored in the data cache as well
- as on disk. Inheriting this class also makes cleanup of
- :term:`TMPDIR` faster, at the
- expense of being easily able to dive into the source code. File
- system maintainers have recommended that the fastest way to clean up
- large numbers of files is to reformat partitions rather than delete
- files due to the linear nature of partitions. This, of course,
- assumes you structure the disk partitions and file systems in a way
- that this is practical.
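-
-As a sketch of the mount-option suggestions above, an ``/etc/fstab``
-entry for a dedicated build partition might look like this (the device
-and mount point are illustrative)::
-
- /dev/sdb1 /srv/build ext4 defaults,noatime,commit=6000 0 0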
-
-Aside from the previous list, you should keep some trade-offs in mind
-that can help you speed up the build:
-
-- Remove items from
- :term:`DISTRO_FEATURES`
- that you might not need.
-
-- Exclude debug symbols and other debug information: If you do not need
- these symbols and other debug information, disabling the ``*-dbg``
- package generation can speed up the build. You can disable this
- generation by setting the
- :term:`INHIBIT_PACKAGE_DEBUG_SPLIT`
- variable to "1".
-
-- Disable static library generation for recipes derived from
- ``autoconf`` or ``libtool``: Following is an example showing how to
- disable static libraries and still provide an override to handle
- exceptions::
-
- STATICLIBCONF = "--disable-static"
- STATICLIBCONF:sqlite3-native = ""
- EXTRA_OECONF += "${STATICLIBCONF}"
-
- .. note::
-
- - Some recipes need static libraries in order to work correctly
- (e.g. ``pseudo-native`` needs ``sqlite3-native``). Overrides,
- as in the previous example, account for these kinds of
- exceptions.
-
- - Some packages have packaging code that assumes the presence of
- the static libraries. If so, you might need to exclude them as
- well.
-
-Working With Libraries
-======================
-
-Libraries are an integral part of your system. This section describes
-some common practices you might find helpful when working with libraries
-to build your system:
-
-- :ref:`How to include static library files
- <dev-manual/common-tasks:including static library files>`
-
-- :ref:`How to use the Multilib feature to combine multiple versions of
- library files into a single image
- <dev-manual/common-tasks:combining multiple versions of library files into one image>`
-
-- :ref:`How to install multiple versions of the same library in parallel on
- the same system
- <dev-manual/common-tasks:installing multiple versions of the same library>`
-
-Including Static Library Files
-------------------------------
-
-If you are building a library and the library offers static linking, you
-can control which static library files (``*.a`` files) get included in
-the built library.
-
-The :term:`PACKAGES` and
-:term:`FILES:* <FILES>` variables in the
-``meta/conf/bitbake.conf`` configuration file define how files installed
-by the ``do_install`` task are packaged. By default, the :term:`PACKAGES`
-variable includes ``${PN}-staticdev``, which represents all static
-library files.
-
-.. note::
-
- Some previously released versions of the Yocto Project defined the
- static library files through ``${PN}-dev``.
-
-Following is part of the BitBake configuration file, where you can see
-how the static library files are defined::
-
- PACKAGE_BEFORE_PN ?= ""
- PACKAGES = "${PN}-src ${PN}-dbg ${PN}-staticdev ${PN}-dev ${PN}-doc ${PN}-locale ${PACKAGE_BEFORE_PN} ${PN}"
- PACKAGES_DYNAMIC = "^${PN}-locale-.*"
- FILES = ""
-
- FILES:${PN} = "${bindir}/* ${sbindir}/* ${libexecdir}/* ${libdir}/lib*${SOLIBS} \
- ${sysconfdir} ${sharedstatedir} ${localstatedir} \
- ${base_bindir}/* ${base_sbindir}/* \
- ${base_libdir}/*${SOLIBS} \
- ${base_prefix}/lib/udev ${prefix}/lib/udev \
- ${base_libdir}/udev ${libdir}/udev \
- ${datadir}/${BPN} ${libdir}/${BPN}/* \
- ${datadir}/pixmaps ${datadir}/applications \
- ${datadir}/idl ${datadir}/omf ${datadir}/sounds \
- ${libdir}/bonobo/servers"
-
- FILES:${PN}-bin = "${bindir}/* ${sbindir}/*"
-
- FILES:${PN}-doc = "${docdir} ${mandir} ${infodir} ${datadir}/gtk-doc \
- ${datadir}/gnome/help"
- SECTION:${PN}-doc = "doc"
-
- FILES_SOLIBSDEV ?= "${base_libdir}/lib*${SOLIBSDEV} ${libdir}/lib*${SOLIBSDEV}"
- FILES:${PN}-dev = "${includedir} ${FILES_SOLIBSDEV} ${libdir}/*.la \
- ${libdir}/*.o ${libdir}/pkgconfig ${datadir}/pkgconfig \
- ${datadir}/aclocal ${base_libdir}/*.o \
- ${libdir}/${BPN}/*.la ${base_libdir}/*.la \
- ${libdir}/cmake ${datadir}/cmake"
- SECTION:${PN}-dev = "devel"
- ALLOW_EMPTY:${PN}-dev = "1"
- RDEPENDS:${PN}-dev = "${PN} (= ${EXTENDPKGV})"
-
- FILES:${PN}-staticdev = "${libdir}/*.a ${base_libdir}/*.a ${libdir}/${BPN}/*.a"
- SECTION:${PN}-staticdev = "devel"
- RDEPENDS:${PN}-staticdev = "${PN}-dev (= ${EXTENDPKGV})"
-
-Combining Multiple Versions of Library Files into One Image
------------------------------------------------------------
-
-The build system offers the ability to build libraries with different
-target optimizations or architecture formats and combine these together
-into one system image. You can link different binaries in the image
-against the different libraries as needed for specific use cases. This
-feature is called "Multilib".
-
-An example would be where you have most of a system compiled in 32-bit
-mode using 32-bit libraries, but you have something large, like a
-database engine, that needs to be a 64-bit application and uses 64-bit
-libraries. Multilib allows you to get the best of both 32-bit and 64-bit
-libraries.
-
-While the Multilib feature is most commonly used for 32 and 64-bit
-differences, the approach the build system uses facilitates different
-target optimizations. You could compile some binaries to use one set of
-libraries and other binaries to use a different set of libraries. The
-libraries could differ in architecture, compiler options, or other
-optimizations.
-
-There are several examples in the ``meta-skeleton`` layer found in the
-:term:`Source Directory`:
-
-- :oe_git:`conf/multilib-example.conf </openembedded-core/tree/meta-skeleton/conf/multilib-example.conf>`
- configuration file.
-
-- :oe_git:`conf/multilib-example2.conf </openembedded-core/tree/meta-skeleton/conf/multilib-example2.conf>`
- configuration file.
-
-- :oe_git:`recipes-multilib/images/core-image-multilib-example.bb </openembedded-core/tree/meta-skeleton/recipes-multilib/images/core-image-multilib-example.bb>`
- recipe
-
-Preparing to Use Multilib
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-User-specific requirements drive the Multilib feature. Consequently,
-there is no one "out-of-the-box" configuration that would
-meet your needs.
-
-In order to enable Multilib, you first need to ensure your recipe is
-extended to support multiple libraries. Many standard recipes are
-already extended and support multiple libraries. You can check in the
-``meta/conf/multilib.conf`` configuration file in the
-:term:`Source Directory` to see how this is
-done using the
-:term:`BBCLASSEXTEND` variable.
-Eventually, all recipes will be covered and this list will not be
-needed.
-
-For the most part, the :ref:`Multilib <ref-classes-multilib*>`
-class extension works automatically to
-extend the package name from ``${PN}`` to ``${MLPREFIX}${PN}``, where
-:term:`MLPREFIX` is the particular multilib (e.g. "lib32-" or "lib64-").
-Standard variables such as
-:term:`DEPENDS`,
-:term:`RDEPENDS`,
-:term:`RPROVIDES`,
-:term:`RRECOMMENDS`,
-:term:`PACKAGES`, and
-:term:`PACKAGES_DYNAMIC` are
-automatically extended by the system. If you are extending any manual
-code in the recipe, you can use the ``${MLPREFIX}`` variable to ensure
-those names are extended correctly.
-
-Using Multilib
-~~~~~~~~~~~~~~
-
-After you have set up the recipes, you need to define the actual
-combination of multiple libraries you want to build. You accomplish this
-through your ``local.conf`` configuration file in the
-:term:`Build Directory`. An example
-configuration would be as follows::
-
- MACHINE = "qemux86-64"
- require conf/multilib.conf
- MULTILIBS = "multilib:lib32"
- DEFAULTTUNE:virtclass-multilib-lib32 = "x86"
- IMAGE_INSTALL:append = " lib32-glib-2.0"
-
-This example enables an additional library named
-``lib32`` alongside the normal target packages. When combining these
-"lib32" alternatives, the example uses "x86" for tuning. For information
-on this particular tuning, see
-``meta/conf/machine/include/ia32/arch-ia32.inc``.
-
-The example then includes ``lib32-glib-2.0`` in all the images, which
-illustrates one method of including a multiple library dependency. You
-can use a normal image build to include this dependency, for example::
-
- $ bitbake core-image-sato
-
-You can also build Multilib packages
-specifically with a command like this::
-
- $ bitbake lib32-glib-2.0
-
-Additional Implementation Details
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are generic implementation details as well as details that are specific to
-package management systems. Following are implementation details
-that exist regardless of the package management system:
-
-- The typical convention used for the class extension code as used by
- Multilib assumes that all package names specified in
- :term:`PACKAGES` that contain
- ``${PN}`` have ``${PN}`` at the start of the name. When that
- convention is not followed and ``${PN}`` appears at the middle or the
- end of a name, problems occur.
-
-- The :term:`TARGET_VENDOR`
- value under Multilib will be extended to "-vendormlmultilib" (e.g.
- "-pokymllib32" for a "lib32" Multilib with Poky). The reason for this
- slightly unwieldy contraction is that any "-" characters in the
- vendor string presently break Autoconf's ``config.sub``, and other
- separators are problematic for different reasons.
-
-Here are the implementation details for the RPM Package Management System:
-
-- A unique architecture is defined for the Multilib packages, along
- with creating a unique deploy folder under ``tmp/deploy/rpm`` in the
- :term:`Build Directory`. For
- example, consider ``lib32`` in a ``qemux86-64`` image. The possible
- architectures in the system are "all", "qemux86_64",
- "lib32_qemux86_64", and "lib32_x86".
-
-- The ``${MLPREFIX}`` variable is stripped from ``${PN}`` during RPM
- packaging. The naming for a normal RPM package and a Multilib RPM
- package in a ``qemux86-64`` system resolves to something similar to
- ``bash-4.1-r2.x86_64.rpm`` and ``bash-4.1-r2.lib32_x86.rpm``,
- respectively.
-
-- When installing a Multilib image, the RPM backend first installs the
- base image and then installs the Multilib libraries.
-
-- The build system relies on RPM to resolve the identical files in the
- two (or more) Multilib packages.
-
-Here are the implementation details for the IPK Package Management System:
-
-- The ``${MLPREFIX}`` is not stripped from ``${PN}`` during IPK
- packaging. The naming for a normal IPK package and a Multilib IPK
- package in a ``qemux86-64`` system resolves to something like
- ``bash_4.1-r2.x86_64.ipk`` and ``lib32-bash_4.1-r2_x86.ipk``,
- respectively.
-
-- The IPK deploy folder is not modified with ``${MLPREFIX}`` because
- packages with and without the Multilib feature can exist in the same
- folder due to the ``${PN}`` differences.
-
-- IPK defines a sanity check for Multilib installation using certain
- rules for file comparison, overrides, and so forth.
-
-Installing Multiple Versions of the Same Library
-------------------------------------------------
-
-There can be situations where you need to install and use multiple versions
-of the same library on the same system at the same time. This
-almost always happens when a library API changes and you have
-multiple pieces of software that depend on the separate versions of the
-library. To accommodate these situations, you can install multiple
-versions of the same library in parallel on the same system.
-
-The process is straightforward as long as the libraries use proper
-versioning. With properly versioned libraries, all you need to do to
-individually specify the libraries is create separate, appropriately
-named recipes where the :term:`PN` part of
-the name includes a portion that differentiates each library version
-(e.g. the major part of the version number). Thus, instead of having a
-single recipe that loads one version of a library (e.g. ``clutter``),
-you provide multiple recipes that result in different versions of the
-libraries you want. As an example, the following two recipes would allow
-the two separate versions of the ``clutter`` library to co-exist on the
-same system:
-
-.. code-block:: none
-
- clutter-1.6_1.6.20.bb
- clutter-1.8_1.8.4.bb
-
-Additionally, if
-you have other recipes that depend on a given library, you need to use
-the :term:`DEPENDS` variable to
-create the dependency. Continuing with the same example, if you want to
-have a recipe depend on the 1.8 version of the ``clutter`` library, use
-the following in your recipe::
-
- DEPENDS = "clutter-1.8"
-
-Working with Pre-Built Libraries
-================================
-
-Introduction
--------------
-
-Some library vendors do not release source code for their software but do
-release pre-built binaries. When shared libraries are built, they should
-be versioned (see `this article
-<https://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html>`__
-for some background), but sometimes this is not done.
-
-To summarize, a versioned library must meet two conditions:
-
-#. The filename must have the version appended, for example: ``libfoo.so.1.2.3``.
-#. The library must have the ELF tag ``SONAME`` set to the major version
- of the library, for example: ``libfoo.so.1``. You can check this by
- running ``readelf -d filename | grep SONAME``.
-
-This section shows how to deal with both versioned and unversioned
-pre-built libraries.
-
-Versioned Libraries
--------------------
-
-In this example we work with pre-built libraries for the FT4222H USB I/O chip.
-Libraries are built for several target architecture variants and packaged in
-an archive as follows::
-
- ├── build-arm-hisiv300
- │   └── libft4222.so.1.4.4.44
- ├── build-arm-v5-sf
- │   └── libft4222.so.1.4.4.44
- ├── build-arm-v6-hf
- │   └── libft4222.so.1.4.4.44
- ├── build-arm-v7-hf
- │   └── libft4222.so.1.4.4.44
- ├── build-arm-v8
- │   └── libft4222.so.1.4.4.44
- ├── build-i386
- │   └── libft4222.so.1.4.4.44
- ├── build-i486
- │   └── libft4222.so.1.4.4.44
- ├── build-mips-eglibc-hf
- │   └── libft4222.so.1.4.4.44
- ├── build-pentium
- │   └── libft4222.so.1.4.4.44
- ├── build-x86_64
- │   └── libft4222.so.1.4.4.44
- ├── examples
- │   ├── get-version.c
- │   ├── i2cm.c
- │   ├── spim.c
- │   └── spis.c
- ├── ftd2xx.h
- ├── install4222.sh
- ├── libft4222.h
- ├── ReadMe.txt
- └── WinTypes.h
-
-To write a recipe to use such a library in your system:
-
-- The vendor will probably have a proprietary licence, so set
- :term:`LICENSE_FLAGS` in your recipe.
-- The vendor provides a tarball containing libraries so set :term:`SRC_URI`
- appropriately.
-- Set :term:`COMPATIBLE_HOST` so that the recipe cannot be used with an
- unsupported architecture. In the following example, we only support the
- 32-bit and 64-bit variants of the ``x86`` architecture.
-- As the vendor provides versioned libraries, we can use ``oe_soinstall``
- from :ref:`ref-classes-utils` to install the shared library and create
- symbolic links. If the vendor does not do this, we need to follow the
- non-versioned library guidelines in the next section.
-- As the vendor likely used :term:`LDFLAGS` different from those in your Yocto
- Project build, disable the corresponding checks by adding ``ldflags``
- to :term:`INSANE_SKIP`.
-- The vendor will typically ship release builds without debugging symbols.
- Avoid errors by preventing the packaging task from stripping out the symbols
- and adding them to a separate debug package. This is done by setting the
- ``INHIBIT_`` flags shown below.
-
-The complete recipe would look like this::
-
- SUMMARY = "FTDI FT4222H Library"
- SECTION = "libs"
- LICENSE_FLAGS = "ftdi"
- LICENSE = "CLOSED"
-
- COMPATIBLE_HOST = "(i.86|x86_64).*-linux"
-
- # Sources available in a .tgz file in .zip archive
- # at https://ftdichip.com/wp-content/uploads/2021/01/libft4222-linux-1.4.4.44.zip
- # Found on https://ftdichip.com/software-examples/ft4222h-software-examples/
- # Since dealing with this particular type of archive is out of scope here,
- # we use a local link.
- SRC_URI = "file://libft4222-linux-${PV}.tgz"
-
- S = "${WORKDIR}"
-
- ARCH_DIR:x86-64 = "build-x86_64"
- ARCH_DIR:i586 = "build-i386"
- ARCH_DIR:i686 = "build-i386"
-
- INSANE_SKIP:${PN} = "ldflags"
- INHIBIT_PACKAGE_STRIP = "1"
- INHIBIT_SYSROOT_STRIP = "1"
- INHIBIT_PACKAGE_DEBUG_SPLIT = "1"
-
- do_install () {
- install -m 0755 -d ${D}${libdir}
- oe_soinstall ${S}/${ARCH_DIR}/libft4222.so.${PV} ${D}${libdir}
- install -d ${D}${includedir}
- install -m 0755 ${S}/*.h ${D}${includedir}
- }
-
-If the precompiled binaries are not statically linked and have dependencies on
-other libraries, then by adding those libraries to :term:`DEPENDS`, the linking
-can be examined and the appropriate :term:`RDEPENDS` automatically added.
-
-Non-Versioned Libraries
------------------------
-
-Some Background
-~~~~~~~~~~~~~~~
-
-Libraries in Linux systems are generally versioned so that it is possible
-to have multiple versions of the same library installed, which eases upgrades
-and support for older software. For example, suppose that in a versioned
-library, an actual library is called ``libfoo.so.1.2``, a symbolic link named
-``libfoo.so.1`` points to ``libfoo.so.1.2``, and a symbolic link named
-``libfoo.so`` points to ``libfoo.so.1.2``. Given these conditions, when you
-link a binary against a library, you typically provide the unversioned file
-name (i.e. ``-lfoo`` to the linker). However, the linker follows the symbolic
-link and actually links against the versioned filename. The unversioned symbolic
-link is only used at development time. Consequently, the library is packaged
-along with the headers in the development package ``${PN}-dev`` along with the
-actual library and versioned symbolic links in ``${PN}``. Because versioned
-libraries are far more common than unversioned libraries, the default packaging
-rules assume versioned libraries.
-
-Yocto Library Packaging Overview
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-It follows that packaging an unversioned library requires a bit of work in the
-recipe. By default, ``libfoo.so`` gets packaged into ``${PN}-dev``, which
-triggers a QA warning that a non-symlink library is in a ``-dev`` package,
-and binaries in the same recipe link to the library in ``${PN}-dev``,
-which triggers more QA warnings. To solve this problem, you need to package the
-unversioned library into ``${PN}`` where it belongs. The following are the abridged
-default :term:`FILES` variables in ``bitbake.conf``::
-
- SOLIBS = ".so.*"
- SOLIBSDEV = ".so"
- FILES:${PN} = "... ${libdir}/lib*${SOLIBS} ..."
- FILES_SOLIBSDEV ?= "... ${libdir}/lib*${SOLIBSDEV} ..."
- FILES:${PN}-dev = "... ${FILES_SOLIBSDEV} ..."
-
-:term:`SOLIBS` defines a pattern that matches real shared object libraries.
-:term:`SOLIBSDEV` matches the development form (unversioned symlink). These two
-variables are then used in ``FILES:${PN}`` and ``FILES:${PN}-dev``, which puts
-the real libraries into ``${PN}`` and the unversioned symbolic link into ``${PN}-dev``.
-To package unversioned libraries, you need to modify the variables in the recipe
-as follows::
-
- SOLIBS = ".so"
- FILES_SOLIBSDEV = ""
-
-The modifications cause the ``.so`` file to be the real library
-and unset :term:`FILES_SOLIBSDEV` so that no libraries get packaged into
-``${PN}-dev``. The changes are required because unless :term:`PACKAGES` is changed,
-``${PN}-dev`` collects files before ``${PN}``. ``${PN}-dev`` must not collect any of
-the files you want in ``${PN}``.
-
-Finally, loadable modules, essentially unversioned libraries that are linked
-at runtime using ``dlopen()`` instead of at build time, should generally be
-installed in a private directory. However, if they are installed in ``${libdir}``,
-then the modules can be treated as unversioned libraries.
-
-Example
-~~~~~~~
-
-The example below installs an unversioned x86-64 pre-built library named
-``libfoo.so``. The :term:`COMPATIBLE_HOST` variable limits recipes to the
-x86-64 architecture while the :term:`INSANE_SKIP`, :term:`INHIBIT_PACKAGE_STRIP`
-and :term:`INHIBIT_SYSROOT_STRIP` variables are all set as in the above
-versioned library example. The "magic" is setting the :term:`SOLIBS` and
-:term:`FILES_SOLIBSDEV` variables as explained above::
-
- SUMMARY = "libfoo sample recipe"
- SECTION = "libs"
- LICENSE = "CLOSED"
-
- SRC_URI = "file://libfoo.so"
-
- COMPATIBLE_HOST = "x86_64.*-linux"
-
- INSANE_SKIP:${PN} = "ldflags"
- INHIBIT_PACKAGE_STRIP = "1"
- INHIBIT_SYSROOT_STRIP = "1"
- SOLIBS = ".so"
- FILES_SOLIBSDEV = ""
-
- do_install () {
- install -d ${D}${libdir}
- install -m 0755 ${WORKDIR}/libfoo.so ${D}${libdir}
- }
-
-Using x32 psABI
-===============
-
-x32 processor-specific Application Binary Interface (`x32
-psABI <https://software.intel.com/en-us/node/628948>`__) is a native
-32-bit processor-specific ABI for Intel 64 (x86-64) architectures. An
-ABI defines the calling conventions between functions in a processing
-environment. The interface determines what registers are used and what
-the sizes are for various C data types.
-
-Some processing environments prefer using 32-bit applications even when
-running on Intel 64-bit platforms. Consider the i386 psABI, which is a
-very old 32-bit ABI for Intel 64-bit platforms. The i386 psABI does not
-provide efficient use and access of the Intel 64-bit processor
-resources, leaving the system underutilized. Now consider the x86_64
-psABI. This ABI is newer and uses 64 bits for data sizes and program
-pointers. The extra bits increase the footprint of programs and
-libraries, and also increase the memory and file system size
-requirements. Executing under the x32 psABI enables user programs to
-utilize CPU and system resources more efficiently while keeping the
-memory footprint of the applications low. Extra bits are used for
-registers but not for addressing mechanisms.
-
-The Yocto Project supports the final specifications of x32 psABI as
-follows:
-
-- You can create packages and images in x32 psABI format on x86_64
- architecture targets.
-
-- You can successfully build recipes with the x32 toolchain.
-
-- You can create and boot ``core-image-minimal`` and
- ``core-image-sato`` images.
-
-- There is RPM Package Manager (RPM) support for x32 binaries.
-
-- There is support for large images.
-
-To use the x32 psABI, you need to edit your ``conf/local.conf``
-configuration file as follows::
-
- MACHINE = "qemux86-64"
- DEFAULTTUNE = "x86-64-x32"
- baselib = "${@d.getVar('BASE_LIB:tune-' + (d.getVar('DEFAULTTUNE') \
- or 'INVALID')) or 'lib'}"
-
-Once you have set
-up your configuration file, use BitBake to build an image that supports
-the x32 psABI. Here is an example::
-
- $ bitbake core-image-sato
-
-Enabling GObject Introspection Support
-======================================
-
-`GObject introspection <https://gi.readthedocs.io/en/latest/>`__
-is the standard mechanism for accessing GObject-based software from
-runtime environments. GObject is a feature of the GLib library that
-provides an object framework for the GNOME desktop and related software.
-GObject Introspection adds information to GObject that allows objects
-created within it to be represented across different programming
-languages. If you want to construct GStreamer pipelines using Python, or
-control UPnP infrastructure using JavaScript and GUPnP, GObject
-introspection is the only way to do it.
-
-This section describes the Yocto Project support for generating and
-packaging GObject introspection data. GObject introspection data is a
-description of the API provided by libraries built on top of the GLib
-framework, and, in particular, that framework's GObject mechanism.
-GObject Introspection Repository (GIR) files go to ``-dev`` packages,
-``typelib`` files go to main packages as they are packaged together with
-libraries that are introspected.
-
-The data is generated when building such a library, by linking the
-library with a small executable binary that asks the library to describe
-itself, and then executing the binary and processing its output.
-
-Generating this data in a cross-compilation environment is difficult
-because the library is produced for the target architecture, but its
-code needs to be executed on the build host. This problem is solved with
-the OpenEmbedded build system by running the code through QEMU, which
-allows precisely that. Unfortunately, QEMU does not always work
-perfectly as mentioned in the ":ref:`dev-manual/common-tasks:known issues`"
-section.
-
-Enabling the Generation of Introspection Data
----------------------------------------------
-
-Enabling the generation of introspection data (GIR files) in your
-library package involves the following:
-
-1. Inherit the
- :ref:`gobject-introspection <ref-classes-gobject-introspection>`
- class.
-
-2. Make sure introspection is not disabled anywhere in the recipe or
- from anything the recipe includes. Also, make sure that
- "gobject-introspection-data" is not in
- :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED`
- and that "qemu-usermode" is not in
- :term:`MACHINE_FEATURES_BACKFILL_CONSIDERED`.
- If either of those features is listed, no introspection data is generated.
-
-3. Try to build the recipe. If you encounter build errors that look like
- something is unable to find ``.so`` libraries, check where these
- libraries are located in the source tree and add the following to the
- recipe::
-
- GIR_EXTRA_LIBS_PATH = "${B}/something/.libs"
-
- .. note::
-
- See recipes in the ``oe-core`` repository that use that
- :term:`GIR_EXTRA_LIBS_PATH` variable as an example.
-
-4. Look for any other errors, which probably mean that introspection
- support in a package is not entirely standard, and thus breaks down
- in a cross-compilation environment. For such cases, custom-made fixes
- are needed. A good place to ask and receive help in these cases is
- the :ref:`Yocto Project mailing
- lists <resources-mailinglist>`.
-
-.. note::
-
- A library that no longer builds against the latest Yocto Project
- release and prints introspection-related errors is a good candidate
- for the previous procedure.
-
-Disabling the Generation of Introspection Data
-----------------------------------------------
-
-You might find that you do not want to generate introspection data. Or,
-perhaps QEMU does not work on your build host and target architecture
-combination. If so, you can use either of the following methods to
-disable GIR file generation:
-
-- Add the following to your distro configuration::
-
- DISTRO_FEATURES_BACKFILL_CONSIDERED = "gobject-introspection-data"
-
- Adding this statement disables generating introspection data using
- QEMU but will still enable building introspection tools and libraries
- (i.e. building them does not require the use of QEMU).
-
-- Add the following to your machine configuration::
-
- MACHINE_FEATURES_BACKFILL_CONSIDERED = "qemu-usermode"
-
- Adding this statement disables the use of QEMU when building packages for your
- machine. Currently, this feature is used only by introspection
- recipes and has the same effect as the previously described option.
-
- .. note::
-
- Future releases of the Yocto Project might have other features
- affected by this option.
-
-If you disable introspection data, you can still obtain it through other
-means such as copying the data from a suitable sysroot, or by generating
-it on the target hardware. The OpenEmbedded build system does not
-currently provide specific support for these techniques.
-
-Testing that Introspection Works in an Image
---------------------------------------------
-
-Use the following procedure to test if generating introspection data is
-working in an image:
-
-1. Make sure that "gobject-introspection-data" is not in
- :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED`
- and that "qemu-usermode" is not in
- :term:`MACHINE_FEATURES_BACKFILL_CONSIDERED`.
-
-2. Build ``core-image-sato``.
-
-3. Launch a Terminal and then start Python in the terminal.
-
-4. Enter the following in the terminal::
-
- >>> from gi.repository import GLib
- >>> GLib.get_host_name()
-
-5. For something a little more advanced, see:
- https://python-gtk-3-tutorial.readthedocs.io/en/latest/introduction.html
-
-Known Issues
-------------
-
-Here are known issues in GObject Introspection Support:
-
-- ``qemu-ppc64`` immediately crashes. Consequently, you cannot build
- introspection data on that architecture.
-
-- x32 is not supported by QEMU. Consequently, introspection data is
- disabled.
-
-- musl causes transient GLib binaries to crash on assertion failures.
- Consequently, generating introspection data is disabled.
-
-- Because QEMU is not able to run the binaries correctly, introspection
- is disabled for some specific packages under specific architectures
- (e.g. ``gcr``, ``libsecret``, and ``webkit``).
-
-- QEMU usermode might not work properly when running 64-bit binaries
- under 32-bit host machines. In particular, "qemumips64" is known to
- not work under i686.
-
-Optionally Using an External Toolchain
-======================================
-
-You might want to use an external toolchain as part of your development.
-If this is the case, the fundamental steps you need to accomplish are as
-follows:
-
-- Understand where the installed toolchain resides. For cases where you
- need to build the external toolchain, you would need to take separate
- steps to build and install the toolchain.
-
-- Make sure you add the layer that contains the toolchain to your
- ``bblayers.conf`` file through the
- :term:`BBLAYERS` variable.
-
-- Set the ``EXTERNAL_TOOLCHAIN`` variable in your ``local.conf`` file
- to the location in which you installed the toolchain.
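-
-As an illustration only (the toolchain path here is hypothetical and
-the exact variables depend on the external toolchain layer you use),
-the ``local.conf`` addition might look like this::
-
- EXTERNAL_TOOLCHAIN = "/opt/external-toolchain"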
-
-A good example of an external toolchain used with the Yocto Project is
-Mentor Graphics Sourcery G++ Toolchain. You can see information on how
-to use that particular layer in the ``README`` file at
-https://github.com/MentorEmbedded/meta-sourcery/. You can find
-further information by reading about the
-:term:`TCMODE` variable in the Yocto
-Project Reference Manual's variable glossary.
-
-Creating Partitioned Images Using Wic
-=====================================
-
-Creating an image for a particular hardware target using the
-OpenEmbedded build system does not necessarily mean you can boot that
-image as is on your device. Physical devices accept and boot images in
-various ways depending on the specifics of the device. Usually,
-information about the hardware can tell you what image format the device
-requires. Should your device require multiple partitions on an SD card,
-flash, or an HDD, you can use the OpenEmbedded Image Creator, Wic, to
-create the properly partitioned image.
-
-The ``wic`` command generates partitioned images from existing
-OpenEmbedded build artifacts. Image generation is driven by partitioning
-commands contained in an OpenEmbedded kickstart file (``.wks``)
-specified either directly on the command line or as one of a selection
-of canned kickstart files as shown with the ``wic list images`` command
-in the
-":ref:`dev-manual/common-tasks:generate an image using an existing kickstart file`"
-section. When you apply the command to a given set of build artifacts, the
-result is an image or set of images that can be directly written onto media and
-used on a particular system.
-
-.. note::
-
- For a kickstart file reference, see the
- ":ref:`ref-manual/kickstart:openembedded kickstart (\`\`.wks\`\`) reference`"
- Chapter in the Yocto Project Reference Manual.
-
-The ``wic`` command and the infrastructure it is based on is by
-definition incomplete. The purpose of the command is to allow the
-generation of customized images, and as such, was designed to be
-completely extensible through a plugin interface. See the
-":ref:`dev-manual/common-tasks:using the wic plugin interface`" section
-for information on these plugins.
-
-This section provides some background information on Wic, describes what
-you need to have in place to run the tool, provides instruction on how
-to use the Wic utility, provides information on using the Wic plugins
-interface, and provides several examples that show how to use Wic.
-
-Background
-----------
-
-This section provides some background on the Wic utility. While none of
-this information is required to use Wic, you might find it interesting.
-
-- The name "Wic" is derived from OpenEmbedded Image Creator (oeic). The
- "oe" diphthong in "oeic" was promoted to the letter "w", because
- "oeic" is both difficult to remember and to pronounce.
-
-- Wic is loosely based on the Meego Image Creator (``mic``) framework.
- The Wic implementation has been heavily modified to make direct use
- of OpenEmbedded build artifacts instead of package installation and
- configuration, which are already incorporated within the OpenEmbedded
- artifacts.
-
-- Wic is a completely independent standalone utility that initially
- provides easier-to-use and more flexible replacements for existing
- functionality in OE-Core's
- :ref:`image-live <ref-classes-image-live>`
- class. The difference between Wic and those examples is that with Wic
- the functionality of those scripts is implemented by a
- general-purpose partitioning language, which is based on Red Hat
- kickstart syntax.
-
-Requirements
-------------
-
-In order to use the Wic utility with the OpenEmbedded Build system, your
-system needs to meet the following requirements:
-
-- The Linux distribution on your development host must support the
- Yocto Project. See the ":ref:`detailed-supported-distros`"
- section in the Yocto Project Reference Manual for the list of
- distributions that support the Yocto Project.
-
-- The standard system utilities, such as ``cp``, must be installed on
- your development host system.
-
-- You must have sourced the build environment setup script (i.e.
- :ref:`structure-core-script`) found in the
- :term:`Build Directory`.
-
-- You need to have the build artifacts already available, which
- typically means that you must have already created an image using the
- OpenEmbedded build system (e.g. ``core-image-minimal``). While it
- might seem redundant to generate an image in order to create an image
- using Wic, the current version of Wic requires the artifacts in the
- form generated by the OpenEmbedded build system.
-
-- You must build several native tools, which are built to run on the
- build system::
-
- $ bitbake parted-native dosfstools-native mtools-native
-
-- Include "wic" as part of the
- :term:`IMAGE_FSTYPES`
- variable.
-
-- Include the name of the :ref:`wic kickstart file <openembedded-kickstart-wks-reference>`
- as part of the :term:`WKS_FILE` variable.
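-
-For example, here is an illustrative ``local.conf`` sketch covering the
-last two points (the kickstart file name is an example; pick one from
-``wic list images`` or provide your own)::
-
- IMAGE_FSTYPES += "wic"
- WKS_FILE = "directdisk.wks"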
-
-Getting Help
-------------
-
-You can get general help for the ``wic`` command by entering the ``wic``
-command by itself or by entering the command with a help argument as
-follows::
-
- $ wic -h
- $ wic --help
- $ wic help
-
-Currently, Wic supports seven commands: ``cp``, ``create``, ``help``,
-``list``, ``ls``, ``rm``, and ``write``. You can get help for all these
-commands except "help" by using the following form::
-
- $ wic help command
-
-For example, the following command returns help for the ``write``
-command::
-
- $ wic help write
-
-Wic supports help for three topics: ``overview``, ``plugins``, and
-``kickstart``. You can get help for any topic using the following form::
-
- $ wic help topic
-
-For example, the following returns overview help for Wic::
-
- $ wic help overview
-
-There is one additional level of help for Wic. You can get help on
-individual images through the ``list`` command. You can use the ``list``
-command to return the available Wic images as follows::
-
- $ wic list images
- genericx86 Create an EFI disk image for genericx86*
- edgerouter Create SD card image for Edgerouter
- beaglebone-yocto Create SD card image for Beaglebone
- qemux86-directdisk Create a qemu machine 'pcbios' direct disk image
- systemd-bootdisk Create an EFI disk image with systemd-boot
- mkhybridiso Create a hybrid ISO image
- mkefidisk Create an EFI disk image
- sdimage-bootpart Create SD card image with a boot partition
- directdisk-multi-rootfs Create multi rootfs image using rootfs plugin
- directdisk Create a 'pcbios' direct disk image
- directdisk-bootloader-config Create a 'pcbios' direct disk image with custom bootloader config
- qemuriscv Create qcow2 image for RISC-V QEMU machines
- directdisk-gpt Create a 'pcbios' direct disk image
- efi-bootdisk
-
-Once you know the list of available
-Wic images, you can use ``help`` with the command to get help on a
-particular image. For example, the following command returns help on the
-"beaglebone-yocto" image::
-
- $ wic list beaglebone-yocto help
-
- Creates a partitioned SD card image for Beaglebone.
- Boot files are located in the first vfat partition.
-
-Operational Modes
------------------
-
-You can use Wic in two different modes, depending on how much control
-you need for specifying the OpenEmbedded build artifacts that are used
-for creating the image: Raw and Cooked:
-
-- *Raw Mode:* You explicitly specify build artifacts through Wic
- command-line arguments.
-
-- *Cooked Mode:* The current
- :term:`MACHINE` setting and image
- name are used to automatically locate and provide the build
- artifacts. You just supply a kickstart file and the name of the image
- from which to use artifacts.
-
-Regardless of the mode you use, you need to have the build artifacts
-ready and available.
-
-Raw Mode
-~~~~~~~~
-
-Running Wic in raw mode allows you to specify all the partitions through
-the ``wic`` command line. The primary use for raw mode is if you have
-built your kernel outside of the Yocto Project
-:term:`Build Directory`. In other words, you
-can point to arbitrary kernel, root filesystem locations, and so forth.
-Contrast this behavior with cooked mode where Wic looks in the Build
-Directory (e.g. ``tmp/deploy/images/``\ machine).
-
-The general form of the ``wic`` command in raw mode is::
-
- $ wic create wks_file options ...
-
- Where:
-
- wks_file:
- An OpenEmbedded kickstart file. You can provide
- your own custom file or use a file from a set of
- existing files as described by further options.
-
- optional arguments:
- -h, --help show this help message and exit
- -o OUTDIR, --outdir OUTDIR
- name of directory to create image in
- -e IMAGE_NAME, --image-name IMAGE_NAME
- name of the image to use the artifacts from e.g. core-
- image-sato
- -r ROOTFS_DIR, --rootfs-dir ROOTFS_DIR
- path to the /rootfs dir to use as the .wks rootfs
- source
- -b BOOTIMG_DIR, --bootimg-dir BOOTIMG_DIR
- path to the dir containing the boot artifacts (e.g.
- /EFI or /syslinux dirs) to use as the .wks bootimg
- source
- -k KERNEL_DIR, --kernel-dir KERNEL_DIR
- path to the dir containing the kernel to use in the
- .wks bootimg
- -n NATIVE_SYSROOT, --native-sysroot NATIVE_SYSROOT
- path to the native sysroot containing the tools to use
- to build the image
- -s, --skip-build-check
- skip the build check
- -f, --build-rootfs build rootfs
- -c {gzip,bzip2,xz}, --compress-with {gzip,bzip2,xz}
- compress image with specified compressor
- -m, --bmap generate .bmap
- --no-fstab-update Do not change fstab file.
- -v VARS_DIR, --vars VARS_DIR
- directory with <image>.env files that store bitbake
- variables
- -D, --debug output debug information
-
-.. note::
-
- You do not need root privileges to run Wic. In fact, you should not
- run as root when using the utility.
-
-Cooked Mode
-~~~~~~~~~~~
-
-Running Wic in cooked mode uses the artifacts already in the Build
-Directory. In other words, you do not have to specify kernel or root
-filesystem locations as part of the command. All you need to provide is
-a kickstart file and the name of the image from which to use artifacts
-by using the "-e" option. Wic looks in the Build Directory (e.g.
-``tmp/deploy/images/``\ machine) for artifacts.
-
-The general form of the ``wic`` command using Cooked Mode is as follows::
-
- $ wic create wks_file -e IMAGE_NAME
-
- Where:
-
- wks_file:
- An OpenEmbedded kickstart file. You can provide
- your own custom file or use a file from a set of
- existing files provided with the Yocto Project
- release.
-
- required argument:
- -e IMAGE_NAME, --image-name IMAGE_NAME
- name of the image to use the artifacts from e.g. core-
- image-sato
-
-Using an Existing Kickstart File
---------------------------------
-
-If you do not want to create your own kickstart file, you can use an
-existing file provided by the Wic installation. As shipped, kickstart
-files can be found in the :ref:`overview-manual/development-environment:yocto project source repositories` in the
-following two locations::
-
- poky/meta-yocto-bsp/wic
- poky/scripts/lib/wic/canned-wks
-
-Use the following command to list the available kickstart files::
-
- $ wic list images
- genericx86 Create an EFI disk image for genericx86*
- beaglebone-yocto Create SD card image for Beaglebone
- edgerouter Create SD card image for Edgerouter
- qemux86-directdisk Create a QEMU machine 'pcbios' direct disk image
- directdisk-gpt Create a 'pcbios' direct disk image
- mkefidisk Create an EFI disk image
- directdisk Create a 'pcbios' direct disk image
- systemd-bootdisk Create an EFI disk image with systemd-boot
- mkhybridiso Create a hybrid ISO image
- sdimage-bootpart Create SD card image with a boot partition
- directdisk-multi-rootfs Create multi rootfs image using rootfs plugin
- directdisk-bootloader-config Create a 'pcbios' direct disk image with custom bootloader config
-
-When you use an existing file, you
-do not have to use the ``.wks`` extension. Here is an example in Raw
-Mode that uses the ``directdisk`` file::
-
- $ wic create directdisk -r rootfs_dir -b bootimg_dir \
- -k kernel_dir -n native_sysroot
-
-Here are the actual partition language commands used in the
-``genericx86.wks`` file to generate an image::
-
- # short-description: Create an EFI disk image for genericx86*
- # long-description: Creates a partitioned EFI disk image for genericx86* machines
- part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
- part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
- part swap --ondisk sda --size 44 --label swap1 --fstype=swap
-
- bootloader --ptable gpt --timeout=5 --append="rootfstype=ext4 console=ttyS0,115200 console=tty0"
-
-Using the Wic Plugin Interface
-------------------------------
-
-You can extend and specialize Wic functionality by using Wic plugins.
-This section explains the Wic plugin interface.
-
-.. note::
-
- Wic plugins consist of "source" and "imager" plugins. Imager plugins
- are beyond the scope of this section.
-
-Source plugins provide a mechanism to customize partition content during
-the Wic image generation process. You can use source plugins to map
-values that you specify using ``--source`` commands in kickstart files
-(i.e. ``*.wks``) to a plugin implementation used to populate a given
-partition.
-
-.. note::
-
- If you use plugins that have build-time dependencies (e.g. native
- tools, bootloaders, and so forth) when building a Wic image, you need
- to specify those dependencies using the :term:`WKS_FILE_DEPENDS`
- variable.
-
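-For instance, an image recipe that uses a plugin needing an extra native
-tool at build time might declare that dependency as follows
-(``my-tool-native`` is a hypothetical recipe name)::
-
-   WKS_FILE_DEPENDS:append = " my-tool-native"
-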
-Source plugins are subclasses defined in plugin files. As shipped, the
-Yocto Project provides several plugin files. You can see the source
-plugin files that ship with the Yocto Project
-:yocto_git:`here </poky/tree/scripts/lib/wic/plugins/source>`.
-Each of these plugin files contains source plugins that are designed to
-populate a specific Wic image partition.
-
-Source plugins are subclasses of the ``SourcePlugin`` class, which is
-defined in the ``poky/scripts/lib/wic/pluginbase.py`` file. For example,
-the ``BootimgEFIPlugin`` source plugin found in the ``bootimg-efi.py``
-file is a subclass of the ``SourcePlugin`` class, which is found in the
-``pluginbase.py`` file.
-
-You can also implement source plugins in a layer outside of the Source
-Repositories (external layer). To do so, be sure that your plugin files
-are located in a directory whose path is
-``scripts/lib/wic/plugins/source/`` within your external layer. When the
-plugin files are located there, the source plugins they contain are made
-available to Wic.
-
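-For example, with a hypothetical layer named ``meta-mylayer`` and a
-plugin file named ``my-plugin.py``, the layout would look like this::
-
-   meta-mylayer/
-   └── scripts/
-       └── lib/
-           └── wic/
-               └── plugins/
-                   └── source/
-                       └── my-plugin.py
-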
-When the Wic implementation needs to invoke a partition-specific
-implementation, it looks for the plugin with the same name as the
-``--source`` parameter used in the kickstart file given to that
-partition. For example, if the partition is set up using the following
-command in a kickstart file::
-
- part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
-
-The methods defined as class
-members of the matching source plugin (i.e. ``bootimg-pcbios``) in the
-``bootimg-pcbios.py`` plugin file are used.
-
-To be more concrete, here is the corresponding plugin definition from
-the ``bootimg-pcbios.py`` file for the previous command along with an
-example method called by the Wic implementation when it needs to prepare
-a partition using an implementation-specific function::
-
- .
- .
- .
- class BootimgPcbiosPlugin(SourcePlugin):
- """
- Create MBR boot partition and install syslinux on it.
- """
-
- name = 'bootimg-pcbios'
- .
- .
- .
- @classmethod
- def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
- oe_builddir, bootimg_dir, kernel_dir,
- rootfs_dir, native_sysroot):
- """
- Called to do the actual content population for a partition i.e. it
- 'prepares' the partition to be incorporated into the image.
- In this case, prepare content for legacy bios boot partition.
- """
- .
- .
- .
-
-If a
-subclass (plugin) itself does not implement a particular function, Wic
-locates and uses the default version in the superclass. It is for this
-reason that all source plugins are derived from the ``SourcePlugin``
-class.
-
-The ``SourcePlugin`` class defined in the ``pluginbase.py`` file defines
-a set of methods that source plugins can implement or override. Any
-plugins (subclass of ``SourcePlugin``) that do not implement a
-particular method inherit the implementation of the method from the
-``SourcePlugin`` class. See the ``SourcePlugin`` class in the
-``pluginbase.py`` file for details.
-
-The following list describes the methods implemented in the
-``SourcePlugin`` class:
-
-- ``do_prepare_partition()``: Called to populate a partition with
- actual content. In other words, the method prepares the final
- partition image that is incorporated into the disk image.
-
-- ``do_configure_partition()``: Called before
- ``do_prepare_partition()`` to create custom configuration files for a
- partition (e.g. syslinux or grub configuration files).
-
-- ``do_install_disk()``: Called after all partitions have been
- prepared and assembled into a disk image. This method provides a hook
- to allow finalization of a disk image (e.g. writing an MBR).
-
-- ``do_stage_partition()``: Special content-staging hook called
- before ``do_prepare_partition()``. This method is normally empty.
-
- Typically, a partition just uses the passed-in parameters (e.g. the
- unmodified value of ``bootimg_dir``). However, in some cases, things
- might need to be more tailored. As an example, certain files might
- additionally need to be taken from ``bootimg_dir + /boot``. This hook
- allows those files to be staged in a customized fashion.
-
- .. note::
-
- ``get_bitbake_var()`` allows you to access non-standard variables that
- you might want to use for this behavior.
-
-You can extend the source plugin mechanism. To add more hooks, create
-more source plugin methods within ``SourcePlugin`` and the corresponding
-derived subclasses. The code that calls the plugin methods uses the
-``plugin.get_source_plugin_methods()`` function to find the method or
-methods needed by the call. Retrieval of those methods is accomplished
-by filling up a dict with keys that contain the method names of
-interest. On success, these will be filled in with the actual methods.
-See the Wic implementation for examples and details.
-
-Wic Examples
-------------
-
-This section provides several examples that show how to use the Wic
-utility. All the examples assume the list of requirements in the
-":ref:`dev-manual/common-tasks:requirements`" section have been met. The
-examples assume the previously generated image is
-``core-image-minimal``.
-
-Generate an Image using an Existing Kickstart File
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This example runs in Cooked Mode and uses the ``mkefidisk`` kickstart
-file::
-
- $ wic create mkefidisk -e core-image-minimal
- INFO: Building wic-tools...
- .
- .
- .
- INFO: The new image(s) can be found here:
- ./mkefidisk-201804191017-sda.direct
-
- The following build artifacts were used to create the image(s):
- ROOTFS_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs
- BOOTIMG_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share
- KERNEL_DIR: /home/stephano/yocto/build/tmp-glibc/deploy/images/qemux86
- NATIVE_SYSROOT: /home/stephano/yocto/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native
-
- INFO: The image(s) were created using OE kickstart file:
- /home/stephano/yocto/openembedded-core/scripts/lib/wic/canned-wks/mkefidisk.wks
-
-The previous example shows the easiest way to create an image by running
-in cooked mode and supplying a kickstart file and the "-e" option to
-point to the existing build artifacts. Your ``local.conf`` file needs to
-have the :term:`MACHINE` variable set
-to the machine you are using, which is "qemux86" in this example.
-
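-For reference, the ``local.conf`` setting for this example is simply::
-
-   MACHINE = "qemux86"
-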
-Once the image builds, the output provides image location, artifact use,
-and kickstart file information.
-
-.. note::
-
- You should always verify the details provided in the output to make
- sure that the image was indeed created exactly as expected.
-
-Continuing with the example, you can now write the image from the Build
-Directory onto a USB stick, or whatever media for which you built your
-image, and boot from the media. You can write the image by using
-``bmaptool`` or ``dd``::
-
- $ oe-run-native bmaptool copy mkefidisk-201804191017-sda.direct /dev/sdX
-
-or ::
-
- $ sudo dd if=mkefidisk-201804191017-sda.direct of=/dev/sdX
-
-.. note::
-
- For more information on how to use the ``bmaptool``
- to flash a device with an image, see the
- ":ref:`dev-manual/common-tasks:flashing images using \`\`bmaptool\`\``"
- section.
-
-Using a Modified Kickstart File
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Because partitioned image creation is driven by the kickstart file, it
-is easy to affect image creation by changing the parameters in the file.
-This next example demonstrates that through modification of the
-``directdisk-gpt`` kickstart file.
-
-As mentioned earlier, you can use the command ``wic list images`` to
-show the list of existing kickstart files. The directory in which the
-``directdisk-gpt.wks`` file resides is
-``scripts/lib/wic/canned-wks/``, which is located in the
-:term:`Source Directory` (e.g. ``poky``).
-Because available files reside in this directory, you can create and add
-your own custom files to the directory. Subsequent use of the
-``wic list images`` command would then include your kickstart files.
-
-In this example, the existing ``directdisk-gpt`` file already does most
-of what is needed. However, for the hardware in this example, the image
-will need to boot from ``sdb`` instead of ``sda``, which is what the
-``directdisk-gpt`` kickstart file uses.
-
-The example begins by making a copy of the ``directdisk-gpt.wks`` file
-in the ``scripts/lib/wic/canned-wks`` directory and then changing the
-lines that specify the target disk from which to boot.
-::
-
- $ cp /home/stephano/yocto/poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks \
- /home/stephano/yocto/poky/scripts/lib/wic/canned-wks/directdisksdb-gpt.wks
-
-Next, the example modifies the ``directdisksdb-gpt.wks`` file and
-changes all instances of "``--ondisk sda``" to "``--ondisk sdb``". The
-example changes the following two lines and leaves the remaining lines
-untouched::
-
- part /boot --source bootimg-pcbios --ondisk sdb --label boot --active --align 1024
- part / --source rootfs --ondisk sdb --fstype=ext4 --label platform --align 1024 --use-uuid
-
-Once the lines are changed, the
-example generates the ``directdisksdb-gpt`` image. The command points
-the process at the ``core-image-minimal`` artifacts for the Next Unit of
-Computing (nuc) :term:`MACHINE` defined in the ``local.conf`` file.
-::
-
- $ wic create directdisksdb-gpt -e core-image-minimal
- INFO: Building wic-tools...
- .
- .
- .
- Initialising tasks: 100% |#######################################| Time: 0:00:01
- NOTE: Executing SetScene Tasks
- NOTE: Executing RunQueue Tasks
- NOTE: Tasks Summary: Attempted 1161 tasks of which 1157 didn't need to be rerun and all succeeded.
- INFO: Creating image(s)...
-
- INFO: The new image(s) can be found here:
- ./directdisksdb-gpt-201710090938-sdb.direct
-
- The following build artifacts were used to create the image(s):
- ROOTFS_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs
- BOOTIMG_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share
- KERNEL_DIR: /home/stephano/yocto/build/tmp-glibc/deploy/images/qemux86
- NATIVE_SYSROOT: /home/stephano/yocto/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native
-
- INFO: The image(s) were created using OE kickstart file:
- /home/stephano/yocto/poky/scripts/lib/wic/canned-wks/directdisksdb-gpt.wks
-
-Continuing with the example, you can now directly ``dd`` the image to a
-USB stick, or whatever media for which you built your image, and boot
-the resulting media::
-
- $ sudo dd if=directdisksdb-gpt-201710090938-sdb.direct of=/dev/sdb
- 140966+0 records in
- 140966+0 records out
- 72174592 bytes (72 MB, 69 MiB) copied, 78.0282 s, 925 kB/s
- $ sudo eject /dev/sdb
-
-Using a Modified Kickstart File and Running in Raw Mode
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This next example manually specifies each build artifact (runs in Raw
-Mode) and uses a modified kickstart file. The example also uses the
-``-o`` option to cause Wic to create the output somewhere other than the
-default output directory, which is the current directory::
-
- $ wic create test.wks -o /home/stephano/testwic \
- --rootfs-dir /home/stephano/yocto/build/tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/rootfs \
- --bootimg-dir /home/stephano/yocto/build/tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share \
- --kernel-dir /home/stephano/yocto/build/tmp/deploy/images/qemux86 \
- --native-sysroot /home/stephano/yocto/build/tmp/work/i586-poky-linux/wic-tools/1.0-r0/recipe-sysroot-native
-
- INFO: Creating image(s)...
-
- INFO: The new image(s) can be found here:
- /home/stephano/testwic/test-201710091445-sdb.direct
-
- The following build artifacts were used to create the image(s):
- ROOTFS_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs
- BOOTIMG_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share
- KERNEL_DIR: /home/stephano/yocto/build/tmp-glibc/deploy/images/qemux86
- NATIVE_SYSROOT: /home/stephano/yocto/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native
-
- INFO: The image(s) were created using OE kickstart file:
- test.wks
-
-For this example,
-:term:`MACHINE` did not have to be
-specified in the ``local.conf`` file since the artifact is manually
-specified.
-
-Using Wic to Manipulate an Image
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Wic image manipulation allows you to shorten turnaround time during
-image development. For example, you can use Wic to delete the kernel
-partition of a Wic image and then insert a newly built kernel. This
-saves you time from having to rebuild the entire image each time you
-modify the kernel.
-
-.. note::
-
- In order to use Wic to manipulate a Wic image as in this example,
- your development machine must have the ``mtools`` package installed.
-
-The following example examines the contents of the Wic image, deletes
-the existing kernel, and then inserts a new kernel:
-
-1. *List the Partitions:* Use the ``wic ls`` command to list all the
- partitions in the Wic image::
-
- $ wic ls tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic
- Num Start End Size Fstype
- 1 1048576 25041919 23993344 fat16
- 2 25165824 72157183 46991360 ext4
-
- The previous output shows two partitions in the
- ``core-image-minimal-qemux86.wic`` image.
-
-2. *Examine a Particular Partition:* Use the ``wic ls`` command again
- but in a different form to examine a particular partition.
-
- .. note::
-
- You can get command usage on any Wic command using the following
- form::
-
- $ wic help command
-
-
- For example, the following command shows you the various ways to
- use the
- wic ls
- command::
-
- $ wic help ls
-
-
- The following command shows what is in partition one::
-
- $ wic ls tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1
- Volume in drive : is boot
- Volume Serial Number is E894-1809
- Directory for ::/
-
- libcom32 c32 186500 2017-10-09 16:06
- libutil c32 24148 2017-10-09 16:06
- syslinux cfg 220 2017-10-09 16:06
- vesamenu c32 27104 2017-10-09 16:06
- vmlinuz 6904608 2017-10-09 16:06
- 5 files 7 142 580 bytes
- 16 582 656 bytes free
-
- The previous output shows five files, with the
- ``vmlinuz`` being the kernel.
-
- .. note::
-
- If you see the following error, you need to update or create a
- ``~/.mtoolsrc`` file and be sure to have the line "mtools_skip_check=1"
- in the file. Then, run the Wic command again::
-
- ERROR: _exec_cmd: /usr/bin/mdir -i /tmp/wic-parttfokuwra ::/ returned '1' instead of 0
- output: Total number of sectors (47824) not a multiple of sectors per track (32)!
- Add mtools_skip_check=1 to your .mtoolsrc file to skip this test
-
-
-3. *Remove the Old Kernel:* Use the ``wic rm`` command to remove the
- ``vmlinuz`` file (kernel)::
-
- $ wic rm tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1/vmlinuz
-
-4. *Add In the New Kernel:* Use the ``wic cp`` command to add the
- updated kernel to the Wic image. Depending on how you built your
- kernel, it could be in different places. If you used ``devtool`` and
- an SDK to build your kernel, it resides in the ``tmp/work`` directory
- of the extensible SDK. If you used ``make`` to build the kernel, the
- kernel will be in the ``workspace/sources`` area.
-
- The following example assumes ``devtool`` was used to build the
- kernel::
-
- $ wic cp poky_sdk/tmp/work/qemux86-poky-linux/linux-yocto/4.12.12+git999-r0/linux-yocto-4.12.12+git999/arch/x86/boot/bzImage \
- poky/build/tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1/vmlinuz
-
- Once the new kernel is added back into the image, you can use the
- ``dd`` command or :ref:`bmaptool
- <dev-manual/common-tasks:flashing images using \`\`bmaptool\`\`>`
- to flash your wic image onto an SD card or USB stick and test your
- target.
-
- .. note::
-
- Using ``bmaptool`` is generally 10 to 20 times faster than using ``dd``.
-
-Flashing Images Using ``bmaptool``
-==================================
-
-A fast and easy way to flash an image to a bootable device is to use
-Bmaptool, which is integrated into the OpenEmbedded build system.
-Bmaptool is a generic tool that creates a file's block map (bmap) and
-then uses that map to copy the file. As compared to traditional tools
-such as ``dd`` or ``cp``, Bmaptool can copy (or flash) large files like
-raw system image files much faster.
-
-.. note::
-
- - If you are using Ubuntu or Debian distributions, you can install
- the ``bmap-tools`` package using the following command and then
- use the tool without specifying ``PATH`` even from the root
- account::
-
- $ sudo apt install bmap-tools
-
- - If you are unable to install the ``bmap-tools`` package, you will
- need to build Bmaptool before using it. Use the following command::
-
- $ bitbake bmap-tools-native
-
-The following example shows how to flash a Wic image. While this
-example uses a Wic image, you can use Bmaptool to flash any type of
-image. Use these steps to flash an image using Bmaptool:
-
-1. *Update your local.conf File:* You need to have the following set
- in your ``local.conf`` file before building your image::
-
- IMAGE_FSTYPES += "wic wic.bmap"
-
-2. *Get Your Image:* Either have your image ready (pre-built with the
- :term:`IMAGE_FSTYPES`
- setting previously mentioned) or take the step to build the image::
-
- $ bitbake image
-
-3. *Flash the Device:* Flash the device with the image by using Bmaptool
- depending on your particular setup. The following commands assume the
- image resides in the Build Directory's ``deploy/images/`` area:
-
- - If you have write access to the media, use this command form::
-
- $ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX
-
- - If you do not have write access to the media, set your permissions
- first and then use the same command form::
-
- $ sudo chmod 666 /dev/sdX
- $ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX
-
-For help on the ``bmaptool`` command, use the following command::
-
- $ bmaptool --help
-
-Making Images More Secure
-=========================
-
-Security is of increasing concern for embedded devices. Consider the
-issues and problems discussed in just this sampling of work found across
-the Internet:
-
-- *"*\ `Security Risks of Embedded
- Systems <https://www.schneier.com/blog/archives/2014/01/security_risks_9.html>`__\ *"*
- by Bruce Schneier
-
-- *"*\ `Internet Census
- 2012 <http://census2012.sourceforge.net/paper.html>`__\ *"* by Carna
- Botnet
-
-- *"*\ `Security Issues for Embedded
- Devices <https://elinux.org/images/6/6f/Security-issues.pdf>`__\ *"*
- by Jake Edge
-
-When securing your image is of concern, there are steps, tools, and
-variables that you can consider to help you reach the security goals you
-need for your particular device. Not all situations are identical when
-it comes to making an image secure. Consequently, this section provides
-some guidance and suggestions for consideration when you want to make
-your image more secure.
-
-.. note::
-
- Because the security requirements and risks are different for every
- type of device, this section cannot provide a complete reference on
- securing your custom OS. It is strongly recommended that you also
- consult other sources of information on embedded Linux system
- hardening and on security.
-
-General Considerations
-----------------------
-
-There are general considerations that help you create more secure images.
-You should consider the following suggestions to make your device
-more secure:
-
-- Scan additional code you are adding to the system (e.g. application
- code) by using static analysis tools. Look for buffer overflows and
- other potential security problems.
-
-- Pay particular attention to the security for any web-based
- administration interface.
-
- Web interfaces typically need to perform administrative functions and
- tend to need to run with elevated privileges. Thus, the consequences
- resulting from the interface's security becoming compromised can be
- serious. Look for common web vulnerabilities such as
- cross-site-scripting (XSS), unvalidated inputs, and so forth.
-
- As with system passwords, the default credentials for accessing a
- web-based interface should not be the same across all devices. This
- is particularly true if the interface is enabled by default as it can
- be assumed that many end-users will not change the credentials.
-
-- Ensure you can update the software on the device to mitigate
- vulnerabilities discovered in the future. This consideration
- especially applies when your device is network-enabled.
-
-- Ensure you remove or disable debugging functionality before producing
- the final image. For information on how to do this, see the
- ":ref:`dev-manual/common-tasks:considerations specific to the openembedded build system`"
- section.
-
-- Ensure you have no network services listening that are not needed.
-
-- Remove any software from the image that is not needed.
-
-- Enable hardware support for secure boot functionality when your
- device supports this functionality.
-
-Security Flags
---------------
-
-The Yocto Project has security flags that you can enable that help make
-your build output more secure. The security flags are in the
-``meta/conf/distro/include/security_flags.inc`` file in your
-:term:`Source Directory` (e.g. ``poky``).
-
-.. note::
-
- Depending on the recipe, certain security flags are enabled and
- disabled by default.
-
-Use the following line in your ``local.conf`` file or in your custom
-distribution configuration file to enable the security compiler and
-linker flags for your build::
-
- require conf/distro/include/security_flags.inc
-
-Considerations Specific to the OpenEmbedded Build System
---------------------------------------------------------
-
-You can take some steps that are specific to the OpenEmbedded build
-system to make your images more secure:
-
-- Ensure "debug-tweaks" is not one of your selected
- :term:`IMAGE_FEATURES`.
- When creating a new project, the default is to provide you with an
- initial ``local.conf`` file that enables this feature using the
- :term:`EXTRA_IMAGE_FEATURES`
- variable with the line::
-
- EXTRA_IMAGE_FEATURES = "debug-tweaks"
-
- To disable that feature, simply comment out that line in your
- ``local.conf`` file, or make sure :term:`IMAGE_FEATURES` does not contain
- "debug-tweaks" before producing your final image. Among other things,
- leaving this in place sets the root password as blank, which makes
- logging in for debugging or inspection easy during development but
- also means anyone can easily log in during production.
-
-- It is possible to set a root password for the image and also to set
- passwords for any extra users you might add (e.g. administrative or
- service type users). When you set up passwords for multiple images or
- users, you should not duplicate passwords.
-
- To set up passwords, use the
- :ref:`extrausers <ref-classes-extrausers>`
- class, which is the preferred method. For an example on how to set up
-   both root and user passwords, see the
-   ":ref:`ref-classes-extrausers`" section. A brief configuration sketch
-   also follows this list.
-
- .. note::
-
- When adding extra user accounts or setting a root password, be
- cautious about setting the same password on every device. If you
- do this, and the password you have set is exposed, then every
- device is now potentially compromised. If you need this access but
- want to ensure security, consider setting a different, random
- password for each device. Typically, you do this as a separate
- step after you deploy the image onto the device.
-
-- Consider enabling a Mandatory Access Control (MAC) framework such as
- SMACK or SELinux and tuning it appropriately for your device's usage.
- You can find more information in the
- :yocto_git:`meta-selinux </meta-selinux/>` layer.
-
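-As a sketch of the password-related suggestions above, a ``local.conf``
-or distribution fragment using the ``extrausers`` class might look like
-the following. The password hashes are placeholders that you would
-generate yourself (for example with ``openssl passwd -6``), and
-``myadmin`` is a hypothetical user name::
-
-   INHERIT += "extrausers"
-   EXTRA_USERS_PARAMS = "\
-       usermod -p '<root-password-hash>' root; \
-       useradd -p '<myadmin-password-hash>' myadmin; \
-       "
-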
-Tools for Hardening Your Image
-------------------------------
-
-The Yocto Project provides tools for making your image more secure. You
-can find these tools in the ``meta-security`` layer of the
-:yocto_git:`Yocto Project Source Repositories <>`.
-
-Creating Your Own Distribution
-==============================
-
-When you build an image using the Yocto Project and do not alter any
-distribution :term:`Metadata`, you are
-creating a Poky distribution. If you wish to gain more control over
-package alternative selections, compile-time options, and other
-low-level configurations, you can create your own distribution.
-
-To create your own distribution, the basic steps consist of creating
-your own distribution layer, creating your own distribution
-configuration file, and then adding any needed code and Metadata to the
-layer. The following steps provide some more detail:
-
-- *Create a layer for your new distro:* Create your distribution layer
- so that you can keep your Metadata and code for the distribution
- separate. It is strongly recommended that you create and use your own
- layer for configuration and code. Using your own layer as compared to
- just placing configurations in a ``local.conf`` configuration file
- makes it easier to reproduce the same build configuration when using
- multiple build machines. See the
- ":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
- section for information on how to quickly set up a layer.
-
-- *Create the distribution configuration file:* The distribution
- configuration file needs to be created in the ``conf/distro``
- directory of your layer. You need to name it using your distribution
- name (e.g. ``mydistro.conf``).
-
- .. note::
-
- The :term:`DISTRO` variable in your ``local.conf`` file determines the
- name of your distribution.
-
- You can split out parts of your configuration file into include files
- and then "require" them from within your distribution configuration
- file. Be sure to place the include files in the
- ``conf/distro/include`` directory of your layer. A common example
- usage of include files would be to separate out the selection of
- desired version and revisions for individual recipes.
-
-   Your configuration file needs to set the following required
-   variables (a minimal example configuration file follows this list):
-
- - :term:`DISTRO_NAME`
-
- - :term:`DISTRO_VERSION`
-
- These following variables are optional and you typically set them
- from the distribution configuration file:
-
- - :term:`DISTRO_FEATURES`
-
- - :term:`DISTRO_EXTRA_RDEPENDS`
-
- - :term:`DISTRO_EXTRA_RRECOMMENDS`
-
- - :term:`TCLIBC`
-
- .. tip::
-
- If you want to base your distribution configuration file on the
- very basic configuration from OE-Core, you can use
- ``conf/distro/defaultsetup.conf`` as a reference and just include
- variables that differ as compared to ``defaultsetup.conf``.
- Alternatively, you can create a distribution configuration file
- from scratch using the ``defaultsetup.conf`` file or configuration files
- from another distribution such as Poky as a reference.
-
-- *Provide miscellaneous variables:* Be sure to define any other
- variables for which you want to create a default or enforce as part
- of the distribution configuration. You can include nearly any
- variable from the ``local.conf`` file. The variables you use are not
- limited to the list in the previous bulleted item.
-
-- *Point to Your distribution configuration file:* In your
- ``local.conf`` file in the :term:`Build Directory`,
- set your
- :term:`DISTRO` variable to point to
- your distribution's configuration file. For example, if your
- distribution's configuration file is named ``mydistro.conf``, then
- you point to it as follows::
-
- DISTRO = "mydistro"
-
-- *Add more to the layer if necessary:* Use your layer to hold other
- information needed for the distribution:
-
- - Add recipes for installing distro-specific configuration files
- that are not already installed by another recipe. If you have
- distro-specific configuration files that are included by an
- existing recipe, you should add an append file (``.bbappend``) for
- those. For general information and recommendations on how to add
- recipes to your layer, see the
- ":ref:`dev-manual/common-tasks:creating your own layer`" and
- ":ref:`dev-manual/common-tasks:following best practices when creating layers`"
- sections.
-
- - Add any image recipes that are specific to your distribution.
-
- - Add a ``psplash`` append file for a branded splash screen. For
- information on append files, see the
- ":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
- section.
-
- - Add any other append files to make custom changes that are
- specific to individual recipes.
-
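-Pulling these steps together, a minimal distribution configuration file
-(using the hypothetical ``mydistro`` name from above) might look like
-the following sketch::
-
-   # conf/distro/mydistro.conf
-   DISTRO = "mydistro"
-   DISTRO_NAME = "My Distribution"
-   DISTRO_VERSION = "1.0"
-
-   # Optional settings, adjusted to suit the distribution
-   DISTRO_FEATURES:append = " systemd"
-   TCLIBC = "glibc"
-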
-Creating a Custom Template Configuration Directory
-==================================================
-
-If you are producing your own customized version of the build system for
-use by other users, you might want to customize the message shown by the
-setup script or you might want to change the template configuration
-files (i.e. ``local.conf`` and ``bblayers.conf``) that are created in a
-new build directory.
-
-The OpenEmbedded build system uses the environment variable
-``TEMPLATECONF`` to locate the directory from which it gathers
-configuration information that ultimately ends up in the
-:term:`Build Directory` ``conf`` directory.
-By default, ``TEMPLATECONF`` is set as follows in the ``poky``
-repository::
-
- TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf}
-
-This is the
-directory used by the build system to find templates from which to build
-some key configuration files. If you look at this directory, you will
-see the ``bblayers.conf.sample``, ``local.conf.sample``, and
-``conf-notes.txt`` files. The build system uses these files to form the
-respective ``bblayers.conf`` file, ``local.conf`` file, and display the
-list of BitBake targets when running the setup script.
-
-To override these default configuration files with configurations you
-want used within every new Build Directory, simply set the
-``TEMPLATECONF`` variable to your directory. The ``TEMPLATECONF``
-variable is set in the ``.templateconf`` file, which is in the top-level
-:term:`Source Directory` folder
-(e.g. ``poky``). Edit the ``.templateconf`` so that it can locate your
-directory.
-
-Best practices dictate that you should keep your template configuration
-directory in your custom distribution layer. For example, suppose you
-have a layer named ``meta-mylayer`` located in your home directory and
-you want your template configuration directory named ``myconf``.
-Changing the ``.templateconf`` as follows causes the OpenEmbedded build
-system to look in your directory and base its configuration files on the
-``*.sample`` configuration files it finds. The final configuration files
-(i.e. ``local.conf`` and ``bblayers.conf``) ultimately still end up in
-your Build Directory, but they are based on your ``*.sample`` files.
-::
-
- TEMPLATECONF=${TEMPLATECONF:-meta-mylayer/myconf}
-
-Aside from the ``*.sample`` configuration files, the ``conf-notes.txt``
-file also resides in the default ``meta-poky/conf`` directory. The script
-that sets up the build environment (i.e.
-:ref:`structure-core-script`) uses this file to
-display BitBake targets as part of the script output. Customizing this
-``conf-notes.txt`` file is a good way to make sure your list of custom
-targets appears as part of the script's output.
-
-Here is the default list of targets displayed as a result of running
-either of the setup scripts::
-
- You can now run 'bitbake <target>'
-
- Common targets are:
- core-image-minimal
- core-image-sato
- meta-toolchain
- meta-ide-support
-
-Changing the listed common targets is as easy as editing your version of
-``conf-notes.txt`` in your custom template configuration directory and
-making sure you have ``TEMPLATECONF`` set to your directory.
-
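-For example, a customized ``conf-notes.txt`` in a hypothetical
-``meta-mylayer/myconf`` template directory might read as follows so that
-your own image targets are listed instead::
-
-   You can now run 'bitbake <target>'
-
-   Common targets are:
-       my-image-minimal
-       my-image-dev
-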
-Conserving Disk Space
-=====================
-
-Conserving Disk Space During Builds
------------------------------------
-
-To help conserve disk space during builds, you can add the following
-statement to your project's ``local.conf`` configuration file found in
-the :term:`Build Directory`::
-
- INHERIT += "rm_work"
-
-Adding this statement deletes the work directory used for
-building a recipe once the recipe is built. For more information on
-"rm_work", see the
-:ref:`rm_work <ref-classes-rm-work>` class in the
-Yocto Project Reference Manual.
-
-Purging Duplicate Shared State Cache Files
--------------------------------------------
-
-After multiple build iterations, the Shared State (sstate) cache can contain
-duplicate cache files for a given package, while only the most recent one
-is likely to be reusable. The following command purges all but the
-newest sstate cache file for each package::
-
- sstate-cache-management.sh --remove-duplicated --cache-dir=build/sstate-cache
-
-This command will ask you to confirm the deletions it identifies.
-
-.. note::
-
-   The duplicated sstate cache files of one package must have the same
-   architecture, which means that sstate cache files built for different
-   architectures are not considered duplicates.
-
-Run ``sstate-cache-management.sh`` for more details about this script.
-
-Working with Packages
-=====================
-
-This section describes a few tasks that involve packages:
-
-- :ref:`dev-manual/common-tasks:excluding packages from an image`
-
-- :ref:`dev-manual/common-tasks:incrementing a package version`
-
-- :ref:`dev-manual/common-tasks:handling optional module packaging`
-
-- :ref:`dev-manual/common-tasks:using runtime package management`
-
-- :ref:`dev-manual/common-tasks:generating and using signed packages`
-
-- :ref:`Setting up and running package test
- (ptest) <dev-manual/common-tasks:testing packages with ptest>`
-
-- :ref:`dev-manual/common-tasks:creating node package manager (npm) packages`
-
-- :ref:`dev-manual/common-tasks:adding custom metadata to packages`
-
-Excluding Packages from an Image
---------------------------------
-
-You might find it necessary to prevent specific packages from being
-installed into an image. If so, you can use several variables to direct
-the build system to essentially ignore installing recommended packages
-or to not install a package at all.
-
-The following list introduces variables you can use to prevent packages
-from being installed into your image. Each of these variables works
-only with the IPK and RPM package types, not with Debian packages.
-You can use these variables from your ``local.conf`` file
-or attach them to a specific image recipe by using a recipe name
-override; a brief example follows the list. For more detail on the
-variables, see the descriptions in the Yocto Project Reference Manual's
-glossary chapter.
-
-- :term:`BAD_RECOMMENDATIONS`:
- Use this variable to specify "recommended-only" packages that you do
- not want installed.
-
-- :term:`NO_RECOMMENDATIONS`:
- Use this variable to prevent all "recommended-only" packages from
- being installed.
-
-- :term:`PACKAGE_EXCLUDE`:
- Use this variable to prevent specific packages from being installed
- regardless of whether they are "recommended-only" or not. You need to
- realize that the build process could fail with an error when you
- prevent the installation of a package whose presence is required by
- an installed package.
-
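-Here is a brief, hypothetical ``local.conf`` fragment that illustrates
-the three variables (the package names are examples only)::
-
-   # Prevent installation of a specific recommended-only package
-   BAD_RECOMMENDATIONS = "udev-hwdb"
-
-   # Prevent installation of all recommended-only packages
-   NO_RECOMMENDATIONS = "1"
-
-   # Never install these packages; the build can fail if another
-   # installed package requires one of them
-   PACKAGE_EXCLUDE = "perl python3"
-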
-Incrementing a Package Version
-------------------------------
-
-This section provides some background on how binary package versioning
-is accomplished and presents some of the services, variables, and
-terminology involved.
-
-In order to understand binary package versioning, you need to consider
-the following:
-
-- Binary Package: The binary package that is eventually built and
- installed into an image.
-
-- Binary Package Version: The binary package version is composed of two
- components - a version and a revision.
-
- .. note::
-
- Technically, a third component, the "epoch" (i.e. :term:`PE`) is involved
- but this discussion for the most part ignores :term:`PE`.
-
- The version and revision are taken from the
- :term:`PV` and
- :term:`PR` variables, respectively.
-
-- :term:`PV`: The recipe version. :term:`PV` represents the version of the
- software being packaged. Do not confuse :term:`PV` with the binary
- package version.
-
-- :term:`PR`: The recipe revision.
-
-- :term:`SRCPV`: The OpenEmbedded
- build system uses this string to help define the value of :term:`PV` when
- the source code revision needs to be included in it.
-
-- :yocto_wiki:`PR Service </PR_Service>`: A
- network-based service that helps automate keeping package feeds
- compatible with existing package manager applications such as RPM,
- APT, and OPKG.
-
-Whenever the binary package content changes, the binary package version
-must change. Changing the binary package version is accomplished by
-changing or "bumping" the :term:`PR` and/or :term:`PV` values. Increasing these
-values occurs one of two ways:
-
-- Automatically using a Package Revision Service (PR Service).
-
-- Manually incrementing the :term:`PR` and/or :term:`PV` variables.
-
-Because a primary challenge for any build system and its users is
-maintaining a package feed that is compatible with existing package
-manager applications such as RPM, APT, and OPKG, an automated system is
-much preferred over a manual one. In either case, the main
-requirement is that binary package version numbering increases
-linearly and that enough version components exist to
-support that linear progression. For information on how to ensure
-package revisioning remains linear, see the
-":ref:`dev-manual/common-tasks:automatically incrementing a package version number`"
-section.
-
-The following three sections provide related information on the PR
-Service, the manual method for "bumping" :term:`PR` and/or :term:`PV`, and on
-how to ensure binary package revisioning remains linear.
-
-Working With a PR Service
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As mentioned, attempting to maintain revision numbers in the
-:term:`Metadata` is error prone, inaccurate,
-and causes problems for people submitting recipes. Conversely, the PR
-Service automatically generates increasing numbers, particularly the
-revision field, which removes the human element.
-
-.. note::
-
- For additional information on using a PR Service, you can see the
- :yocto_wiki:`PR Service </PR_Service>` wiki page.
-
-The Yocto Project uses variables in order of decreasing priority to
-facilitate revision numbering (i.e.
-:term:`PE`,
-:term:`PV`, and
-:term:`PR` for epoch, version, and
-revision, respectively). The values are highly dependent on the policies
-and procedures of a given distribution and package feed.
-
-Because the OpenEmbedded build system uses
-":ref:`signatures <overview-manual/concepts:checksums (signatures)>`", which are
-unique to a given build, the build system knows when to rebuild
-packages. All the inputs into a given task are represented by a
-signature, which can trigger a rebuild when different. Thus, the build
-system itself does not rely on the :term:`PR`, :term:`PV`, and :term:`PE` numbers to
-trigger a rebuild. The signatures, however, can be used to generate
-these values.
-
-The PR Service works with both ``OEBasic`` and ``OEBasicHash``
-generators. The value of :term:`PR` bumps when the checksum changes and the
-different generator mechanisms change signatures under different
-circumstances.
-
-As implemented, the build system includes values from the PR Service
-into the :term:`PR` field as an addition using the form "``.x``" so ``r0``
-becomes ``r0.1``, ``r0.2`` and so forth. This scheme allows existing
-:term:`PR` values to be used for whatever reasons, which include manual
-:term:`PR` bumps, should it be necessary.
-
-By default, the PR Service is not enabled or running. Thus, the packages
-generated are just "self consistent". The build system adds and removes
-packages and there are no guarantees about upgrade paths but images will
-be consistent and correct with the latest changes.
-
-The simplest form for a PR Service is for a single host
-development system that builds the package feed (building system). For
-this scenario, you can enable a local PR Service by setting
-:term:`PRSERV_HOST` in your
-``local.conf`` file in the :term:`Build Directory`::
-
- PRSERV_HOST = "localhost:0"
-
-Once the service is started, packages will automatically
-get increasing :term:`PR` values and BitBake takes care of starting and
-stopping the server.
-
-If you have a more complex setup where multiple host development systems
-work against a common, shared package feed, you have a single PR Service
-running and it is connected to each building system. For this scenario,
-you need to start the PR Service using the ``bitbake-prserv`` command::
-
- bitbake-prserv --host ip --port port --start
-
-In addition to
-hand-starting the service, you need to update the ``local.conf`` file of
-each building system as described earlier so each system points to the
-server and port.
-
-It is also recommended you use build history, which adds some sanity
-checks to binary package versions, in conjunction with the server that
-is running the PR Service. To enable build history, add the following to
-each building system's ``local.conf`` file::
-
- # It is recommended to activate "buildhistory" for testing the PR service
- INHERIT += "buildhistory"
- BUILDHISTORY_COMMIT = "1"
-
-For information on build
-history, see the
-":ref:`dev-manual/common-tasks:maintaining build output quality`" section.
-
-.. note::
-
- The OpenEmbedded build system does not maintain :term:`PR` information as
- part of the shared state (sstate) packages. If you maintain an sstate
- feed, it's expected that either all your building systems that
- contribute to the sstate feed use a shared PR Service, or you do not
- run a PR Service on any of your building systems. Having some systems
- use a PR Service while others do not leads to obvious problems.
-
- For more information on shared state, see the
- ":ref:`overview-manual/concepts:shared state cache`"
- section in the Yocto Project Overview and Concepts Manual.
-
-Manually Bumping PR
-~~~~~~~~~~~~~~~~~~~
-
-The alternative to setting up a PR Service is to manually "bump" the
-:term:`PR` variable.
-
-If a committed change results in changing the package output, then the
-value of the :term:`PR` variable needs to be increased (or "bumped") as
-part of that commit. For new recipes you should add the :term:`PR`
-variable and set its initial value equal to "r0", which is the default.
-Even though the default value is "r0", the practice of adding it to a
-new recipe makes it harder to forget to bump the variable when you make
-changes to the recipe in the future.
-
-If you are sharing a common ``.inc`` file with multiple recipes, you can
-also use the :term:`INC_PR` variable to ensure that the recipes sharing the
-``.inc`` file are rebuilt when the ``.inc`` file itself is changed. The
-``.inc`` file must set :term:`INC_PR` (initially to "r0"), and all recipes
-referring to it should set :term:`PR` to "${INC_PR}.0" initially,
-incrementing the last number when the recipe is changed. If the ``.inc``
-file is changed then its :term:`INC_PR` should be incremented.
-
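-Here is a minimal sketch of that arrangement using hypothetical file
-names::
-
-   # example.inc -- shared by several recipes
-   INC_PR = "r0"
-
-   # example_1.0.bb
-   require example.inc
-   PR = "${INC_PR}.0"
-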
-When upgrading the version of a binary package, assuming the :term:`PV`
-changes, the :term:`PR` variable should be reset to "r0" (or "${INC_PR}.0"
-if you are using :term:`INC_PR`).
-
-Usually, version increases occur only to binary packages. However, if
-for some reason :term:`PV` changes but does not increase, you can increase
-the :term:`PE` variable (Package Epoch). The :term:`PE` variable defaults to
-"0".
-
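-For example, adding the following to a recipe bumps the epoch so that
-package managers treat the new version as an upgrade even though
-:term:`PV` did not increase::
-
-   PE = "1"
-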
-Binary package version numbering strives to follow the `Debian Version
-Field Policy
-Guidelines <https://www.debian.org/doc/debian-policy/ch-controlfields.html>`__.
-These guidelines define how versions are compared and what "increasing"
-a version means.
-
-Automatically Incrementing a Package Version Number
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When fetching a repository, BitBake uses the
-:term:`SRCREV` variable to determine
-the specific source code revision from which to build. You set the
-:term:`SRCREV` variable to
-:term:`AUTOREV` to cause the
-OpenEmbedded build system to automatically use the latest revision of
-the software::
-
- SRCREV = "${AUTOREV}"
-
-Furthermore, you need to reference :term:`SRCPV` in :term:`PV` in order to
-automatically update the version whenever the revision of the source
-code changes. Here is an example::
-
- PV = "1.0+git${SRCPV}"
-
-The OpenEmbedded build system substitutes :term:`SRCPV` with the following:
-
-.. code-block:: none
-
- AUTOINC+source_code_revision
-
-The build system replaces the ``AUTOINC``
-with a number. The number used depends on the state of the PR Service:
-
-- If PR Service is enabled, the build system increments the number,
- which is similar to the behavior of
- :term:`PR`. This behavior results in
- linearly increasing package versions, which is desirable. Here is an
- example:
-
- .. code-block:: none
-
- hello-world-git_0.0+git0+b6558dd387-r0.0_armv7a-neon.ipk
- hello-world-git_0.0+git1+dd2f5c3565-r0.0_armv7a-neon.ipk
-
-- If PR Service is not enabled, the build system replaces the
- ``AUTOINC`` placeholder with zero (i.e. "0"). This results in
- changing the package version since the source revision is included.
- However, package versions are not increased linearly. Here is an
- example:
-
- .. code-block:: none
-
- hello-world-git_0.0+git0+b6558dd387-r0.0_armv7a-neon.ipk
- hello-world-git_0.0+git0+dd2f5c3565-r0.0_armv7a-neon.ipk
-
-In summary, the OpenEmbedded build system does not track the history of
-binary package versions for this purpose. ``AUTOINC``, in this case, is
-comparable to :term:`PR`. If PR server is not enabled, ``AUTOINC`` in the
-package version is simply replaced by "0". If PR server is enabled, the
-build system keeps track of the package versions and bumps the number
-when the package revision changes.
-
-Handling Optional Module Packaging
-----------------------------------
-
-Many pieces of software split functionality into optional modules (or
-plugins) and the plugins that are built might depend on configuration
-options. To avoid having to duplicate the logic that determines what
-modules are available in your recipe or to avoid having to package each
-module by hand, the OpenEmbedded build system provides functionality to
-handle module packaging dynamically.
-
-To handle optional module packaging, you need to do two things:
-
-- Ensure the module packaging is actually done.
-
-- Ensure that any dependencies on optional modules from other recipes
- are satisfied by your recipe.
-
-Making Sure the Packaging is Done
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To ensure the module packaging actually gets done, you use the
-``do_split_packages`` function within the ``populate_packages`` Python
-function in your recipe. The ``do_split_packages`` function searches for
-a pattern of files or directories under a specified path and creates a
-package for each one it finds by appending to the
-:term:`PACKAGES` variable and
-setting the appropriate values for ``FILES:packagename``,
-``RDEPENDS:packagename``, ``DESCRIPTION:packagename``, and so forth.
-Here is an example from the ``lighttpd`` recipe::
-
- python populate_packages:prepend () {
- lighttpd_libdir = d.expand('${libdir}')
- do_split_packages(d, lighttpd_libdir, '^mod_(.*).so$',
- 'lighttpd-module-%s', 'Lighttpd module for %s',
- extra_depends='')
- }
-
-The previous example specifies a number of things in the call to
-``do_split_packages``.
-
-- A directory within the files installed by your recipe through
- ``do_install`` in which to search.
-
-- A regular expression used to match module files in that directory. In
- the example, note the parentheses () that mark the part of the
- expression from which the module name should be derived.
-
-- A pattern to use for the package names.
-
-- A description for each package.
-
-- An empty string for ``extra_depends``, which disables the default
- dependency on the main ``lighttpd`` package. Thus, if a file in
- ``${libdir}`` called ``mod_alias.so`` is found, a package called
- ``lighttpd-module-alias`` is created for it and the
- :term:`DESCRIPTION` is set to
- "Lighttpd module for alias".
-
-Often, packaging modules is as simple as the previous example. However,
-there are more advanced options that you can use within
-``do_split_packages`` to modify its behavior. And, if you need to, you
-can add more logic by specifying a hook function that is called for each
-package. It is also perfectly acceptable to call ``do_split_packages``
-multiple times if you have more than one set of modules to package.
-
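-As a sketch of the hook mechanism, the following hypothetical recipe
-fragment logs each module package as it is created (the library path,
-module pattern, and package names are made up)::
-
-   python populate_packages:prepend () {
-       def module_hook(f, pkg, file_regex, output_pattern, modulename):
-           # Hypothetical per-package logic: record the package that was
-           # created for each matched module file.
-           bb.note("Created %s for module file %s" % (pkg, f))
-
-       do_split_packages(d, d.expand('${libdir}/myapp'), r'^mod_(.*)\.so$',
-                         'myapp-module-%s', 'MyApp module for %s',
-                         hook=module_hook, extra_depends='')
-   }
-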
-For more examples that show how to use ``do_split_packages``, see the
-``connman.inc`` file in the ``meta/recipes-connectivity/connman/``
-directory of the ``poky`` :ref:`source repository <overview-manual/development-environment:yocto project source repositories>`. You can
-also find examples in ``meta/classes/kernel.bbclass``.
-
-Following is a reference that shows ``do_split_packages`` mandatory and
-optional arguments::
-
- Mandatory arguments
-
- root
- The path in which to search
- file_regex
- Regular expression to match searched files.
- Use parentheses () to mark the part of this
- expression that should be used to derive the
- module name (to be substituted where %s is
- used in other function arguments as noted below)
- output_pattern
- Pattern to use for the package names. Must
- include %s.
- description
- Description to set for each package. Must
- include %s.
-
- Optional arguments
-
- postinst
- Postinstall script to use for all packages
- (as a string)
- recursive
- True to perform a recursive search - default
- False
- hook
- A hook function to be called for every match.
- The function will be called with the following
- arguments (in the order listed):
-
- f
- Full path to the file/directory match
- pkg
- The package name
- file_regex
- As above
- output_pattern
- As above
- modulename
- The module name derived using file_regex
- extra_depends
- Extra runtime dependencies (RDEPENDS) to be
- set for all packages. The default value of None
- causes a dependency on the main package
- (${PN}) - if you do not want this, pass empty
- string '' for this parameter.
- aux_files_pattern
- Extra item(s) to be added to FILES for each
- package. Can be a single string item or a list
- of strings for multiple items. Must include %s.
- postrm
- postrm script to use for all packages (as a
- string)
- allow_dirs
- True to allow directories to be matched -
- default False
- prepend
- If True, prepend created packages to PACKAGES
- instead of the default False which appends them
- match_path
- match file_regex on the whole relative path to
- the root rather than just the filename
- aux_files_pattern_verbatim
- Extra item(s) to be added to FILES for each
- package, using the actual derived module name
- rather than converting it to something legal
- for a package name. Can be a single string item
- or a list of strings for multiple items. Must
- include %s.
- allow_links
- True to allow symlinks to be matched - default
- False
- summary
- Summary to set for each package. Must include %s;
- defaults to description if not set.
-
-
-
-Satisfying Dependencies
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The second part for handling optional module packaging is to ensure that
-any dependencies on optional modules from other recipes are satisfied by
-your recipe. You can be sure these dependencies are satisfied by using
-the :term:`PACKAGES_DYNAMIC`
-variable. Here is an example that continues with the ``lighttpd`` recipe
-shown earlier::
-
- PACKAGES_DYNAMIC = "lighttpd-module-.*"
-
-The name
-specified in the regular expression can of course be anything. In this
-example, it is ``lighttpd-module-`` and is specified as the prefix to
-ensure that any :term:`RDEPENDS` and
-:term:`RRECOMMENDS` on a package
-name starting with the prefix are satisfied during build time. If you
-are using ``do_split_packages`` as described in the previous section,
-the value you put in :term:`PACKAGES_DYNAMIC` should correspond to the name
-pattern specified in the call to ``do_split_packages``.
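-
-For example, with the setting above, a hypothetical recipe elsewhere in
-your build could declare a runtime dependency on one of the dynamically
-created module packages (assuming the ``lighttpd`` build produces a
-``mod_rewrite`` module) even though no such package is statically
-defined anywhere::
-
-   RDEPENDS:${PN} += "lighttpd-module-rewrite"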
-
-Using Runtime Package Management
---------------------------------
-
-During a build, BitBake always transforms a recipe into one or more
-packages. For example, BitBake takes the ``bash`` recipe and produces a
-number of packages (e.g. ``bash``, ``bash-bashbug``,
-``bash-completion``, ``bash-completion-dbg``, ``bash-completion-dev``,
-``bash-completion-extra``, ``bash-dbg``, and so forth). Not all
-generated packages are included in an image.
-
-In several situations, you might need to update, add, remove, or query
-the packages on a target device at runtime (i.e. without having to
-generate a new image). Examples of such situations include:
-
-- You want to provide in-the-field updates to deployed devices (e.g.
- security updates).
-
-- You want to have a fast turn-around development cycle for one or more
- applications that run on your device.
-
-- You want to temporarily install the "debug" packages of various
- applications on your device so that debugging can be greatly improved
- by allowing access to symbols and source debugging.
-
-- You want to deploy a more minimal package selection of your device
- but allow in-the-field updates to add a larger selection for
- customization.
-
-In all these situations, you have something similar to a more
-traditional Linux distribution in that in-field devices are able to
-receive pre-compiled packages from a server for installation or update.
-Being able to install these packages on a running, in-field device is
-what is termed "runtime package management".
-
-In order to use runtime package management, you need a host or server
-machine that serves up the pre-compiled packages plus the required
-metadata. You also need package manipulation tools on the target. The
-build machine is a likely candidate to act as the server. However, that
-machine does not necessarily have to be the package server. The build
-machine could push its artifacts to another machine that acts as the
-server (e.g. Internet-facing). In fact, doing so is advantageous for a
-production environment as getting the packages away from the development
-system's build directory prevents accidental overwrites.
-
-A simple build that targets just one device produces more than one
-package database. In other words, the packages produced by a build are
-separated out into several different package groupings based on
-criteria such as the target's CPU architecture, the target board, or the
-C library used on the target. For example, a build targeting the
-``qemux86`` device produces the following three package databases:
-``noarch``, ``i586``, and ``qemux86``. If you wanted your ``qemux86``
-device to be aware of all the packages that were available to it, you
-would need to point it to each of these databases individually. In a
-similar way, a traditional Linux distribution usually is configured to
-be aware of a number of software repositories from which it retrieves
-packages.
-
-Using runtime package management is completely optional and not required
-for a successful build or deployment in any way. But if you want to make
-use of runtime package management, you need to do a couple things above
-and beyond the basics. The remainder of this section describes what you
-need to do.
-
-Build Considerations
-~~~~~~~~~~~~~~~~~~~~
-
-This section describes build considerations of which you need to be
-aware in order to provide support for runtime package management.
-
-When BitBake generates packages, it needs to know what format or formats
-to use. In your configuration, you use the
-:term:`PACKAGE_CLASSES`
-variable to specify the format:
-
-1. Open the ``local.conf`` file inside your
- :term:`Build Directory` (e.g.
- ``poky/build/conf/local.conf``).
-
-2. Select the desired package format as follows::
-
- PACKAGE_CLASSES ?= "package_packageformat"
-
-   where ``packageformat`` can be "ipk", "rpm",
-   "deb", or "tar", which are the supported package formats.
-
- .. note::
-
- Because the Yocto Project supports four different package formats,
- you can set the variable with more than one argument. However, the
- OpenEmbedded build system only uses the first argument when
- creating an image or Software Development Kit (SDK).
-
-If you would like your image to start off with a basic package database
-containing the packages in your current build as well as to have the
-relevant tools available on the target for runtime package management,
-you can include "package-management" in the
-:term:`IMAGE_FEATURES`
-variable. Including "package-management" in this configuration variable
-ensures that when the image is assembled for your target, the image
-includes the currently-known package databases as well as the
-target-specific tools required for runtime package management to be
-performed on the target. However, this is not strictly necessary. You
-could start your image off without any databases but only include the
-required on-target package tool(s). As an example, you could include
-"opkg" in your
-:term:`IMAGE_INSTALL` variable
-if you are using the IPK package format. You can then initialize your
-target's package database(s) later once your image is up and running.
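-
-For example, you could add the feature to your image recipe, or use
-:term:`EXTRA_IMAGE_FEATURES` in ``local.conf`` instead; the commented
-alternative below is a sketch that only installs the ``opkg`` tool for
-IPK-based images::
-
-   IMAGE_FEATURES += "package-management"
-
-   # Alternatively, only install the package tool itself:
-   # IMAGE_INSTALL:append = " opkg"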
-
-Whenever you perform any sort of build step that can potentially
-generate a package or modify existing package, it is always a good idea
-to re-generate the package index after the build by using the following
-command::
-
- $ bitbake package-index
-
-It might be tempting to build the
-package and the package index at the same time with a command such as
-the following::
-
- $ bitbake some-package package-index
-
-Do not do this as
-BitBake does not schedule the package index for after the completion of
-the package you are building. Consequently, you cannot be sure of the
-package index including information for the package you just built.
-Thus, be sure to run the package update step separately after building
-any packages.
-
-You can use the
-:term:`PACKAGE_FEED_ARCHS`,
-:term:`PACKAGE_FEED_BASE_PATHS`,
-and
-:term:`PACKAGE_FEED_URIS`
-variables to pre-configure target images to use a package feed. If you
-do not define these variables, then manual steps as described in the
-subsequent sections are necessary to configure the target. You should
-set these variables before building the image in order to produce a
-correctly configured image.
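-
-Here is a sketch of such a pre-configuration in ``local.conf``, using
-the same hypothetical ``my.server`` feed layout as the examples later in
-this section; adjust the values to match your own server and
-:term:`PACKAGE_CLASSES` setting::
-
-   PACKAGE_FEED_URIS = "http://my.server"
-   PACKAGE_FEED_BASE_PATHS = "rpm"
-   PACKAGE_FEED_ARCHS = "all i586 qemux86"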
-
-When your build is complete, your packages reside in the
-``${TMPDIR}/deploy/packageformat`` directory. For example, if
-``${``\ :term:`TMPDIR`\ ``}`` is
-``tmp`` and your selected package type is RPM, then your RPM packages
-are available in ``tmp/deploy/rpm``.
-
-Host or Server Machine Setup
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Although other protocols are possible, a server using HTTP typically
-serves packages. If you want to use HTTP, then set up and configure a
-web server such as Apache 2, lighttpd, or a Python web server on the
-machine serving the packages.
-
-To keep things simple, this section describes how to set up a
-Python web server to share package feeds from the developer's
-machine. Although this server might not be the best for a production
-environment, the setup is simple and straightforward. Should you want
-to use a different server more suited for production (e.g. Apache 2,
-Lighttpd, or Nginx), take the appropriate steps to do so.
-
-From within the build directory where you have built an image based on
-your packaging choice (i.e. the
-:term:`PACKAGE_CLASSES`
-setting), simply start the server. The following example assumes a
-:term:`Build Directory` of ``poky/build`` (so the RPM packages produced by
-the build are in ``poky/build/tmp/deploy/rpm``) and a :term:`PACKAGE_CLASSES`
-setting of "package_rpm"::
-
- $ cd poky/build/tmp/deploy/rpm
- $ python3 -m http.server
-
-Target Setup
-~~~~~~~~~~~~
-
-Setting up the target differs depending on the package management
-system. This section provides information for RPM, IPK, and DEB.
-
-Using RPM
-^^^^^^^^^
-
-The `Dandified Packaging
-Tool <https://en.wikipedia.org/wiki/DNF_(software)>`__ (DNF) performs
-runtime package management of RPM packages. In order to use DNF for
-runtime package management, you must perform an initial setup on the
-target machine for cases where the ``PACKAGE_FEED_*`` variables were not
-set as part of the image that is running on the target. In other words,
-if you built your image without setting these variables and the image is
-now running on the target, you need to perform the steps in this section
-to use runtime package management.
-
-.. note::
-
- For information on the ``PACKAGE_FEED_*`` variables, see
- :term:`PACKAGE_FEED_ARCHS`, :term:`PACKAGE_FEED_BASE_PATHS`, and
- :term:`PACKAGE_FEED_URIS` in the Yocto Project Reference Manual variables
- glossary.
-
-On the target, you must inform DNF that package databases are available.
-You do this by creating a file named
-``/etc/yum.repos.d/oe-packages.repo`` and defining the ``oe-packages``
-repository in it.
-
-As an example, assume the target is able to use the following package
-databases: ``all``, ``i586``, and ``qemux86`` from a server named
-``my.server``. The specifics for setting up the web server are up to
-you. The critical requirement is that the URIs in the target repository
-configuration point to the correct remote location for the feeds.
-
-.. note::
-
- For development purposes, you can point the web server to the build
- system's ``deploy`` directory. However, for production use, it is better to
- copy the package directories to a location outside of the build area and use
- that location. Doing so avoids situations where the build system
- overwrites or changes the ``deploy`` directory.
-
-When telling DNF where to look for the package databases, you must
-declare individual locations per architecture or a single location used
-for all architectures. You cannot do both:
-
-- *Create an Explicit List of Architectures:* Define individual base
- URLs to identify where each package database is located:
-
- .. code-block:: none
-
- [oe-packages]
- baseurl=http://my.server/rpm/i586 http://my.server/rpm/qemux86 http://my.server/rpm/all
-
- This example
- informs DNF about individual package databases for all three
- architectures.
-
-- *Create a Single (Full) Package Index:* Define a single base URL that
- identifies where a full package database is located::
-
- [oe-packages]
- baseurl=http://my.server/rpm
-
- This example informs DNF about a single
- package database that contains all the package index information for
- all supported architectures.
-
-Once you have informed DNF where to find the package databases, you need
-to fetch them:
-
-.. code-block:: none
-
- # dnf makecache
-
-DNF is now able to find, install, and
-upgrade packages from the specified repository or repositories.
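-
-For example, where ``packagename`` is a placeholder for a package
-available in your feed, you could run::
-
-   # dnf install packagename
-   # dnf upgrade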
-
-.. note::
-
- See the `DNF documentation <https://dnf.readthedocs.io/en/latest/>`__ for
- additional information.
-
-Using IPK
-^^^^^^^^^
-
-The ``opkg`` application performs runtime package management of IPK
-packages. You must perform an initial setup for ``opkg`` on the target
-machine if the
-:term:`PACKAGE_FEED_ARCHS`,
-:term:`PACKAGE_FEED_BASE_PATHS`,
-and
-:term:`PACKAGE_FEED_URIS`
-variables have not been set or the target image was built before the
-variables were set.
-
-The ``opkg`` application uses configuration files to find available
-package databases. Thus, you need to create a configuration file inside
-the ``/etc/opkg/`` directory, which informs ``opkg`` of any repository
-you want to use.
-
-As an example, suppose you are serving packages from a ``ipk/``
-directory containing the ``i586``, ``all``, and ``qemux86`` databases
-through an HTTP server named ``my.server``. On the target, create a
-configuration file (e.g. ``my_repo.conf``) inside the ``/etc/opkg/``
-directory containing the following:
-
-.. code-block:: none
-
- src/gz all http://my.server/ipk/all
- src/gz i586 http://my.server/ipk/i586
- src/gz qemux86 http://my.server/ipk/qemux86
-
-Next, instruct ``opkg`` to fetch the
-repository information:
-
-.. code-block:: none
-
- # opkg update
-
-The ``opkg`` application is now able to find, install, and upgrade packages
-from the specified repository.
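-
-As with DNF, you can then install or upgrade packages, where
-``packagename`` is a placeholder for a package available in your feed::
-
-   # opkg install packagename
-   # opkg upgrade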
-
-Using DEB
-^^^^^^^^^
-
-The ``apt`` application performs runtime package management of DEB
-packages. This application uses a source list file to find available
-package databases. You must perform an initial setup for ``apt`` on the
-target machine if the
-:term:`PACKAGE_FEED_ARCHS`,
-:term:`PACKAGE_FEED_BASE_PATHS`,
-and
-:term:`PACKAGE_FEED_URIS`
-variables have not been set or the target image was built before the
-variables were set.
-
-To inform ``apt`` of the repository you want to use, you might create a
-list file (e.g. ``my_repo.list``) inside the
-``/etc/apt/sources.list.d/`` directory. As an example, suppose you are
-serving packages from a ``deb/`` directory containing the ``i586``,
-``all``, and ``qemux86`` databases through an HTTP server named
-``my.server``. The list file should contain:
-
-.. code-block:: none
-
- deb http://my.server/deb/all ./
- deb http://my.server/deb/i586 ./
- deb http://my.server/deb/qemux86 ./
-
-Next, instruct the ``apt`` application
-to fetch the repository information:
-
-.. code-block:: none
-
- $ sudo apt update
-
-After this step,
-``apt`` is able to find, install, and upgrade packages from the
-specified repository.
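-
-You can then install or upgrade packages in the usual way, where
-``packagename`` is again a placeholder for a package in your feed::
-
-   $ sudo apt install packagename
-   $ sudo apt upgrade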
-
-Generating and Using Signed Packages
-------------------------------------
-
-In order to add security to RPM packages used during a build, you can
-take steps to securely sign them. Once a signature is verified, the
-OpenEmbedded build system can use the package in the build. If security
-fails for a signed package, the build system stops the build.
-
-This section describes how to sign RPM packages during a build and how
-to use signed package feeds (repositories) when doing a build.
-
-Signing RPM Packages
-~~~~~~~~~~~~~~~~~~~~
-
-To enable signing RPM packages, you must set up the following
-configurations in either your ``local.conf`` file or your distribution
-configuration file::
-
- # Inherit sign_rpm.bbclass to enable signing functionality
- INHERIT += " sign_rpm"
- # Define the GPG key that will be used for signing.
- RPM_GPG_NAME = "key_name"
- # Provide passphrase for the key
- RPM_GPG_PASSPHRASE = "passphrase"
-
-.. note::
-
- Be sure to supply appropriate values for both `key_name` and
- `passphrase`.
-
-Aside from the ``RPM_GPG_NAME`` and ``RPM_GPG_PASSPHRASE`` variables in
-the previous example, two optional variables related to signing are available:
-
-- *GPG_BIN:* Specifies a ``gpg`` binary/wrapper that is executed
- when the package is signed.
-
-- *GPG_PATH:* Specifies the ``gpg`` home directory used when the
- package is signed.
-
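-Here is a sketch showing how these two optional variables might be set.
-The binary path and home directory are placeholders for your own signing
-setup::
-
-   GPG_BIN = "/usr/local/bin/gpg2"
-   GPG_PATH = "/home/builder/.gnupg-signing"
-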
-Processing Package Feeds
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-In addition to being able to sign RPM packages, you can also enable
-signed package feeds for IPK and RPM packages.
-
-The steps you need to take to enable signed package feed use are similar
-to the steps used to sign RPM packages. You must define the following in
-your ``local.conf`` file or your distribution configuration file::
-
- INHERIT += "sign_package_feed"
- PACKAGE_FEED_GPG_NAME = "key_name"
- PACKAGE_FEED_GPG_PASSPHRASE_FILE = "path_to_file_containing_passphrase"
-
-For signed package feeds, the passphrase must be specified in a separate file,
-which is pointed to by the ``PACKAGE_FEED_GPG_PASSPHRASE_FILE``
-variable. Regarding security, keeping a plain text passphrase out of the
-configuration is more secure.
-
-Aside from the ``PACKAGE_FEED_GPG_NAME`` and
-``PACKAGE_FEED_GPG_PASSPHRASE_FILE`` variables, three optional variables
-related to signed package feeds are available:
-
-- *GPG_BIN:* Specifies a ``gpg`` binary/wrapper that is executed
- when the package is signed.
-
-- *GPG_PATH:* Specifies the ``gpg`` home directory used when the
- package is signed.
-
--  *PACKAGE_FEED_GPG_SIGNATURE_TYPE:* Specifies the type of ``gpg``
-   signature. This variable applies only to RPM and IPK package feeds.
-   Allowable values are "ASC", which is the default and specifies an
-   ASCII armored signature, and "BIN", which specifies a binary
-   signature.
-
-Testing Packages With ptest
----------------------------
-
-A Package Test (ptest) runs tests against packages built by the
-OpenEmbedded build system on the target machine. A ptest contains at
-least two items: the actual test, and a shell script (``run-ptest``)
-that starts the test. The shell script that starts the test must not
-contain the actual test; it only starts it. On the other
-hand, the test can be anything from a simple shell script that runs a
-binary and checks the output to an elaborate system of test binaries and
-data files.
-
-The test generates output in the format used by Automake::
-
- result: testname
-
-where the result can be ``PASS``, ``FAIL``, or ``SKIP``, and
-the testname can be any identifying string.
-
-For a list of Yocto Project recipes that are already enabled with ptest,
-see the :yocto_wiki:`Ptest </Ptest>` wiki page.
-
-.. note::
-
- A recipe is "ptest-enabled" if it inherits the
- :ref:`ptest <ref-classes-ptest>` class.
-
-Adding ptest to Your Build
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To add package testing to your build, add the
-:term:`DISTRO_FEATURES` and
-:term:`EXTRA_IMAGE_FEATURES`
-variables to your ``local.conf`` file, which is found in the
-:term:`Build Directory`::
-
- DISTRO_FEATURES:append = " ptest"
- EXTRA_IMAGE_FEATURES += "ptest-pkgs"
-
-Once your build is complete, the ptest files are installed into the
-``/usr/lib/package/ptest`` directory within the image, where ``package``
-is the name of the package.
-
-Running ptest
-~~~~~~~~~~~~~
-
-The ``ptest-runner`` package installs a shell script that loops through
-all installed ptest test suites and runs them in sequence. Consequently,
-you might want to add this package to your image.
-
-Getting Your Package Ready
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In order to enable a recipe to run installed ptests on target hardware,
-you need to prepare the recipes that build the packages you want to
-test. Here is what you have to do for each recipe:
-
-- *Be sure the recipe inherits
- the* :ref:`ptest <ref-classes-ptest>` *class:*
- Include the following line in each recipe::
-
- inherit ptest
-
-- *Create run-ptest:* This script starts your test. Locate the
- script where you will refer to it using
- :term:`SRC_URI`. Here is an
- example that starts a test for ``dbus``::
-
- #!/bin/sh
- cd test
- make -k runtest-TESTS
-
-- *Ensure dependencies are met:* If the test adds build or runtime
- dependencies that normally do not exist for the package (such as
- requiring "make" to run the test suite), use the
- :term:`DEPENDS` and
- :term:`RDEPENDS` variables in
- your recipe in order for the package to meet the dependencies. Here
- is an example where the package has a runtime dependency on "make"::
-
- RDEPENDS:${PN}-ptest += "make"
-
-- *Add a function to build the test suite:* Not many packages support
- cross-compilation of their test suites. Consequently, you usually
- need to add a cross-compilation function to the package.
-
- Many packages based on Automake compile and run the test suite by
- using a single command such as ``make check``. However, the host
- ``make check`` builds and runs on the same computer, while
- cross-compiling requires that the package is built on the host but
- executed for the target architecture (though often, as in the case
- for ptest, the execution occurs on the host). The built version of
- Automake that ships with the Yocto Project includes a patch that
- separates building and execution. Consequently, packages that use the
- unaltered, patched version of ``make check`` automatically
-   cross-compile.
-
- Regardless, you still must add a ``do_compile_ptest`` function to
- build the test suite. Add a function similar to the following to your
- recipe::
-
- do_compile_ptest() {
- oe_runmake buildtest-TESTS
- }
-
-- *Ensure special configurations are set:* If the package requires
- special configurations prior to compiling the test code, you must
- insert a ``do_configure_ptest`` function into the recipe.
-
--  *Install the test suite:* The ``ptest`` class automatically copies
-   the file ``run-ptest`` to the target and then runs ``make
-   install-ptest`` to run the tests. If this is not enough, you need
-   to create a ``do_install_ptest`` function and make sure it gets
-   called after ``make install-ptest`` completes (see the sketch after
-   this list).
-
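-Here is a hedged sketch of such a function. The ``tests/data`` directory
-is hypothetical and simply stands for extra files that your test suite
-needs at runtime::
-
-   do_install_ptest() {
-       # Copy extra test data that the generic installation step does
-       # not handle (hypothetical example).
-       install -d ${D}${PTEST_PATH}/data
-       install -m 0644 ${S}/tests/data/* ${D}${PTEST_PATH}/data/
-   }
-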
-Creating Node Package Manager (NPM) Packages
---------------------------------------------
-
-`NPM <https://en.wikipedia.org/wiki/Npm_(software)>`__ is a package
-manager for the JavaScript programming language. The Yocto Project
-supports the NPM :ref:`fetcher <bitbake:bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`. You can
-use this fetcher in combination with
-:doc:`devtool </ref-manual/devtool-reference>` to create
-recipes that produce NPM packages.
-
-There are two workflows that allow you to create NPM packages using
-``devtool``: the NPM registry modules method and the NPM project code
-method.
-
-.. note::
-
- While it is possible to create NPM recipes manually, using
- ``devtool`` is far simpler.
-
-Additionally, some requirements and caveats exist.
-
-Requirements and Caveats
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-You need to be aware of the following before using ``devtool`` to create
-NPM packages:
-
-- Of the two methods that you can use ``devtool`` to create NPM
- packages, the registry approach is slightly simpler. However, you
- might consider the project approach because you do not have to
- publish your module in the NPM registry
- (`npm-registry <https://docs.npmjs.com/misc/registry>`_), which
- is NPM's public registry.
-
-- Be familiar with
- :doc:`devtool </ref-manual/devtool-reference>`.
-
-- The NPM host tools need the native ``nodejs-npm`` package, which is
- part of the OpenEmbedded environment. You need to get the package by
- cloning the https://github.com/openembedded/meta-openembedded
- repository out of GitHub. Be sure to add the path to your local copy
- to your ``bblayers.conf`` file.
-
-- ``devtool`` cannot detect native libraries in module dependencies.
- Consequently, you must manually add packages to your recipe.
-
-- While deploying NPM packages, ``devtool`` cannot determine which
- dependent packages are missing on the target (e.g. the node runtime
- ``nodejs``). Consequently, you need to find out what files are
- missing and be sure they are on the target.
-
-- Although you might not need NPM to run your node package, it is
- useful to have NPM on your target. The NPM package name is
- ``nodejs-npm``.
-
-Using the Registry Modules Method
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section presents an example that uses the ``cute-files`` module,
-which is a file browser web application.
-
-.. note::
-
- You must know the ``cute-files`` module version.
-
-The first thing you need to do is use ``devtool`` and the NPM fetcher to
-create the recipe::
-
- $ devtool add "npm://registry.npmjs.org;package=cute-files;version=1.0.2"
-
-The
-``devtool add`` command runs ``recipetool create`` and uses the same
-fetch URI to download each dependency and capture license details where
-possible. The result is a generated recipe.
-
-The recipe file is fairly simple and contains every license that
-``recipetool`` finds and includes the licenses in the recipe's
-:term:`LIC_FILES_CHKSUM`
-variables. You need to examine the variables and look for those with
-"unknown" in the :term:`LICENSE`
-field. You need to track down the license information for "unknown"
-modules and manually add the information to the recipe.
-
-``recipetool`` creates a "shrinkwrap" file for your recipe. Shrinkwrap
-files capture the version of all dependent modules. Many packages do not
-provide shrinkwrap files, so ``recipetool`` creates one as it runs.
-
-.. note::
-
- A package is created for each sub-module. This policy is the only
- practical way to have the licenses for all of the dependencies
- represented in the license manifest of the image.
-
-The ``devtool edit-recipe`` command lets you take a look at the recipe::
-
- $ devtool edit-recipe cute-files
- SUMMARY = "Turn any folder on your computer into a cute file browser, available on the local network."
- LICENSE = "MIT & ISC & Unknown"
- LIC_FILES_CHKSUM = "file://LICENSE;md5=71d98c0a1db42956787b1909c74a86ca \
- file://node_modules/toidentifier/LICENSE;md5=1a261071a044d02eb6f2bb47f51a3502 \
- file://node_modules/debug/LICENSE;md5=ddd815a475e7338b0be7a14d8ee35a99 \
- ...
- SRC_URI = " \
- npm://registry.npmjs.org/;package=cute-files;version=${PV} \
- npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
- "
- S = "${WORKDIR}/npm"
- inherit npm
- LICENSE:${PN} = "MIT"
- LICENSE:${PN}-accepts = "MIT"
- LICENSE:${PN}-array-flatten = "MIT"
- ...
- LICENSE:${PN}-vary = "MIT"
-
-Here are three key points in the previous example:
-
-- :term:`SRC_URI` uses the NPM
- scheme so that the NPM fetcher is used.
-
-- ``recipetool`` collects all the license information. If a
- sub-module's license is unavailable, the sub-module's name appears in
- the comments.
-
-- The ``inherit npm`` statement causes the
- :ref:`npm <ref-classes-npm>` class to package
- up all the modules.
-
-You can run the following command to build the ``cute-files`` package::
-
- $ devtool build cute-files
-
-Remember that ``nodejs`` must be installed on
-the target before your package.
-
-Assuming 192.168.7.2 for the target's IP address, use the following
-command to deploy your package::
-
- $ devtool deploy-target -s cute-files root@192.168.7.2
-
-Once the package is installed on the target, you can
-test the application:
-
-.. note::
-
- Because of a known issue, you cannot simply run ``cute-files`` as you would
- if you had run ``npm install``.
-
-::
-
- $ cd /usr/lib/node_modules/cute-files
- $ node cute-files.js
-
-On a browser,
-go to ``http://192.168.7.2:3000`` and you see the following:
-
-.. image:: figures/cute-files-npm-example.png
- :align: center
-
-You can find the recipe in ``workspace/recipes/cute-files``. You can use
-the recipe in any layer you choose.
-
-Using the NPM Projects Code Method
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Although it is useful to package modules already in the NPM registry,
-adding ``node.js`` projects under development is a more common developer
-use case.
-
-This section covers the NPM projects code method, which is very similar
-to the "registry" approach described in the previous section. In the NPM
-projects method, you provide ``devtool`` with a URL that points to the
-source files.
-
-Replicating the same example, (i.e. ``cute-files``) use the following
-command::
-
- $ devtool add https://github.com/martinaglv/cute-files.git
-
-The
-recipe this command generates is very similar to the recipe created in
-the previous section. However, the :term:`SRC_URI` looks like the following::
-
- SRC_URI = " \
- git://github.com/martinaglv/cute-files.git;protocol=https \
- npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
- "
-
-In this example,
-the main module is taken from the Git repository and dependencies are
-taken from the NPM registry. Other than those differences, the recipe is
-basically the same between the two methods. You can build and deploy the
-package exactly as described in the previous section that uses the
-registry modules method.
-
-Adding custom metadata to packages
-----------------------------------
-
-The variable
-:term:`PACKAGE_ADD_METADATA`
-can be used to add additional metadata to packages. This is reflected in
-the package control/spec file. Taking the IPK format as an example, the
-CONTROL file stored inside the package would contain the additional
-metadata as extra lines.
-
-The variable can be used in multiple ways, including using suffixes to
-set it for a specific package type and/or package. Note that the order
-of precedence is the same as this list:
-
-- ``PACKAGE_ADD_METADATA_<PKGTYPE>:<PN>``
-
-- ``PACKAGE_ADD_METADATA_<PKGTYPE>``
-
-- ``PACKAGE_ADD_METADATA:<PN>``
-
-- :term:`PACKAGE_ADD_METADATA`
-
-`<PKGTYPE>` is a parameter and is expected to be the name of a specific
-package type:
-
-- IPK for .ipk packages
-
-- DEB for .deb packages
-
-- RPM for .rpm packages
-
-`<PN>` is a parameter and is expected to be a package name.
-
-The variable can contain multiple [one-line] metadata fields separated
-by the literal sequence '\\n'. The separator can be redefined using the
-variable flag ``separator``.
-
-Here is an example that adds two custom fields for ipk
-packages::
-
- PACKAGE_ADD_METADATA_IPK = "Vendor: CustomIpk\nGroup:Applications/Spreadsheets"
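-
-And here is a sketch, with made-up field values, that restricts the
-metadata to DEB packages and redefines the separator::
-
-   PACKAGE_ADD_METADATA_DEB = "Vendor: CustomDeb;Priority: optional"
-   PACKAGE_ADD_METADATA_DEB[separator] = ";"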
-
-Efficiently Fetching Source Files During a Build
-================================================
-
-The OpenEmbedded build system works with source files located through
-the :term:`SRC_URI` variable. When
-you build something using BitBake, a big part of the operation is
-locating and downloading all the source tarballs. For images,
-downloading all the source for various packages can take a significant
-amount of time.
-
-This section shows you how you can use mirrors to speed up fetching
-source files and how you can pre-fetch files, both of which lead to more
-efficient use of resources and time.
-
-Setting up Effective Mirrors
-----------------------------
-
-A good deal of the work that goes into a Yocto Project build is simply
-downloading all of the source tarballs. Maybe you have been working with another
-build system for which you have built up a
-sizable directory of source tarballs. Or, perhaps someone else has such
-a directory for which you have read access. If so, you can save time by
-adding statements to your configuration file so that the build process
-checks local directories first for existing tarballs before checking the
-Internet.
-
-Here is an efficient way to set it up in your ``local.conf`` file::
-
- SOURCE_MIRROR_URL ?= "file:///home/you/your-download-dir/"
- INHERIT += "own-mirrors"
- BB_GENERATE_MIRROR_TARBALLS = "1"
- # BB_NO_NETWORK = "1"
-
-In the previous example, the
-:term:`BB_GENERATE_MIRROR_TARBALLS`
-variable causes the OpenEmbedded build system to generate tarballs of
-the Git repositories and store them in the
-:term:`DL_DIR` directory. Due to
-performance reasons, generating and storing these tarballs is not the
-build system's default behavior.
-
-You can also use the
-:term:`PREMIRRORS` variable. For
-an example, see the variable's glossary entry in the Yocto Project
-Reference Manual.
-
-Getting Source Files and Suppressing the Build
-----------------------------------------------
-
-Another technique you can use to ready yourself for a successive string
-of build operations is to pre-fetch all the source files without
-actually starting a build. This technique lets you work through any
-download issues and ultimately gathers all the source files into your
-download directory :ref:`structure-build-downloads`, whose location is
-defined by :term:`DL_DIR`.
-
-Use the following BitBake command form to fetch all the necessary
-sources without starting the build::
-
- $ bitbake target --runall=fetch
-
-This
-variation of the BitBake command guarantees that you have all the
-sources for that BitBake target should you disconnect from the Internet
-and want to do the build later offline.
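-
-Once everything has been fetched, you can optionally make BitBake fail
-fast on any unexpected network access by also setting the following in
-your ``local.conf`` file (shown commented out in the earlier mirror
-example)::
-
-   BB_NO_NETWORK = "1"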
-
-Selecting an Initialization Manager
-===================================
-
-By default, the Yocto Project uses SysVinit as the initialization
-manager. However, there is also support for systemd, which is a full
-replacement for init with parallel starting of services, reduced shell
-overhead and other features that are used by many distributions.
-
-Within the system, SysVinit treats system components as services. These
-services are maintained as shell scripts stored in the ``/etc/init.d/``
-directory. Services are organized into different run levels. This
-organization is maintained by putting links to the services in the
-``/etc/rcN.d/`` directories, where `N` is one of the following options:
-"S", "0", "1", "2", "3", "4", "5", or "6".
-
-.. note::
-
- Each runlevel has a dependency on the previous runlevel. This
- dependency allows the services to work properly.
-
-In comparison, systemd treats components as units. Using units is a
-broader concept as compared to using a service. A unit includes several
-different types of entities. Service is one of the types of entities.
-The runlevel concept in SysVinit corresponds to the concept of a target
-in systemd, where target is also a type of supported unit.
-
-In a SysVinit-based system, services load sequentially (i.e. one by one)
-during init and parallelization is not supported. With systemd, services
-start in parallel. Needless to say, the method can have an impact on
-system startup performance.
-
-If you want to use SysVinit, you do not have to do anything. But, if you
-want to use systemd, you must take some steps as described in the
-following sections.
-
-Using systemd Exclusively
--------------------------
-
-Set these variables in your distribution configuration file as follows::
-
- DISTRO_FEATURES:append = " systemd"
- VIRTUAL-RUNTIME_init_manager = "systemd"
-
-You can also prevent the SysVinit distribution feature from
-being automatically enabled as follows::
-
- DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"
-
-Doing so removes any
-redundant SysVinit scripts.
-
-To remove initscripts from your image altogether, set this variable
-also::
-
- VIRTUAL-RUNTIME_initscripts = ""
-
-For information on the backfill variable, see
-:term:`DISTRO_FEATURES_BACKFILL_CONSIDERED`.
-
-Using systemd for the Main Image and Using SysVinit for the Rescue Image
-------------------------------------------------------------------------
-
-Set these variables in your distribution configuration file as follows::
-
- DISTRO_FEATURES:append = " systemd"
- VIRTUAL-RUNTIME_init_manager = "systemd"
-
-Doing so causes your main image to use the
-``packagegroup-core-boot.bb`` recipe and systemd. The rescue/minimal
-image cannot use this package group. However, it can install SysVinit
-and the appropriate packages will have support for both systemd and
-SysVinit.
-
-Using systemd-journald without a traditional syslog daemon
-----------------------------------------------------------
-
-Counter-intuitively, ``systemd-journald`` is not a syslog runtime or provider,
-and the proper way to use systemd-journald as your sole logging mechanism is to
-effectively disable syslog entirely by setting these variables in your distribution
-configuration file::
-
- VIRTUAL-RUNTIME_syslog = ""
- VIRTUAL-RUNTIME_base-utils-syslog = ""
-
-Doing so will prevent ``rsyslog`` / ``busybox-syslog`` from being pulled in by
-default, leaving only ``journald``.
-
-Selecting a Device Manager
-==========================
-
-The Yocto Project provides multiple ways to manage the device manager
-(``/dev``):
-
-- Persistent and Pre-Populated ``/dev``: For this case, the ``/dev``
- directory is persistent and the required device nodes are created
- during the build.
-
-- Use ``devtmpfs`` with a Device Manager: For this case, the ``/dev``
- directory is provided by the kernel as an in-memory file system and
- is automatically populated by the kernel at runtime. Additional
- configuration of device nodes is done in user space by a device
- manager like ``udev`` or ``busybox-mdev``.
-
-Using Persistent and Pre-Populated ``/dev``
---------------------------------------------
-
-To use the static method for device population, you need to set the
-:term:`USE_DEVFS` variable to "0"
-as follows::
-
- USE_DEVFS = "0"
-
-The content of the resulting ``/dev`` directory is defined in a Device
-Table file. The
-:term:`IMAGE_DEVICE_TABLES`
-variable defines the Device Table to use and should be set in the
-machine or distro configuration file. Alternatively, you can set this
-variable in your ``local.conf`` configuration file.
-
-If you do not define the :term:`IMAGE_DEVICE_TABLES` variable, the default
-``device_table-minimal.txt`` is used. Here is an example that selects a
-different device table::
-
- IMAGE_DEVICE_TABLES = "device_table-mymachine.txt"
-
-The population is handled by the ``makedevs`` utility during image
-creation.
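-
-Entries in a device table use the ``makedevs`` format: path, type, mode,
-user ID, group ID, major and minor numbers, and the start, increment,
-and count fields used when creating ranges of device nodes. Here is a
-sketch of a couple of typical lines, similar to those in
-``device_table-minimal.txt``::
-
-   #path         type mode uid gid major minor start inc count
-   /dev/null     c    666  0   0   1     3     -     -   -
-   /dev/console  c    662  0   0   5     1     -     -   -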
-
-Using ``devtmpfs`` and a Device Manager
----------------------------------------
-
-To use the dynamic method for device population, you need to use (or be
-sure to set) the :term:`USE_DEVFS`
-variable to "1", which is the default::
-
- USE_DEVFS = "1"
-
-With this
-setting, the resulting ``/dev`` directory is populated by the kernel
-using ``devtmpfs``. Make sure the corresponding kernel configuration
-variable ``CONFIG_DEVTMPFS`` is set when you build a Linux kernel.
-
-All devices created by ``devtmpfs`` will be owned by ``root`` and have
-permissions ``0600``.
-
-To have more control over the device nodes, you can use a device manager
-like ``udev`` or ``busybox-mdev``. You choose the device manager by
-defining the ``VIRTUAL-RUNTIME_dev_manager`` variable in your machine or
-distro configuration file. Alternatively, you can set this variable in
-your ``local.conf`` configuration file::
-
- VIRTUAL-RUNTIME_dev_manager = "udev"
-
- # Some alternative values
- # VIRTUAL-RUNTIME_dev_manager = "busybox-mdev"
- # VIRTUAL-RUNTIME_dev_manager = "systemd"
-
-Using an External SCM
-=====================
-
-If you're working on a recipe that pulls from an external Source Code
-Manager (SCM), it is possible to have the OpenEmbedded build system
-notice new recipe changes added to the SCM and then build the resulting
-packages that depend on the new recipes by using the latest versions.
-This only works for SCMs from which it is possible to get a sensible
-revision number for changes. Currently, you can do this with Apache
-Subversion (SVN), Git, and Bazaar (BZR) repositories.
-
-To enable this behavior, the :term:`PV` of
-the recipe needs to reference
-:term:`SRCPV`. Here is an example::
-
- PV = "1.2.3+git${SRCPV}"
-
-Then, you can add the following to your
-``local.conf``::
-
- SRCREV:pn-PN = "${AUTOREV}"
-
-:term:`PN` is the name of the recipe for
-which you want to enable automatic source revision updating.
-
-If you do not want to update your local configuration file, you can add
-the following directly to the recipe to finish enabling the feature::
-
- SRCREV = "${AUTOREV}"
-
-The Yocto Project provides a distribution named ``poky-bleeding``, whose
-configuration file contains the line::
-
- require conf/distro/include/poky-floating-revisions.inc
-
-This line pulls in the
-listed include file that contains numerous lines of exactly that form::
-
- #SRCREV:pn-opkg-native ?= "${AUTOREV}"
- #SRCREV:pn-opkg-sdk ?= "${AUTOREV}"
- #SRCREV:pn-opkg ?= "${AUTOREV}"
- #SRCREV:pn-opkg-utils-native ?= "${AUTOREV}"
- #SRCREV:pn-opkg-utils ?= "${AUTOREV}"
- SRCREV:pn-gconf-dbus ?= "${AUTOREV}"
- SRCREV:pn-matchbox-common ?= "${AUTOREV}"
- SRCREV:pn-matchbox-config-gtk ?= "${AUTOREV}"
- SRCREV:pn-matchbox-desktop ?= "${AUTOREV}"
- SRCREV:pn-matchbox-keyboard ?= "${AUTOREV}"
- SRCREV:pn-matchbox-panel-2 ?= "${AUTOREV}"
- SRCREV:pn-matchbox-themes-extra ?= "${AUTOREV}"
- SRCREV:pn-matchbox-terminal ?= "${AUTOREV}"
- SRCREV:pn-matchbox-wm ?= "${AUTOREV}"
- SRCREV:pn-settings-daemon ?= "${AUTOREV}"
- SRCREV:pn-screenshot ?= "${AUTOREV}"
- . . .
-
-These lines allow you to
-experiment with building a distribution that tracks the latest
-development source for numerous packages.
-
-.. note::
-
- The ``poky-bleeding`` distribution is not tested on a regular basis. Keep
- this in mind if you use it.
-
-Creating a Read-Only Root Filesystem
-====================================
-
-Suppose, for security reasons, you need to disable your target device's
-root filesystem's write permissions (i.e. you need a read-only root
-filesystem). Or, perhaps you are running the device's operating system
-from a read-only storage device. For either case, you can customize your
-image for that behavior.
-
-.. note::
-
- Supporting a read-only root filesystem requires that the system and
- applications do not try to write to the root filesystem. You must
- configure all parts of the target system to write elsewhere, or to
- gracefully fail in the event of attempting to write to the root
- filesystem.
-
-Creating the Root Filesystem
-----------------------------
-
-To create the read-only root filesystem, simply add the
-"read-only-rootfs" feature to your image, normally in one of two ways.
-The first way is to add the "read-only-rootfs" image feature in the
-image's recipe file via the :term:`IMAGE_FEATURES` variable::
-
- IMAGE_FEATURES += "read-only-rootfs"
-
-As an alternative, you can add the same feature
-from within your build directory's ``local.conf`` file with the
-associated :term:`EXTRA_IMAGE_FEATURES` variable, as in::
-
- EXTRA_IMAGE_FEATURES = "read-only-rootfs"
-
-For more information on how to use these variables, see the
-":ref:`dev-manual/common-tasks:Customizing Images Using Custom \`\`IMAGE_FEATURES\`\` and \`\`EXTRA_IMAGE_FEATURES\`\``"
-section. For information on the variables, see
-:term:`IMAGE_FEATURES` and
-:term:`EXTRA_IMAGE_FEATURES`.
-
-Post-Installation Scripts and Read-Only Root Filesystem
--------------------------------------------------------
-
-It is very important that you make sure all post-installation
-(``pkg_postinst``) scripts for packages that are installed into the
-image can be run at the time when the root filesystem is created during
-the build on the host system. These scripts cannot attempt to run during
-the first boot on the target device. With the "read-only-rootfs" feature
-enabled, the build system makes sure that all post-installation scripts
-succeed at file system creation time. If any of these scripts
-still need to be run after the root filesystem is created, the build
-immediately fails. These build-time checks ensure that the build fails
-rather than having the target device fail later during its initial boot
-operation.
-
-Most of the common post-installation scripts generated by the build
-system for the out-of-the-box Yocto Project are engineered so that they
-can run during root filesystem creation (e.g. post-installation scripts
-for caching fonts). However, if you create and add custom scripts, you
-need to be sure they can be run during this file system creation.
-
-Here are some common problems that prevent post-installation scripts
-from running during root filesystem creation:
-
--  *Not using $D in front of absolute paths:* The build system defines
-   ``$``\ :term:`D` when the root
-   filesystem is created. Furthermore, ``$D`` is blank when the script
-   is run on the target device. This implies two purposes for ``$D``:
-   ensuring paths are valid in both the host and target environments,
-   and detecting which environment the script is running in so that it
-   can take the appropriate action (see the sketch after this list).
-
-- *Attempting to run processes that are specific to or dependent on the
- target architecture:* You can work around these attempts by using
- native tools, which run on the host system, to accomplish the same
- tasks, or by alternatively running the processes under QEMU, which
- has the ``qemu_run_binary`` function. For more information, see the
- :ref:`qemu <ref-classes-qemu>` class.
-
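-Here is a sketch of the conventional ``$D`` check. The ``mytool``
-command and its configuration path are hypothetical and only illustrate
-the pattern::
-
-   pkg_postinst:${PN}() {
-       if [ -n "$D" ]; then
-           # $D is set: running on the build host while the root
-           # filesystem is created, so prefix absolute paths with $D.
-           mytool --generate-cache $D${sysconfdir}/mytool.conf
-       else
-           # $D is empty: running on the target during first boot.
-           mytool --generate-cache ${sysconfdir}/mytool.conf
-       fi
-   }
-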
-Areas With Write Access
------------------------
-
-With the "read-only-rootfs" feature enabled, any attempt by the target
-to write to the root filesystem at runtime fails. Consequently, you must
-make sure that processes and applications that attempt these types of
-writes are configured to write to directories with write access (e.g.
-``/tmp`` or ``/var/run``).
-
-Maintaining Build Output Quality
-================================
-
-Many factors can influence the quality of a build. For example, if you
-upgrade a recipe to use a new version of an upstream software package or
-you experiment with some new configuration options, subtle changes can
-occur that you might not detect until later. Consider the case where
-your recipe is using a newer version of an upstream package. In this
-case, a new version of a piece of software might introduce an optional
-dependency on another library, which is auto-detected. If that library
-has already been built when the software is building, the software will
-link to the built library and that library will be pulled into your
-image along with the new software even if you did not want the library.
-
-The :ref:`buildhistory <ref-classes-buildhistory>`
-class helps you maintain the quality of your build output. You
-can use the class to highlight unexpected and possibly unwanted changes
-in the build output. When you enable build history, it records
-information about the contents of each package and image and then
-commits that information to a local Git repository where you can examine
-the information.
-
-The remainder of this section describes the following:
-
-- :ref:`How you can enable and disable build history <dev-manual/common-tasks:enabling and disabling build history>`
-
-- :ref:`How to understand what the build history contains <dev-manual/common-tasks:understanding what the build history contains>`
-
-- :ref:`How to limit the information used for build history <dev-manual/common-tasks:using build history to gather image information only>`
-
-- :ref:`How to examine the build history from both a command-line and web interface <dev-manual/common-tasks:examining build history information>`
-
-Enabling and Disabling Build History
-------------------------------------
-
-Build history is disabled by default. To enable it, add the following
-:term:`INHERIT` statement and set the
-:term:`BUILDHISTORY_COMMIT`
-variable to "1" at the end of your ``conf/local.conf`` file found in the
-:term:`Build Directory`::
-
- INHERIT += "buildhistory"
- BUILDHISTORY_COMMIT = "1"
-
-Enabling build history as
-previously described causes the OpenEmbedded build system to collect
-build output information and commit it as a single commit to a local
-:ref:`overview-manual/development-environment:git` repository.
-
-.. note::
-
- Enabling build history increases your build times slightly,
- particularly for images, and increases the amount of disk space used
- during the build.
-
-You can disable build history by removing the previous statements from
-your ``conf/local.conf`` file.
-
-Understanding What the Build History Contains
----------------------------------------------
-
-Build history information is kept in
-``${``\ :term:`TOPDIR`\ ``}/buildhistory``
-in the Build Directory as defined by the
-:term:`BUILDHISTORY_DIR`
-variable. Here is an example abbreviated listing:
-
-.. image:: figures/buildhistory.png
- :align: center
-
-At the top level, there is a ``metadata-revs`` file that lists the
-revisions of the repositories for the enabled layers when the build was
-produced. The rest of the data splits into separate ``packages``,
-``images`` and ``sdk`` directories, the contents of which are described
-as follows.
-
-Build History Package Information
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The history for each package contains a text file that has name-value
-pairs with information about the package. For example,
-``buildhistory/packages/i586-poky-linux/busybox/busybox/latest``
-contains the following:
-
-.. code-block:: none
-
- PV = 1.22.1
- PR = r32
- RPROVIDES =
- RDEPENDS = glibc (>= 2.20) update-alternatives-opkg
- RRECOMMENDS = busybox-syslog busybox-udhcpc update-rc.d
- PKGSIZE = 540168
- FILES = /usr/bin/* /usr/sbin/* /usr/lib/busybox/* /usr/lib/lib*.so.* \
- /etc /com /var /bin/* /sbin/* /lib/*.so.* /lib/udev/rules.d \
- /usr/lib/udev/rules.d /usr/share/busybox /usr/lib/busybox/* \
- /usr/share/pixmaps /usr/share/applications /usr/share/idl \
- /usr/share/omf /usr/share/sounds /usr/lib/bonobo/servers
- FILELIST = /bin/busybox /bin/busybox.nosuid /bin/busybox.suid /bin/sh \
- /etc/busybox.links.nosuid /etc/busybox.links.suid
-
-Most of these
-name-value pairs correspond to variables used to produce the package.
-The exceptions are ``FILELIST``, which is the actual list of files in
-the package, and ``PKGSIZE``, which is the total size of files in the
-package in bytes.
-
-There is also a file that corresponds to the recipe from which the package
-came (e.g. ``buildhistory/packages/i586-poky-linux/busybox/latest``):
-
-.. code-block:: none
-
- PV = 1.22.1
- PR = r32
- DEPENDS = initscripts kern-tools-native update-rc.d-native \
- virtual/i586-poky-linux-compilerlibs virtual/i586-poky-linux-gcc \
- virtual/libc virtual/update-alternatives
- PACKAGES = busybox-ptest busybox-httpd busybox-udhcpd busybox-udhcpc \
- busybox-syslog busybox-mdev busybox-hwclock busybox-dbg \
- busybox-staticdev busybox-dev busybox-doc busybox-locale busybox
-
-Finally, for those recipes fetched from a version control system (e.g.,
-Git), there is a file that lists source revisions that are specified in
-the recipe and the actual revisions used during the build. Listed
-and actual revisions might differ when
-:term:`SRCREV` is set to
-${:term:`AUTOREV`}. Here is an
-example assuming
-``buildhistory/packages/qemux86-poky-linux/linux-yocto/latest_srcrev``::
-
- # SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
- SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
- # SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
-   SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
-
-You can use the
-``buildhistory-collect-srcrevs`` command with the ``-a`` option to
-collect the stored :term:`SRCREV` values from build history and report them
-in a format suitable for use in global configuration (e.g.,
-``local.conf`` or a distro include file) to override floating
-:term:`AUTOREV` values to a fixed set of revisions. Here is some example
-output from this command::
-
- $ buildhistory-collect-srcrevs -a
- # all-poky-linux
- SRCREV:pn-ca-certificates = "07de54fdcc5806bde549e1edf60738c6bccf50e8"
- SRCREV:pn-update-rc.d = "8636cf478d426b568c1be11dbd9346f67e03adac"
- # core2-64-poky-linux
- SRCREV:pn-binutils = "87d4632d36323091e731eb07b8aa65f90293da66"
- SRCREV:pn-btrfs-tools = "8ad326b2f28c044cb6ed9016d7c3285e23b673c8"
- SRCREV_bzip2-tests:pn-bzip2 = "f9061c030a25de5b6829e1abf373057309c734c0"
- SRCREV:pn-e2fsprogs = "02540dedd3ddc52c6ae8aaa8a95ce75c3f8be1c0"
- SRCREV:pn-file = "504206e53a89fd6eed71aeaf878aa3512418eab1"
- SRCREV_glibc:pn-glibc = "24962427071fa532c3c48c918e9d64d719cc8a6c"
- SRCREV:pn-gnome-desktop-testing = "e346cd4ed2e2102c9b195b614f3c642d23f5f6e7"
- SRCREV:pn-init-system-helpers = "dbd9197569c0935029acd5c9b02b84c68fd937ee"
- SRCREV:pn-kmod = "b6ecfc916a17eab8f93be5b09f4e4f845aabd3d1"
- SRCREV:pn-libnsl2 = "82245c0c58add79a8e34ab0917358217a70e5100"
- SRCREV:pn-libseccomp = "57357d2741a3b3d3e8425889a6b79a130e0fa2f3"
- SRCREV:pn-libxcrypt = "50cf2b6dd4fdf04309445f2eec8de7051d953abf"
- SRCREV:pn-ncurses = "51d0fd9cc3edb975f04224f29f777f8f448e8ced"
- SRCREV:pn-procps = "19a508ea121c0c4ac6d0224575a036de745eaaf8"
- SRCREV:pn-psmisc = "5fab6b7ab385080f1db725d6803136ec1841a15f"
- SRCREV:pn-ptest-runner = "bcb82804daa8f725b6add259dcef2067e61a75aa"
- SRCREV:pn-shared-mime-info = "18e558fa1c8b90b86757ade09a4ba4d6a6cf8f70"
- SRCREV:pn-zstd = "e47e674cd09583ff0503f0f6defd6d23d8b718d3"
- # qemux86_64-poky-linux
- SRCREV_machine:pn-linux-yocto = "20301aeb1a64164b72bc72af58802b315e025c9c"
- SRCREV_meta:pn-linux-yocto = "2d38a472b21ae343707c8bd64ac68a9eaca066a0"
- # x86_64-linux
- SRCREV:pn-binutils-cross-x86_64 = "87d4632d36323091e731eb07b8aa65f90293da66"
- SRCREV_glibc:pn-cross-localedef-native = "24962427071fa532c3c48c918e9d64d719cc8a6c"
- SRCREV_localedef:pn-cross-localedef-native = "794da69788cbf9bf57b59a852f9f11307663fa87"
- SRCREV:pn-debianutils-native = "de14223e5bffe15e374a441302c528ffc1cbed57"
- SRCREV:pn-libmodulemd-native = "ee80309bc766d781a144e6879419b29f444d94eb"
- SRCREV:pn-virglrenderer-native = "363915595e05fb252e70d6514be2f0c0b5ca312b"
- SRCREV:pn-zstd-native = "e47e674cd09583ff0503f0f6defd6d23d8b718d3"
-
-.. note::
-
- Here are some notes on using the ``buildhistory-collect-srcrevs`` command:
-
- - By default, only values where the :term:`SRCREV` was not hardcoded
- (usually when :term:`AUTOREV` is used) are reported. Use the ``-a``
- option to see all :term:`SRCREV` values.
-
- - The output statements might not have any effect if overrides are
- applied elsewhere in the build system configuration. Use the
- ``-f`` option to add the ``forcevariable`` override to each output
- line if you need to work around this restriction.
-
-   - The script does apply special handling when building for multiple
-     machines. However, the script does place a comment before each set
-     of values that specifies the triplet to which they belong, as
-     previously shown (e.g., ``i586-poky-linux``).
-
-Build History Image Information
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The files produced for each image are as follows:
-
-- ``image-files:`` A directory containing selected files from the root
- filesystem. The files are defined by
- :term:`BUILDHISTORY_IMAGE_FILES`.
-
-- ``build-id.txt:`` Human-readable information about the build
- configuration and metadata source revisions. This file contains the
- full build header as printed by BitBake.
-
-- ``*.dot:`` Dependency graphs for the image that are compatible with
- ``graphviz``.
-
-- ``files-in-image.txt:`` A list of files in the image with
- permissions, owner, group, size, and symlink information.
-
-- ``image-info.txt:`` A text file containing name-value pairs with
- information about the image. See the following listing example for
- more information.
-
-- ``installed-package-names.txt:`` A list of installed packages by name
- only.
-
-- ``installed-package-sizes.txt:`` A list of installed packages ordered
- by size.
-
-- ``installed-packages.txt:`` A list of installed packages with full
- package filenames.
-
-.. note::
-
- Installed package information is able to be gathered and produced
- even if package management is disabled for the final image.
-
-Here is an example of ``image-info.txt``:
-
-.. code-block:: none
-
- DISTRO = poky
- DISTRO_VERSION = 3.4+snapshot-a0245d7be08f3d24ea1875e9f8872aa6bbff93be
- USER_CLASSES = buildstats
- IMAGE_CLASSES = qemuboot qemuboot license_image
- IMAGE_FEATURES = debug-tweaks
- IMAGE_LINGUAS =
- IMAGE_INSTALL = packagegroup-core-boot speex speexdsp
- BAD_RECOMMENDATIONS =
- NO_RECOMMENDATIONS =
- PACKAGE_EXCLUDE =
- ROOTFS_POSTPROCESS_COMMAND = write_package_manifest; license_create_manifest; cve_check_write_rootfs_manifest; ssh_allow_empty_password; ssh_allow_root_login; postinst_enable_logging; rootfs_update_timestamp; write_image_test_data; empty_var_volatile; sort_passwd; rootfs_reproducible;
- IMAGE_POSTPROCESS_COMMAND = buildhistory_get_imageinfo ;
- IMAGESIZE = 9265
-
-Other than ``IMAGESIZE``,
-which is the total size of the files in the image in Kbytes, the
-name-value pairs are variables that may have influenced the content of
-the image. This information is often useful when you are trying to
-determine why a change in the package or file listings has occurred.
-
-Using Build History to Gather Image Information Only
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As you can see, build history produces image information, including
-dependency graphs, so you can see why something was pulled into the
-image. If you are just interested in this information and not interested
-in collecting specific package or SDK information, you can enable
-writing only image information without any history by adding the
-following to your ``conf/local.conf`` file found in the
-:term:`Build Directory`::
-
- INHERIT += "buildhistory"
- BUILDHISTORY_COMMIT = "0"
- BUILDHISTORY_FEATURES = "image"
-
-Here, you set the
-:term:`BUILDHISTORY_FEATURES`
-variable to use the image feature only.
-
-Build History SDK Information
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Build history collects similar information on the contents of SDKs (e.g.
-``bitbake -c populate_sdk imagename``) as compared to information it
-collects for images. Furthermore, this information differs depending on
-whether an extensible or standard SDK is being produced.
-
-The following list shows the files produced for SDKs:
-
-- ``files-in-sdk.txt:`` A list of files in the SDK with permissions,
- owner, group, size, and symlink information. This list includes both
- the host and target parts of the SDK.
-
-- ``sdk-info.txt:`` A text file containing name-value pairs with
- information about the SDK. See the following listing example for more
- information.
-
-- ``sstate-task-sizes.txt:`` A text file containing name-value pairs
- with information about task group sizes (e.g. ``do_populate_sysroot``
- tasks have a total size). The ``sstate-task-sizes.txt`` file exists
- only when an extensible SDK is created.
-
-- ``sstate-package-sizes.txt:`` A text file containing name-value pairs
- with information for the shared-state packages and sizes in the SDK.
- The ``sstate-package-sizes.txt`` file exists only when an extensible
- SDK is created.
-
-- ``sdk-files:`` A folder that contains copies of the files mentioned
- in ``BUILDHISTORY_SDK_FILES`` if the files are present in the output.
- Additionally, the default value of ``BUILDHISTORY_SDK_FILES`` is
- specific to the extensible SDK although you can set it differently if
- you would like to pull in specific files from the standard SDK.
-
- The default files are ``conf/local.conf``, ``conf/bblayers.conf``,
- ``conf/auto.conf``, ``conf/locked-sigs.inc``, and
- ``conf/devtool.conf``. Thus, for an extensible SDK, these files get
- copied into the ``sdk-files`` directory.
-
-- The following information appears under each of the ``host`` and
- ``target`` directories for the portions of the SDK that run on the
- host and on the target, respectively:
-
- .. note::
-
-   The following files are mostly empty when producing an extensible
-   SDK because this type of SDK is not constructed from packages as the
-   standard SDK is.
-
- - ``depends.dot:`` Dependency graph for the SDK that is compatible
- with ``graphviz``.
-
- - ``installed-package-names.txt:`` A list of installed packages by
- name only.
-
- - ``installed-package-sizes.txt:`` A list of installed packages
- ordered by size.
-
- - ``installed-packages.txt:`` A list of installed packages with full
- package filenames.
-
-Here is an example of ``sdk-info.txt``:
-
-.. code-block:: none
-
- DISTRO = poky
- DISTRO_VERSION = 1.3+snapshot-20130327
- SDK_NAME = poky-glibc-i686-arm
- SDK_VERSION = 1.3+snapshot
- SDKMACHINE =
- SDKIMAGE_FEATURES = dev-pkgs dbg-pkgs
- BAD_RECOMMENDATIONS =
- SDKSIZE = 352712
-
-Other than ``SDKSIZE``, which is
-the total size of the files in the SDK in Kbytes, the name-value pairs
-are variables that might have influenced the content of the SDK. This
-information is often useful when you are trying to determine why a
-change in the package or file listings has occurred.
-
-Examining Build History Information
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can examine build history output from the command line or from a web
-interface.
-
-To see any changes that have occurred (assuming you have
-:term:`BUILDHISTORY_COMMIT` = "1"),
-you can simply use any Git command that allows you to view the history
-of a repository. Here is one method::
-
- $ git log -p
-
-Note, however, that this method also shows changes that are not
-significant (e.g. a package's size changing by a few bytes).
-
-There is a command-line tool called ``buildhistory-diff``, though,
-that queries the Git repository and prints just the differences that
-might be significant in human-readable form. Here is an example::
-
- $ poky/poky/scripts/buildhistory-diff . HEAD^
- Changes to images/qemux86_64/glibc/core-image-minimal (files-in-image.txt):
- /etc/anotherpkg.conf was added
- /sbin/anotherpkg was added
- * (installed-package-names.txt):
- * anotherpkg was added
- Changes to images/qemux86_64/glibc/core-image-minimal (installed-package-names.txt):
- anotherpkg was added
- packages/qemux86_64-poky-linux/v86d: PACKAGES: added "v86d-extras"
- * PR changed from "r0" to "r1"
- * PV changed from "0.1.10" to "0.1.12"
- packages/qemux86_64-poky-linux/v86d/v86d: PKGSIZE changed from 110579 to 144381 (+30%)
- * PR changed from "r0" to "r1"
- * PV changed from "0.1.10" to "0.1.12"
-
-.. note::
-
- The ``buildhistory-diff`` tool requires the ``GitPython``
-   package. Be sure to install it using ``pip3`` as follows::
-
- $ pip3 install GitPython --user
-
-
- Alternatively, you can install ``python3-git`` using the appropriate
-   distribution package manager (e.g. ``apt``, ``dnf``, or ``zypper``).
-
-To see changes to the build history using a web interface, follow the
-instructions in the ``README`` file
-:yocto_git:`here </buildhistory-web/>`.
-
-Here is a sample screenshot of the interface:
-
-.. image:: figures/buildhistory-web.png
- :align: center
-
-Performing Automated Runtime Testing
-====================================
-
-The OpenEmbedded build system makes available a series of automated
-tests for images to verify runtime functionality. You can run these
-tests on either QEMU or actual target hardware. Tests are written in
-Python making use of the ``unittest`` module, and the majority of them
-run commands on the target system over SSH. This section describes how
-you set up the environment to use these tests, run available tests, and
-write and add your own tests.
-
-For information on the test and QA infrastructure available within the
-Yocto Project, see the ":ref:`ref-manual/release-process:testing and quality assurance`"
-section in the Yocto Project Reference Manual.
-
-Enabling Tests
---------------
-
-Depending on whether you are planning to run tests using QEMU or on the
-hardware, you have to take different steps to enable the tests. See the
-following subsections for information on how to enable both types of
-tests.
-
-Enabling Runtime Tests on QEMU
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In order to run tests, you need to do the following:
-
-- *Set up to avoid interaction with sudo for networking:* To
- accomplish this, you must do one of the following:
-
-   - Add ``NOPASSWD`` for your user in ``/etc/sudoers``, either for all
-     commands or just for ``runqemu-ifup`` (see the example entry after
-     this list). You must provide the full path to the script, as that
-     path can change if you are using multiple clones of the source
-     repository.
-
- .. note::
-
- On some distributions, you also need to comment out "Defaults
- requiretty" in ``/etc/sudoers``.
-
- - Manually configure a tap interface for your system.
-
- - Run as root the script in ``scripts/runqemu-gen-tapdevs``, which
- should generate a list of tap devices. This is the option
- typically chosen for Autobuilder-type environments.
-
- .. note::
-
- - Be sure to use an absolute path when calling this script
- with sudo.
-
- - The package recipe ``qemu-helper-native`` is required to run
- this script. Build the package using the following command::
-
- $ bitbake qemu-helper-native
-
-- *Set the DISPLAY variable:* You need to set this variable so that
- you have an X server available (e.g. start ``vncserver`` for a
- headless machine).
-
-- *Be sure your host's firewall accepts incoming connections from
- 192.168.7.0/24:* Some of the tests (in particular DNF tests) start an
- HTTP server on a random high number port, which is used to serve
- files to the target. The DNF module serves
- ``${WORKDIR}/oe-rootfs-repo`` so it can run DNF channel commands.
- That means your host's firewall must accept incoming connections from
- 192.168.7.0/24, which is the default IP range used for tap devices by
- ``runqemu``.
-
-- *Be sure your host has the correct packages installed:* Depending on
-  your host's distribution, you need to have the following packages
- installed:
-
- - Ubuntu and Debian: ``sysstat`` and ``iproute2``
-
- - openSUSE: ``sysstat`` and ``iproute2``
-
- - Fedora: ``sysstat`` and ``iproute``
-
- - CentOS: ``sysstat`` and ``iproute``
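-
-As an illustration of the first option above (``NOPASSWD`` in
-``/etc/sudoers``), an entry restricted to ``runqemu-ifup`` might look
-like the following, where the user name and the script path are
-placeholders for your own setup and the file is normally edited with
-``visudo``::
-
-   myuser ALL=(root) NOPASSWD: /full/path/to/poky/scripts/runqemu-ifup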
-
-Once you start running the tests, the following happens:
-
-1. A copy of the root filesystem is written to ``${WORKDIR}/testimage``.
-
-2. The image is booted under QEMU using the standard ``runqemu`` script.
-
-3. By default, up to 500 seconds are allowed for the boot process to
-   reach the login prompt. You can change the timeout period by
- setting
- :term:`TEST_QEMUBOOT_TIMEOUT`
- in the ``local.conf`` file.
-
-4. Once the boot process completes and the login prompt appears, the
- tests run. The full boot log is written to
- ``${WORKDIR}/testimage/qemu_boot_log``.
-
-5. Each test module loads in the order found in :term:`TEST_SUITES`. You can
- find the full output of the commands run over SSH in
-   ``${WORKDIR}/testimage/ssh_target_log``.
-
-6. If no failures occur, the task running the tests ends successfully.
- You can find the output from the ``unittest`` in the task log at
- ``${WORKDIR}/temp/log.do_testimage``.
-
-Enabling Runtime Tests on Hardware
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The OpenEmbedded build system can run tests on real hardware, and for
-certain devices it can also deploy the image to be tested onto the
-device beforehand.
-
-For automated deployment, a "controller image" is installed onto the
-hardware once as part of setup. Then, each time tests are to be run, the
-following occurs:
-
-1. The controller image is booted into and used to write the image to be
- tested to a second partition.
-
-2. The device is then rebooted using an external script that you need to
- provide.
-
-3. The device boots into the image to be tested.
-
-When running tests (independent of whether the image has been deployed
-automatically or not), the device is expected to be connected to a
-network on a pre-determined IP address. You can either use static IP
-addresses written into the image, or set the image to use DHCP and have
-your DHCP server on the test network assign a known IP address based on
-the MAC address of the device.
-
-In order to run tests on hardware, you need to set :term:`TEST_TARGET` to an
-appropriate value. For QEMU, you do not have to change anything; the
-default value is "qemu". For running tests on hardware, the following
-options are available:
-
-- *"simpleremote":* Choose "simpleremote" if you are going to run tests
- on a target system that is already running the image to be tested and
- is available on the network. You can use "simpleremote" in
- conjunction with either real hardware or an image running within a
- separately started QEMU or any other virtual machine manager.
-
-- *"SystemdbootTarget":* Choose "SystemdbootTarget" if your hardware is
- an EFI-based machine with ``systemd-boot`` as bootloader and
- ``core-image-testmaster`` (or something similar) is installed. Also,
- your hardware under test must be in a DHCP-enabled network that gives
- it the same IP address for each reboot.
-
- If you choose "SystemdbootTarget", there are additional requirements
- and considerations. See the
- ":ref:`dev-manual/common-tasks:selecting systemdboottarget`" section, which
- follows, for more information.
-
-- *"BeagleBoneTarget":* Choose "BeagleBoneTarget" if you are deploying
- images and running tests on the BeagleBone "Black" or original
- "White" hardware. For information on how to use these tests, see the
- comments at the top of the BeagleBoneTarget
- ``meta-yocto-bsp/lib/oeqa/controllers/beaglebonetarget.py`` file.
-
-- *"EdgeRouterTarget":* Choose "EdgeRouterTarget" if you are deploying
- images and running tests on the Ubiquiti Networks EdgeRouter Lite.
- For information on how to use these tests, see the comments at the
- top of the EdgeRouterTarget
- ``meta-yocto-bsp/lib/oeqa/controllers/edgeroutertarget.py`` file.
-
-- *"GrubTarget":* Choose "GrubTarget" if you are deploying images and running
- tests on any generic PC that boots using GRUB. For information on how
- to use these tests, see the comments at the top of the GrubTarget
- ``meta-yocto-bsp/lib/oeqa/controllers/grubtarget.py`` file.
-
-- *"your-target":* Create your own custom target if you want to run
- tests when you are deploying images and running tests on a custom
- machine within your BSP layer. To do this, you need to add a Python
- unit that defines the target class under ``lib/oeqa/controllers/``
- within your layer. You must also provide an empty ``__init__.py``.
- For examples, see files in ``meta-yocto-bsp/lib/oeqa/controllers/``.
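-
-For example, a minimal ``local.conf`` fragment for the "simpleremote"
-case might look like the following, where the IP addresses are
-placeholders for your own network::
-
-   INHERIT += "testimage"
-   TEST_TARGET = "simpleremote"
-   TEST_TARGET_IP = "192.168.1.50"
-   TEST_SERVER_IP = "192.168.1.2"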
-
-Selecting SystemdbootTarget
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you did not set :term:`TEST_TARGET` to "SystemdbootTarget", then you do
-not need any information in this section. You can skip down to the
-":ref:`dev-manual/common-tasks:running tests`" section.
-
-If you did set :term:`TEST_TARGET` to "SystemdbootTarget", you also need to
-perform a one-time setup of your controller image by doing the following:
-
-1. *Set EFI_PROVIDER:* Be sure that :term:`EFI_PROVIDER` is as follows::
-
- EFI_PROVIDER = "systemd-boot"
-
-2. *Build the controller image:* Build the ``core-image-testmaster`` image.
- The ``core-image-testmaster`` recipe is provided as an example for a
- "controller" image and you can customize the image recipe as you would
- any other recipe.
-
- Here are the image recipe requirements:
-
- - Inherits ``core-image`` so that kernel modules are installed.
-
-   - Installs normal Linux utilities, not BusyBox ones (e.g. ``bash``,
-     ``coreutils``, ``tar``, ``gzip``, and ``kmod``).
-
- - Uses a custom Initial RAM Disk (initramfs) image with a custom
- installer. A normal image that you can install usually creates a
- single root filesystem partition. This image uses another installer that
- creates a specific partition layout. Not all Board Support
- Packages (BSPs) can use an installer. For such cases, you need to
- manually create the following partition layout on the target:
-
- - First partition mounted under ``/boot``, labeled "boot".
-
- - The main root filesystem partition where this image gets installed,
- which is mounted under ``/``.
-
- - Another partition labeled "testrootfs" where test images get
- deployed.
-
-3. *Install image:* Install the image that you just built on the target
- system.
-
-The final thing you need to do when setting :term:`TEST_TARGET` to
-"SystemdbootTarget" is to set up the test image:
-
-1. *Set up your local.conf file:* Make sure you have the following
- statements in your ``local.conf`` file::
-
- IMAGE_FSTYPES += "tar.gz"
- INHERIT += "testimage"
- TEST_TARGET = "SystemdbootTarget"
- TEST_TARGET_IP = "192.168.2.3"
-
-2. *Build your test image:* Use BitBake to build the image::
-
- $ bitbake core-image-sato
-
-Power Control
-~~~~~~~~~~~~~
-
-For most hardware targets other than "simpleremote", you can control
-power:
-
-- You can use :term:`TEST_POWERCONTROL_CMD` together with
- :term:`TEST_POWERCONTROL_EXTRA_ARGS` as a command that runs on the host
- and does power cycling. The test code passes one argument to that
- command: off, on or cycle (off then on). Here is an example that
- could appear in your ``local.conf`` file::
-
- TEST_POWERCONTROL_CMD = "powercontrol.exp test 10.11.12.1 nuc1"
-
- In this example, the expect
- script does the following:
-
- .. code-block:: shell
-
- ssh test@10.11.12.1 "pyctl nuc1 arg"
-
- It then runs a Python script that controls power for a label called
- ``nuc1``.
-
- .. note::
-
- You need to customize :term:`TEST_POWERCONTROL_CMD` and
- :term:`TEST_POWERCONTROL_EXTRA_ARGS` for your own setup. The one requirement
- is that it accepts "on", "off", and "cycle" as the last argument.
-
-- When no command is defined, it connects to the device over SSH and
- uses the classic reboot command to reboot the device. Classic reboot
- is fine as long as the machine actually reboots (i.e. the SSH test
- has not failed). It is useful for scenarios where you have a simple
- setup, typically with a single board, and where some manual
- interaction is okay from time to time.
-
-If you have no hardware to automatically perform power control but still
-wish to experiment with automated hardware testing, you can use the
-``dialog-power-control`` script that shows a dialog prompting you to perform
-the required power action. This script requires either KDialog or Zenity
-to be installed. To use this script, set the
-:term:`TEST_POWERCONTROL_CMD`
-variable as follows::
-
- TEST_POWERCONTROL_CMD = "${COREBASE}/scripts/contrib/dialog-power-control"
-
-Serial Console Connection
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-For test target classes requiring a serial console to interact with the
-bootloader (e.g. BeagleBoneTarget, EdgeRouterTarget, and GrubTarget),
-you need to specify a command to use to connect to the serial console of
-the target machine by using the
-:term:`TEST_SERIALCONTROL_CMD`
-variable and optionally the
-:term:`TEST_SERIALCONTROL_EXTRA_ARGS`
-variable.
-
-These cases could be a serial terminal program if the machine is
-connected to a local serial port, or a ``telnet`` or ``ssh`` command
-connecting to a remote console server. Regardless of the case, the
-command simply needs to connect to the serial console and forward that
-connection to standard input and output as any normal terminal program
-does. For example, to use the picocom terminal program on serial device
-``/dev/ttyUSB0`` at 115200bps, you would set the variable as follows::
-
- TEST_SERIALCONTROL_CMD = "picocom /dev/ttyUSB0 -b 115200"
-
-For local
-devices where the serial port device disappears when the device reboots,
-an additional "serdevtry" wrapper script is provided. To use this
-wrapper, simply prefix the terminal command with
-``${COREBASE}/scripts/contrib/serdevtry``::
-
- TEST_SERIALCONTROL_CMD = "${COREBASE}/scripts/contrib/serdevtry picocom -b 115200 /dev/ttyUSB0"
-
-Running Tests
--------------
-
-You can start the tests automatically or manually:
-
-- *Automatically running tests:* To run the tests automatically after
- the OpenEmbedded build system successfully creates an image, first
- set the
- :term:`TESTIMAGE_AUTO`
- variable to "1" in your ``local.conf`` file in the
- :term:`Build Directory`::
-
- TESTIMAGE_AUTO = "1"
-
- Next, build your image. If the image successfully builds, the
- tests run::
-
- bitbake core-image-sato
-
-- *Manually running tests:* To manually run the tests, first globally
- inherit the
- :ref:`testimage <ref-classes-testimage*>` class
- by editing your ``local.conf`` file::
-
- INHERIT += "testimage"
-
- Next, use BitBake to run the tests::
-
- bitbake -c testimage image
-
-All test files reside in ``meta/lib/oeqa/runtime`` in the
-:term:`Source Directory`. A test name maps
-directly to a Python module. Each test module may contain a number of
-individual tests. Tests are usually grouped together by the area tested
-(e.g. tests for systemd reside in ``meta/lib/oeqa/runtime/systemd.py``).
-
-You can add tests to any layer provided you place them in the proper
-area and you extend :term:`BBPATH` in
-the ``local.conf`` file as normal. Be sure that tests reside in
-``layer/lib/oeqa/runtime``.
-
-.. note::
-
- Be sure that module names do not collide with module names used in
- the default set of test modules in ``meta/lib/oeqa/runtime``.
-
-You can change the set of tests run by appending to or overriding the
-:term:`TEST_SUITES` variable in
-``local.conf``. Each name in :term:`TEST_SUITES` represents a required test
-for the image. Test modules named within :term:`TEST_SUITES` cannot be
-skipped even if a test is not suitable for an image (e.g. running the
-RPM tests on an image without ``rpm``). Appending "auto" to
-:term:`TEST_SUITES` causes the build system to try to run all tests that are
-suitable for the image (i.e. each test module may elect to skip itself).
-
-The order you list tests in :term:`TEST_SUITES` is important and influences
-test dependencies. Consequently, tests that depend on other tests should
-be added after the test on which they depend. For example, since the
-``ssh`` test depends on the ``ping`` test, "ssh" needs to come after
-"ping" in the list. The test class provides no re-ordering or dependency
-handling.
-
-.. note::
-
- Each module can have multiple classes with multiple test methods.
- And, Python ``unittest`` rules apply.
-
-Here are some things to keep in mind when running tests:
-
-- The default tests for the image are defined as::
-
- DEFAULT_TEST_SUITES:pn-image = "ping ssh df connman syslog xorg scp vnc date rpm dnf dmesg"
-
-- Add your own test to the list of tests by using the following::
-
- TEST_SUITES:append = " mytest"
-
-- Run a specific list of tests as follows::
-
- TEST_SUITES = "test1 test2 test3"
-
- Remember, order is important. Be sure to place a test that is
- dependent on another test later in the order.
-
-Exporting Tests
----------------
-
-You can export tests so that they can run independently of the build
-system. Exporting tests is required if you want to be able to hand the
-test execution off to a scheduler. You can only export tests that are
-defined in :term:`TEST_SUITES`.
-
-If your image is already built, make sure the following are set in your
-``local.conf`` file::
-
- INHERIT += "testexport"
- TEST_TARGET_IP = "IP-address-for-the-test-target"
- TEST_SERVER_IP = "IP-address-for-the-test-server"
-
-You can then export the tests with the
-following BitBake command form::
-
- $ bitbake image -c testexport
-
-Exporting the tests places them in the
-:term:`Build Directory` in
-``tmp/testexport/``\ image, which is controlled by the
-:term:`TEST_EXPORT_DIR` variable.
-
-You can now run the tests outside of the build environment::
-
- $ cd tmp/testexport/image
- $ ./runexported.py testdata.json
-
-Here is a complete example that shows IP addresses and uses the
-``core-image-sato`` image::
-
- INHERIT += "testexport"
- TEST_TARGET_IP = "192.168.7.2"
- TEST_SERVER_IP = "192.168.7.1"
-
-Use BitBake to export the tests::
-
- $ bitbake core-image-sato -c testexport
-
-Run the tests outside of
-the build environment using the following::
-
- $ cd tmp/testexport/core-image-sato
- $ ./runexported.py testdata.json
-
-Writing New Tests
------------------
-
-As mentioned previously, all new test files need to be in the proper
-place for the build system to find them. New tests for additional
-functionality outside of the core should be added to the layer that adds
-the functionality, in ``layer/lib/oeqa/runtime`` (as long as
-:term:`BBPATH` is extended in the
-layer's ``layer.conf`` file as normal). Just remember the following:
-
-- Filenames need to map directly to test (module) names.
-
-- Do not use module names that collide with existing core tests.
-
-- Minimally, an empty ``__init__.py`` file must be present in the runtime
- directory.
-
-To create a new test, start by copying an existing module (e.g.
-``syslog.py`` or ``gcc.py`` are good ones to use). Test modules can use
-the helper classes from ``meta/lib/oeqa/utils``.
-
-.. note::
-
-   Structure shell commands so that you can rely on their exit code to
-   indicate success. Be aware that sometimes you will need to parse the
-   output. See the ``df.py`` and ``date.py`` modules for examples.
-
-You will notice that all test classes inherit ``oeRuntimeTest``, which
-is found in ``meta/lib/oetest.py``. This base class offers some helper
-methods and attributes, which are described in the following sections:
-
-Class Methods
-~~~~~~~~~~~~~
-
-Class methods are as follows:
-
-- *hasPackage(pkg):* Returns "True" if ``pkg`` is in the installed
- package list of the image, which is based on the manifest file that
- is generated during the ``do_rootfs`` task.
-
-- *hasFeature(feature):* Returns "True" if the feature is in
- :term:`IMAGE_FEATURES` or
- :term:`DISTRO_FEATURES`.
-
-Class Attributes
-~~~~~~~~~~~~~~~~
-
-Class attributes are as follows:
-
-- *pscmd:* Equals "ps -ef" if ``procps`` is installed in the image.
- Otherwise, ``pscmd`` equals "ps" (busybox).
-
-- *tc:* The called test context, which gives access to the
- following attributes:
-
-   - *d:* The BitBake datastore, which allows you to use expressions
-     such as ``oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager")``.
-
- - *testslist and testsrequired:* Used internally. The tests
- do not need these.
-
- - *filesdir:* The absolute path to
- ``meta/lib/oeqa/runtime/files``, which contains helper files for
- tests meant for copying on the target such as small files written
- in C for compilation.
-
- - *target:* The target controller object used to deploy and
- start an image on a particular target (e.g. Qemu, SimpleRemote,
- and SystemdbootTarget). Tests usually use the following:
-
- - *ip:* The target's IP address.
-
- - *server_ip:* The host's IP address, which is usually used
- by the DNF test suite.
-
- - *run(cmd, timeout=None):* The single, most used method.
- This command is a wrapper for: ``ssh root@host "cmd"``. The
- command returns a tuple: (status, output), which are what their
- names imply - the return code of "cmd" and whatever output it
- produces. The optional timeout argument represents the number
- of seconds the test should wait for "cmd" to return. If the
- argument is "None", the test uses the default instance's
- timeout period, which is 300 seconds. If the argument is "0",
- the test runs until the command returns.
-
- - *copy_to(localpath, remotepath):*
- ``scp localpath root@ip:remotepath``.
-
- - *copy_from(remotepath, localpath):*
- ``scp root@host:remotepath localpath``.
-
-Instance Attributes
-~~~~~~~~~~~~~~~~~~~
-
-There is a single instance attribute, which is ``target``. The ``target``
-instance attribute is identical to the class attribute of the same name,
-which is described in the previous section. This attribute exists as
-both an instance and class attribute so tests can use
-``self.target.run(cmd)`` in instance methods instead of
-``oeRuntimeTest.tc.target.run(cmd)``.
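-
-Putting the pieces above together, here is a minimal sketch of what a
-new test module along these lines might look like. The module name,
-class name, and test command are illustrative only, and the import path
-shown follows the existing runtime test modules; check a module such as
-``df.py`` in your release for the exact imports it uses::
-
-   # Import path as used by the existing runtime test modules
-   # (verify against your release).
-   from oeqa.oetest import oeRuntimeTest
-
-   class HostnameTest(oeRuntimeTest):
-
-       def test_hostname(self):
-           # run() executes the command on the target over SSH and
-           # returns a (status, output) tuple as described above.
-           (status, output) = self.target.run('hostname')
-           self.assertEqual(status, 0, msg="hostname failed: %s" % output)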
-
-Installing Packages in the DUT Without the Package Manager
-----------------------------------------------------------
-
-When a test requires a package built by BitBake, it is possible to
-install that package. Installing the package does not require a package
-manager be installed in the device under test (DUT). It does, however,
-require an SSH connection and the target must be using the
-``sshcontrol`` class.
-
-.. note::
-
- This method uses ``scp`` to copy files from the host to the target, which
- causes permissions and special attributes to be lost.
-
-A JSON file is used to define the packages needed by a test. This file
-must be in the same path as the file used to define the tests.
-Furthermore, the filename must map directly to the test module name with
-a ``.json`` extension.
-
-The JSON file must contain an object whose keys are test names and
-whose values are either a single object or an array of objects. Each of
-these objects uses the following data:
-
-- "pkg" - A mandatory string that is the name of the package to be
- installed.
-
-- "rm" - An optional boolean, which defaults to "false", that specifies
- to remove the package after the test.
-
-- "extract" - An optional boolean, which defaults to "false", that
- specifies if the package must be extracted from the package format.
- When set to "true", the package is not automatically installed into
- the DUT.
-
-Following is an example JSON file that handles test "foo" installing
-package "bar" and test "foobar" installing packages "foo" and "bar".
-Once the "foobar" test is complete, its packages are removed from the DUT.
-::
-
- {
- "foo": {
- "pkg": "bar"
- },
- "foobar": [
- {
- "pkg": "foo",
- "rm": true
- },
- {
- "pkg": "bar",
- "rm": true
- }
- ]
- }
-
-Debugging Tools and Techniques
-==============================
-
-The exact method for debugging build failures depends on the nature of
-the problem and on the area of the system from which the bug originates.
-Standard debugging practices such as comparison against the last known
-working version with examination of the changes and the re-application
-of steps to identify the one causing the problem are valid for the Yocto
-Project just as they are for any other system. Even though it is
-impossible to detail every possible potential failure, this section
-provides some general tips to aid in debugging given a variety of
-situations.
-
-.. note::
-
- A useful feature for debugging is the error reporting tool.
- Configuring the Yocto Project to use this tool causes the
- OpenEmbedded build system to produce error reporting commands as part
- of the console output. You can enter the commands after the build
-   completes to log error information into a common database that can
-   help you figure out what might be going wrong. For information on how
- to enable and use this feature, see the
- ":ref:`dev-manual/common-tasks:using the error reporting tool`"
- section.
-
-The following list shows the debugging topics in the remainder of this
-section:
-
-- ":ref:`dev-manual/common-tasks:viewing logs from failed tasks`" describes
- how to find and view logs from tasks that failed during the build
- process.
-
-- ":ref:`dev-manual/common-tasks:viewing variable values`" describes how to
- use the BitBake ``-e`` option to examine variable values after a
- recipe has been parsed.
-
-- ":ref:`dev-manual/common-tasks:viewing package information with \`\`oe-pkgdata-util\`\``"
- describes how to use the ``oe-pkgdata-util`` utility to query
- :term:`PKGDATA_DIR` and
- display package-related information for built packages.
-
-- ":ref:`dev-manual/common-tasks:viewing dependencies between recipes and tasks`"
- describes how to use the BitBake ``-g`` option to display recipe
- dependency information used during the build.
-
-- ":ref:`dev-manual/common-tasks:viewing task variable dependencies`" describes
- how to use the ``bitbake-dumpsig`` command in conjunction with key
- subdirectories in the
- :term:`Build Directory` to determine
- variable dependencies.
-
-- ":ref:`dev-manual/common-tasks:running specific tasks`" describes
- how to use several BitBake options (e.g. ``-c``, ``-C``, and ``-f``)
- to run specific tasks in the build chain. It can be useful to run
-tasks "out-of-order" when trying to isolate build issues.
-
-- ":ref:`dev-manual/common-tasks:general bitbake problems`" describes how
- to use BitBake's ``-D`` debug output option to reveal more about what
- BitBake is doing during the build.
-
-- ":ref:`dev-manual/common-tasks:building with no dependencies`"
- describes how to use the BitBake ``-b`` option to build a recipe
- while ignoring dependencies.
-
-- ":ref:`dev-manual/common-tasks:recipe logging mechanisms`"
- describes how to use the many recipe logging functions to produce
- debugging output and report errors and warnings.
-
-- ":ref:`dev-manual/common-tasks:debugging parallel make races`"
- describes how to debug situations where the build consists of several
- parts that are run simultaneously and when the output or result of
- one part is not ready for use with a different part of the build that
- depends on that output.
-
-- ":ref:`dev-manual/common-tasks:debugging with the gnu project debugger (gdb) remotely`"
- describes how to use GDB to allow you to examine running programs, which can
- help you fix problems.
-
-- ":ref:`dev-manual/common-tasks:debugging with the gnu project debugger (gdb) on the target`"
- describes how to use GDB directly on target hardware for debugging.
-
-- ":ref:`dev-manual/common-tasks:other debugging tips`" describes
- miscellaneous debugging tips that can be useful.
-
-Viewing Logs from Failed Tasks
-------------------------------
-
-You can find the log for a task in the file
-``${``\ :term:`WORKDIR`\ ``}/temp/log.do_``\ `taskname`.
-For example, the log for the
-:ref:`ref-tasks-compile` task of the
-QEMU minimal image for the x86 machine (``qemux86``) might be in
-``tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/temp/log.do_compile``.
-To see the commands :term:`BitBake` ran
-to generate a log, look at the corresponding ``run.do_``\ `taskname` file
-in the same directory.
-
-``log.do_``\ `taskname` and ``run.do_``\ `taskname` are actually symbolic
-links to ``log.do_``\ `taskname`\ ``.``\ `pid` and
-``run.do_``\ `taskname`\ ``.``\ `pid`, where `pid` is the PID the task had
-when it ran. The symlinks always point to the files corresponding to the
-most recent run.
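-
-If you are not sure where a recipe's ``temp`` directory is located, one
-convenient way to find it (a convenience rather than the only method) is
-to query the recipe's ``T`` variable, which points at that directory.
-For example (the recipe name is a placeholder)::
-
-   $ bitbake -e somerecipe | grep "^T="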
-
-Viewing Variable Values
------------------------
-
-Sometimes you need to know the value of a variable as a result of
-BitBake's parsing step. This could be because some unexpected behavior
-occurred in your project. Perhaps an attempt to :ref:`modify a variable
-<bitbake:bitbake-user-manual/bitbake-user-manual-metadata:modifying existing
-variables>` did not work out as expected.
-
-BitBake's ``-e`` option is used to display variable values after
-parsing. The following command displays the variable values after the
-configuration files (i.e. ``local.conf``, ``bblayers.conf``,
-``bitbake.conf`` and so forth) have been parsed::
-
- $ bitbake -e
-
-The following command displays variable values after a specific recipe has
-been parsed. The variables include those from the configuration as well::
-
- $ bitbake -e recipename
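-
-Because this output is very long, it is common to pipe it through
-``grep`` when you only care about one variable. For example, to see the
-final value of :term:`SRC_URI` for a recipe (the recipe name below is a
-placeholder)::
-
-   $ bitbake -e somerecipe | grep "^SRC_URI="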
-
-.. note::
-
- Each recipe has its own private set of variables (datastore).
- Internally, after parsing the configuration, a copy of the resulting
- datastore is made prior to parsing each recipe. This copying implies
- that variables set in one recipe will not be visible to other
- recipes.
-
- Likewise, each task within a recipe gets a private datastore based on
- the recipe datastore, which means that variables set within one task
- will not be visible to other tasks.
-
-In the output of ``bitbake -e``, each variable is preceded by a
-description of how the variable got its value, including temporary
-values that were later overridden. This description also includes
-variable flags (varflags) set on the variable. The output can be very
-helpful during debugging.
-
-Variables that are exported to the environment are preceded by
-``export`` in the output of ``bitbake -e``. See the following example::
-
- export CC="i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/ulf/poky/build/tmp/sysroots/qemux86"
-
-In addition to variable values, the output of the ``bitbake -e`` and
-``bitbake -e recipename`` commands includes the following information:
-
-- The output starts with a tree listing all configuration files and
- classes included globally, recursively listing the files they include
- or inherit in turn. Much of the behavior of the OpenEmbedded build
- system (including the behavior of the :ref:`ref-manual/tasks:normal recipe build tasks`) is
- implemented in the
- :ref:`base <ref-classes-base>` class and the
- classes it inherits, rather than being built into BitBake itself.
-
-- After the variable values, all functions appear in the output. For
- shell functions, variables referenced within the function body are
- expanded. If a function has been modified using overrides or using
- override-style operators like ``:append`` and ``:prepend``, then the
- final assembled function body appears in the output.
-
-Viewing Package Information with ``oe-pkgdata-util``
-----------------------------------------------------
-
-You can use the ``oe-pkgdata-util`` command-line utility to query
-:term:`PKGDATA_DIR` and display
-various package-related information. Note that the utility can only
-display information for packages that have already been built.
-
-Following are a few of the available ``oe-pkgdata-util`` subcommands.
-
-.. note::
-
- You can use the standard \* and ? globbing wildcards as part of
- package names and paths.
-
-- ``oe-pkgdata-util list-pkgs [pattern]``: Lists all packages
- that have been built, optionally limiting the match to packages that
- match pattern.
-
-- ``oe-pkgdata-util list-pkg-files package ...``: Lists the
- files and directories contained in the given packages.
-
- .. note::
-
- A different way to view the contents of a package is to look at
- the
- ``${``\ :term:`WORKDIR`\ ``}/packages-split``
- directory of the recipe that generates the package. This directory
- is created by the
- :ref:`ref-tasks-package` task
- and has one subdirectory for each package the recipe generates,
- which contains the files stored in that package.
-
- If you want to inspect the ``${WORKDIR}/packages-split``
- directory, make sure that
- :ref:`rm_work <ref-classes-rm-work>` is not
- enabled when you build the recipe.
-
-- ``oe-pkgdata-util find-path path ...``: Lists the names of
- the packages that contain the given paths. For example, the following
- tells us that ``/usr/share/man/man1/make.1`` is contained in the
- ``make-doc`` package::
-
- $ oe-pkgdata-util find-path /usr/share/man/man1/make.1
- make-doc: /usr/share/man/man1/make.1
-
-- ``oe-pkgdata-util lookup-recipe package ...``: Lists the name
- of the recipes that produce the given packages.
-
-For more information on the ``oe-pkgdata-util`` command, use the help
-facility::
-
- $ oe-pkgdata-util --help
- $ oe-pkgdata-util subcommand --help
-
-Viewing Dependencies Between Recipes and Tasks
-----------------------------------------------
-
-Sometimes it can be hard to see why BitBake wants to build other recipes
-before the one you have specified. Dependency information can help you
-understand why a recipe is built.
-
-To generate dependency information for a recipe, run the following
-command::
-
- $ bitbake -g recipename
-
-This command writes the following files in the current directory:
-
-- ``pn-buildlist``: A list of recipes/targets involved in building
- `recipename`. "Involved" here means that at least one task from the
- recipe needs to run when building `recipename` from scratch. Targets
- that are in
- :term:`ASSUME_PROVIDED`
- are not listed.
-
-- ``task-depends.dot``: A graph showing dependencies between tasks.
-
-The graphs are in
-`DOT <https://en.wikipedia.org/wiki/DOT_%28graph_description_language%29>`__
-format and can be converted to images (e.g. using the ``dot`` tool from
-`Graphviz <https://www.graphviz.org/>`__).
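-
-For example, one way to render the task graph with Graphviz (the result
-can be very large for anything but trivial targets) is::
-
-   $ dot -Tsvg task-depends.dot -o task-depends.svg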
-
-.. note::
-
-   - DOT files use a plain text format. The graphs generated using the
-     ``bitbake -g`` command are often so large as to be difficult to
-     read without special pruning (e.g. with BitBake's ``-I`` option)
-     and processing. Despite the form and size of the graphs, the
-     corresponding ``.dot`` files can still be read directly and can
-     provide useful information.
-
- As an example, the ``task-depends.dot`` file contains lines such
- as the following::
-
- "libxslt.do_configure" -> "libxml2.do_populate_sysroot"
-
- The above example line reveals that the
- :ref:`ref-tasks-configure`
- task in ``libxslt`` depends on the
- :ref:`ref-tasks-populate_sysroot`
- task in ``libxml2``, which is a normal
- :term:`DEPENDS` dependency
- between the two recipes.
-
- - For an example of how ``.dot`` files can be processed, see the
- ``scripts/contrib/graph-tool`` Python script, which finds and
- displays paths between graph nodes.
-
-You can use a different method to view dependency information by using
-the following command::
-
- $ bitbake -g -u taskexp recipename
-
-This command
-displays a GUI window from which you can view build-time and runtime
-dependencies for the recipes involved in building recipename.
-
-Viewing Task Variable Dependencies
-----------------------------------
-
-As mentioned in the
-":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)`" section of the BitBake
-User Manual, BitBake tries to automatically determine what variables a
-task depends on so that it can rerun the task if any values of the
-variables change. This determination is usually reliable. However, if
-you do things like construct variable names at runtime, then you might
-have to manually declare dependencies on those variables using
-``vardeps`` as described in the
-":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" section of the BitBake
-User Manual.
-
-If you are unsure whether a variable dependency is being picked up
-automatically for a given task, you can list the variable dependencies
-BitBake has determined by doing the following:
-
-1. Build the recipe containing the task::
-
- $ bitbake recipename
-
-2. Inside the :term:`STAMPS_DIR`
- directory, find the signature data (``sigdata``) file that
- corresponds to the task. The ``sigdata`` files contain a pickled
- Python database of all the metadata that went into creating the input
- checksum for the task. As an example, for the
- :ref:`ref-tasks-fetch` task of the
- ``db`` recipe, the ``sigdata`` file might be found in the following
- location::
-
- ${BUILDDIR}/tmp/stamps/i586-poky-linux/db/6.0.30-r1.do_fetch.sigdata.7c048c18222b16ff0bcee2000ef648b1
-
- For tasks that are accelerated through the shared state
- (:ref:`sstate <overview-manual/concepts:shared state cache>`) cache, an
- additional ``siginfo`` file is written into
- :term:`SSTATE_DIR` along with
- the cached task output. The ``siginfo`` files contain exactly the
- same information as ``sigdata`` files.
-
-3. Run ``bitbake-dumpsig`` on the ``sigdata`` or ``siginfo`` file. Here
- is an example::
-
- $ bitbake-dumpsig ${BUILDDIR}/tmp/stamps/i586-poky-linux/db/6.0.30-r1.do_fetch.sigdata.7c048c18222b16ff0bcee2000ef648b1
-
- In the output of the above command, you will find a line like the
- following, which lists all the (inferred) variable dependencies for
- the task. This list also includes indirect dependencies from
- variables depending on other variables, recursively.
- ::
-
- Task dependencies: ['PV', 'SRCREV', 'SRC_URI', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]', 'base_do_fetch']
-
- .. note::
-
- Functions (e.g. ``base_do_fetch``) also count as variable dependencies.
- These functions in turn depend on the variables they reference.
-
- The output of ``bitbake-dumpsig`` also includes the value each
- variable had, a list of dependencies for each variable, and
- :term:`BB_BASEHASH_IGNORE_VARS`
- information.
-
-There is also a ``bitbake-diffsigs`` command for comparing two
-``siginfo`` or ``sigdata`` files. This command can be helpful when
-trying to figure out what changed between two versions of a task. If you
-call ``bitbake-diffsigs`` with just one file, the command behaves like
-``bitbake-dumpsig``.
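-
-For example, to compare two signature files kept from different builds
-of the same task (the file names below are placeholders for real
-``sigdata`` or ``siginfo`` paths such as the one shown above), run::
-
-   $ bitbake-diffsigs path/to/old.do_fetch.sigdata.HASH path/to/new.do_fetch.sigdata.HASH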
-
-You can also use BitBake to dump out the signature construction
-information without executing tasks by using either of the following
-BitBake command-line options::
-
-   --dump-signatures=SIGNATURE_HANDLER
- -S SIGNATURE_HANDLER
-
-
-.. note::
-
- Two common values for `SIGNATURE_HANDLER` are "none" and "printdiff", which
- dump only the signature or compare the dumped signature with the cached one,
- respectively.
-
-Using BitBake with either of these options causes BitBake to dump out
-``sigdata`` files in the ``stamps`` directory for every task it would
-have executed instead of building the specified target package.
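-
-For example, to write out signature data for everything a particular
-target would build, without actually building it (the image name is
-used for illustration only), you could run::
-
-   $ bitbake core-image-minimal -S none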
-
-Viewing Metadata Used to Create the Input Signature of a Shared State Task
---------------------------------------------------------------------------
-
-Seeing what metadata went into creating the input signature of a shared
-state (sstate) task can be a useful debugging aid. This information is
-available in signature information (``siginfo``) files in
-:term:`SSTATE_DIR`. For
-information on how to view and interpret information in ``siginfo``
-files, see the
-":ref:`dev-manual/common-tasks:viewing task variable dependencies`" section.
-
-For conceptual information on shared state, see the
-":ref:`overview-manual/concepts:shared state`"
-section in the Yocto Project Overview and Concepts Manual.
-
-Invalidating Shared State to Force a Task to Run
-------------------------------------------------
-
-The OpenEmbedded build system uses
-:ref:`checksums <overview-manual/concepts:checksums (signatures)>` and
-:ref:`overview-manual/concepts:shared state` cache to avoid unnecessarily
-rebuilding tasks. Collectively, this scheme is known as "shared state
-code".
-
-As with all schemes, this one has some drawbacks. It is possible that
-you could make implicit changes to your code that the checksum
-calculations do not take into account. These implicit changes affect a
-task's output but do not trigger the shared state code into rebuilding a
-recipe. Consider an example during which a tool changes its output.
-Assume that the output of ``rpmdeps`` changes. The result of the change
-should be that all the ``package`` and ``package_write_rpm`` shared
-state cache items become invalid. However, because the change to the
-output is external to the code and therefore implicit, the associated
-shared state cache items do not become invalidated. In this case, the
-build process uses the cached items rather than running the task again.
-Obviously, these types of implicit changes can cause problems.
-
-To avoid these problems during the build, you need to understand the
-effects of any changes you make. Realize that changes you make directly
-to a function are automatically factored into the checksum calculation.
-Thus, these explicit changes invalidate the associated area of shared
-state cache. However, you need to be aware of any implicit changes that
-are not obvious changes to the code and could affect the output of a
-given task.
-
-When you identify an implicit change, you can easily take steps to
-invalidate the cache and force the tasks to run. The steps you can take
-are as simple as changing a function's comments in the source code. For
-example, to invalidate package shared state files, change the comment
-statements of
-:ref:`ref-tasks-package` or the
-comments of one of the functions it calls. Even though the change is
-purely cosmetic, it causes the checksum to be recalculated and forces
-the build system to run the task again.
-
-.. note::
-
- For an example of a commit that makes a cosmetic change to invalidate
- shared state, see this
- :yocto_git:`commit </poky/commit/meta/classes/package.bbclass?id=737f8bbb4f27b4837047cb9b4fbfe01dfde36d54>`.
-
-Running Specific Tasks
-----------------------
-
-Any given recipe consists of a set of tasks. The standard BitBake
-behavior in most cases is: ``do_fetch``, ``do_unpack``, ``do_patch``,
-``do_configure``, ``do_compile``, ``do_install``, ``do_package``,
-``do_package_write_*``, and ``do_build``. The default task is
-``do_build`` and any tasks on which it depends build first. Some tasks,
-such as ``do_devshell``, are not part of the default build chain. If you
-wish to run a task that is not part of the default build chain, you can
-use the ``-c`` option in BitBake. Here is an example::
-
- $ bitbake matchbox-desktop -c devshell
-
-The ``-c`` option respects task dependencies, which means that all other
-tasks (including tasks from other recipes) that the specified task
-depends on will be run before the task. Even when you manually specify a
-task to run with ``-c``, BitBake will only run the task if it considers
-it "out of date". See the
-":ref:`overview-manual/concepts:stamp files and the rerunning of tasks`"
-section in the Yocto Project Overview and Concepts Manual for how
-BitBake determines whether a task is "out of date".
-
-If you want to force an up-to-date task to be rerun (e.g. because you
-made manual modifications to the recipe's
-:term:`WORKDIR` that you want to try
-out), then you can use the ``-f`` option.
-
-.. note::
-
- The reason ``-f`` is never required when running the
- :ref:`ref-tasks-devshell` task is because the
- [\ :ref:`nostamp <bitbake:bitbake-user-manual/bitbake-user-manual-metadata:variable flags>`\ ]
- variable flag is already set for the task.
-
-The following example shows one way you can use the ``-f`` option::
-
- $ bitbake matchbox-desktop
- .
- .
- make some changes to the source code in the work directory
- .
- .
- $ bitbake matchbox-desktop -c compile -f
- $ bitbake matchbox-desktop
-
-This sequence first builds and then recompiles ``matchbox-desktop``. The
-last command reruns all tasks (basically the packaging tasks) after the
-compile. BitBake recognizes that the ``do_compile`` task was rerun and
-therefore understands that the other tasks also need to be run again.
-
-Another, shorter way to rerun a task and all
-:ref:`ref-manual/tasks:normal recipe build tasks`
-that depend on it is to use the ``-C`` option.
-
-.. note::
-
- This option is upper-cased and is separate from the ``-c``
- option, which is lower-cased.
-
-Using this option invalidates the given task and then runs the
-:ref:`ref-tasks-build` task, which is
-the default task if no task is given, and the tasks on which it depends.
-You could replace the final two commands in the previous example with
-the following single command::
-
- $ bitbake matchbox-desktop -C compile
-
-Internally, the ``-f`` and ``-C`` options work by tainting (modifying)
-the input checksum of the specified task. This tainting indirectly
-causes the task and its dependent tasks to be rerun through the normal
-task dependency mechanisms.
-
-.. note::
-
- BitBake explicitly keeps track of which tasks have been tainted in
- this fashion, and will print warnings such as the following for
- builds involving such tasks:
-
- .. code-block:: none
-
- WARNING: /home/ulf/poky/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.1.bb.do_compile is tainted from a forced run
-
-
- The purpose of the warning is to let you know that the work directory
- and build output might not be in the clean state they would be in for
- a "normal" build, depending on what actions you took. To get rid of
- such warnings, you can remove the work directory and rebuild the
- recipe, as follows::
-
- $ bitbake matchbox-desktop -c clean
- $ bitbake matchbox-desktop
-
-
-You can view a list of tasks in a given package by running the
-``do_listtasks`` task as follows::
-
- $ bitbake matchbox-desktop -c listtasks
-
-The results appear as output to the console and are also in
-the file ``${WORKDIR}/temp/log.do_listtasks``.
-
-General BitBake Problems
-------------------------
-
-You can see debug output from BitBake by using the ``-D`` option. The
-debug output gives more information about what BitBake is doing and the
-reason behind it. Each ``-D`` option you use increases the logging
-level. The most common usage is ``-DDD``.
-
-The output from ``bitbake -DDD -v targetname`` can reveal why BitBake
-chose a certain version of a package or why BitBake picked a certain
-provider. This command could also help you in a situation where you
-think BitBake did something unexpected.
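-
-Because the debug output can be lengthy, one convenient way to work
-with it (a suggestion rather than a requirement) is to capture it to a
-file while still watching the console::
-
-   $ bitbake -DDD -v somerecipe 2>&1 | tee bitbake-debug.log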
-
-Building with No Dependencies
------------------------------
-
-To build a specific recipe (``.bb`` file), you can use the following
-command form::
-
- $ bitbake -b somepath/somerecipe.bb
-
-This command form does
-not check for dependencies. Consequently, you should use it only when
-you know existing dependencies have been met.
-
-.. note::
-
- You can also specify fragments of the filename. In this case, BitBake
- checks for a unique match.
-
-Recipe Logging Mechanisms
--------------------------
-
-The Yocto Project provides several logging functions for producing
-debugging output and reporting errors and warnings. For Python
-functions, the following logging functions are available. All of these functions
-log to ``${T}/log.do_``\ `task`, and can also log to standard output
-(stdout) with the right settings:
-
-- ``bb.plain(msg)``: Writes msg as is to the log while also
- logging to stdout.
-
-- ``bb.note(msg)``: Writes "NOTE: msg" to the log. Also logs to
- stdout if BitBake is called with "-v".
-
-- ``bb.debug(level, msg)``: Writes "DEBUG: msg" to the
- log. Also logs to stdout if the log level is greater than or equal to
- level. See the ":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-intro:usage and syntax`" option
- in the BitBake User Manual for more information.
-
-- ``bb.warn(msg)``: Writes "WARNING: msg" to the log while also
- logging to stdout.
-
-- ``bb.error(msg)``: Writes "ERROR: msg" to the log while also
- logging to standard out (stdout).
-
- .. note::
-
- Calling this function does not cause the task to fail.
-
-- ``bb.fatal(msg)``: This logging function is similar to
- ``bb.error(msg)`` but also causes the calling task to fail.
-
- .. note::
-
- ``bb.fatal()`` raises an exception, which means you do not need to put a
- "return" statement after the function.
-
-The same logging functions are also available in shell functions, under
-the names ``bbplain``, ``bbnote``, ``bbdebug``, ``bbwarn``, ``bberror``,
-and ``bbfatal``. The
-:ref:`logging <ref-classes-logging>` class
-implements these functions. See that class in the ``meta/classes``
-folder of the :term:`Source Directory` for information.
-
-Logging With Python
-~~~~~~~~~~~~~~~~~~~
-
-When creating recipes using Python and inserting code that handles build
-logs, keep in mind the goal is to have informative logs while keeping
-the console as "silent" as possible. Also, if you want status messages
-in the log, use the "debug" loglevel.
-
-Following is an example written in Python. The code handles logging for
-a function that determines the number of tasks needed to be run. See the
-":ref:`ref-tasks-listtasks`"
-section for additional information::
-
- python do_listtasks() {
- bb.debug(2, "Starting to figure out the task list")
- if noteworthy_condition:
- bb.note("There are 47 tasks to run")
- bb.debug(2, "Got to point xyz")
- if warning_trigger:
- bb.warn("Detected warning_trigger, this might be a problem later.")
- if recoverable_error:
- bb.error("Hit recoverable_error, you really need to fix this!")
- if fatal_error:
- bb.fatal("fatal_error detected, unable to print the task list")
- bb.plain("The tasks present are abc")
- bb.debug(2, "Finished figuring out the tasklist")
- }
-
-Logging With Bash
-~~~~~~~~~~~~~~~~~
-
-When creating recipes using Bash and inserting code that handles build
-logs, you have the same goals: informative logs with minimal console output.
-The syntax you use for recipes written in Bash is similar to that of
-recipes written in Python described in the previous section.
-
-Following is an example written in Bash. The code logs the progress of
-the ``do_my_function`` function.
-::
-
- do_my_function() {
- bbdebug 2 "Running do_my_function"
- if [ exceptional_condition ]; then
- bbnote "Hit exceptional_condition"
- fi
- bbdebug 2 "Got to point xyz"
- if [ warning_trigger ]; then
- bbwarn "Detected warning_trigger, this might cause a problem later."
- fi
- if [ recoverable_error ]; then
- bberror "Hit recoverable_error, correcting"
- fi
- if [ fatal_error ]; then
- bbfatal "fatal_error detected"
- fi
- bbdebug 2 "Completed do_my_function"
- }
-
-
-Debugging Parallel Make Races
------------------------------
-
-A parallel ``make`` race occurs when the build consists of several parts
-that are run simultaneously and a situation occurs when the output or
-result of one part is not ready for use with a different part of the
-build that depends on that output. Parallel make races are annoying and
-can sometimes be difficult to reproduce and fix. However, there are some simple
-tips and tricks that can help you debug and fix them. This section
-presents a real-world example of an error encountered on the Yocto
-Project autobuilder and the process used to fix it.
-
-.. note::
-
- If you cannot properly fix a ``make`` race condition, you can work around it
- by clearing either the :term:`PARALLEL_MAKE` or :term:`PARALLEL_MAKEINST`
- variables.
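-
-For example, to disable parallel ``make`` for a single recipe only, you
-could add the following to that recipe or to your ``local.conf`` (the
-recipe name is a placeholder)::
-
-   PARALLEL_MAKE:pn-somerecipe = ""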
-
-The Failure
-~~~~~~~~~~~
-
-For this example, assume that you are building an image that depends on
-the "neard" package and that, during the build, BitBake runs into problems
-and creates the following output.
-
-.. note::
-
- This example log file has longer lines artificially broken to make
- the listing easier to read.
-
-If you examine the output or the log file, you see the failure during
-``make``:
-
-.. code-block:: none
-
- | DEBUG: SITE files ['endian-little', 'bit-32', 'ix86-common', 'common-linux', 'common-glibc', 'i586-linux', 'common']
- | DEBUG: Executing shell function do_compile
- | NOTE: make -j 16
- | make --no-print-directory all-am
- | /bin/mkdir -p include/near
- | /bin/mkdir -p include/near
- | /bin/mkdir -p include/near
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/types.h include/near/types.h
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/log.h include/near/log.h
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/plugin.h include/near/plugin.h
- | /bin/mkdir -p include/near
- | /bin/mkdir -p include/near
- | /bin/mkdir -p include/near
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/tag.h include/near/tag.h
- | /bin/mkdir -p include/near
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/adapter.h include/near/adapter.h
- | /bin/mkdir -p include/near
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/ndef.h include/near/ndef.h
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/tlv.h include/near/tlv.h
- | /bin/mkdir -p include/near
- | /bin/mkdir -p include/near
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/setting.h include/near/setting.h
- | /bin/mkdir -p include/near
- | /bin/mkdir -p include/near
- | /bin/mkdir -p include/near
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/device.h include/near/device.h
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/nfc_copy.h include/near/nfc_copy.h
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/snep.h include/near/snep.h
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/version.h include/near/version.h
- | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
- 0.14-r0/neard-0.14/include/dbus.h include/near/dbus.h
- | ./src/genbuiltin nfctype1 nfctype2 nfctype3 nfctype4 p2p > src/builtin.h
- | i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/pokybuild/yocto-autobuilder/nightly-x86/
- build/build/tmp/sysroots/qemux86 -DHAVE_CONFIG_H -I. -I./include -I./src -I./gdbus -I/home/pokybuild/
- yocto-autobuilder/nightly-x86/build/build/tmp/sysroots/qemux86/usr/include/glib-2.0
- -I/home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/sysroots/qemux86/usr/
- lib/glib-2.0/include -I/home/pokybuild/yocto-autobuilder/nightly-x86/build/build/
- tmp/sysroots/qemux86/usr/include/dbus-1.0 -I/home/pokybuild/yocto-autobuilder/
- nightly-x86/build/build/tmp/sysroots/qemux86/usr/lib/dbus-1.0/include -I/home/pokybuild/yocto-autobuilder/
- nightly-x86/build/build/tmp/sysroots/qemux86/usr/include/libnl3
- -DNEAR_PLUGIN_BUILTIN -DPLUGINDIR=\""/usr/lib/near/plugins"\"
- -DCONFIGDIR=\""/etc/neard\"" -O2 -pipe -g -feliminate-unused-debug-types -c
- -o tools/snep-send.o tools/snep-send.c
- | In file included from tools/snep-send.c:16:0:
- | tools/../src/near.h:41:23: fatal error: near/dbus.h: No such file or directory
- | #include <near/dbus.h>
- | ^
- | compilation terminated.
- | make[1]: *** [tools/snep-send.o] Error 1
- | make[1]: *** Waiting for unfinished jobs....
- | make: *** [all] Error 2
- | ERROR: oe_runmake failed
-
-Reproducing the Error
-~~~~~~~~~~~~~~~~~~~~~
-
-Because race conditions are intermittent, they do not manifest
-themselves every time you do the build. In fact, most times the build
-will complete without problems even though the potential race condition
-exists. Thus, once the error surfaces, you need a way to reproduce it.
-
-In this example, compiling the "neard" package is causing the problem.
-So the first thing to do is build "neard" locally. Before you start the
-build, set the
-:term:`PARALLEL_MAKE` variable
-in your ``local.conf`` file to a high number (e.g. "-j 20"). Using a
-high value for :term:`PARALLEL_MAKE` increases the chances of the race
-condition showing up::
-
- $ bitbake neard
-
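-For reference, the ``local.conf`` setting mentioned above might look like
-the following (a sketch; the job count is just an example and should be
-tuned to your build host)::
-
- PARALLEL_MAKE = "-j 20"
-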
-Once the local build for "neard" completes, start a ``devshell`` build::
-
- $ bitbake neard -c devshell
-
-For information on how to use a ``devshell``, see the
-":ref:`dev-manual/common-tasks:using a development shell`" section.
-
-In the ``devshell``, do the following::
-
- $ make clean
- $ make tools/snep-send.o
-
-The ``devshell`` commands make the failure clearly visible. In this
-case, there is a missing dependency for the ``neard``
-Makefile target. Here is some abbreviated, sample output with the
-missing dependency clearly visible at the end::
-
- i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/scott-lenovo/......
- .
- .
- .
- tools/snep-send.c
- In file included from tools/snep-send.c:16:0:
- tools/../src/near.h:41:23: fatal error: near/dbus.h: No such file or directory
- #include <near/dbus.h>
- ^
- compilation terminated.
- make: *** [tools/snep-send.o] Error 1
- $
-
-
-Creating a Patch for the Fix
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Because there is a missing dependency for the Makefile target, you need
-to patch the ``Makefile.am`` file, which is the file from which
-``Makefile.in`` is generated. You can use Quilt to create the patch::
-
- $ quilt new parallelmake.patch
- Patch patches/parallelmake.patch is now on top
- $ quilt add Makefile.am
- File Makefile.am added to patch patches/parallelmake.patch
-
-For more information on using Quilt, see the
-":ref:`dev-manual/common-tasks:using quilt in your workflow`" section.
-
-At this point you need to make the edits to ``Makefile.am`` to add the
-missing dependency. For our example, you have to add the following line
-to the file::
-
- tools/snep-send.$(OBJEXT): include/near/dbus.h
-
-Once you have edited the file, use the ``refresh`` command to create the
-patch::
-
- $ quilt refresh
- Refreshed patch patches/parallelmake.patch
-
-Once the patch file is created, you need to add it back to the originating
-recipe folder. Here is an example assuming a top-level
-:term:`Source Directory` named ``poky``::
-
- $ cp patches/parallelmake.patch poky/meta/recipes-connectivity/neard/neard
-
-The final thing you need to do to implement the fix in the build is to
-update the "neard" recipe (i.e. ``neard-0.14.bb``) so that the
-:term:`SRC_URI` statement includes
-the patch file. The recipe file is in the folder above the patch. Here
-is what the edited :term:`SRC_URI` statement would look like::
-
- SRC_URI = "${KERNELORG_MIRROR}/linux/network/nfc/${BPN}-${PV}.tar.xz \
- file://neard.in \
- file://neard.service.in \
- file://parallelmake.patch \
- "
-
-With the patch complete and moved to the correct folder and the
-:term:`SRC_URI` statement updated, you can exit the ``devshell``::
-
- $ exit
-
-Testing the Build
-~~~~~~~~~~~~~~~~~
-
-With everything in place, you can get back to trying the build again
-locally::
-
- $ bitbake neard
-
-This build should succeed.
-
-Now you can open up a ``devshell`` again and repeat the clean and make
-operations as follows::
-
- $ bitbake neard -c devshell
- $ make clean
- $ make tools/snep-send.o
-
-The build should work without issue.
-
-As with all solved problems, if they originated upstream, you need to
-submit the fix for the recipe in OE-Core and upstream so that the
-problem is taken care of at its source. See the
-":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
-section for more information.
-
-Debugging With the GNU Project Debugger (GDB) Remotely
-------------------------------------------------------
-
-GDB allows you to examine running programs, which in turn helps you to
-understand and fix problems. It also allows you to perform post-mortem
-style analysis of program crashes. GDB is available as a package within
-the Yocto Project and is installed in SDK images by default. See the
-":ref:`ref-manual/images:Images`" chapter in the Yocto
-Project Reference Manual for a description of these images. You can find
-information on GDB at https://sourceware.org/gdb/.
-
-.. note::
-
- For best results, install debug (``-dbg``) packages for the applications you
- are going to debug. Doing so makes extra debug symbols available that give
- you more meaningful output.
-
-Sometimes, due to memory or disk space constraints, it is not possible
-to use GDB directly on the remote target to debug applications. These
-constraints arise because GDB needs to load the debugging information
-and the binaries of the process being debugged. Additionally, GDB needs
-to perform many computations to locate information such as function
-names, variable names and values, stack traces and so forth - even
-before starting the debugging process. These extra computations place
-more load on the target system and can alter the characteristics of the
-program being debugged.
-
-To help get past the previously mentioned constraints, there are two
-methods you can use: running a debuginfod server and using gdbserver.
-
-Using the debuginfod server method
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-``debuginfod`` from ``elfutils`` is a way to distribute ``debuginfo`` files.
-Running a ``debuginfod`` server makes debug symbols readily available,
-which means you don't need to download debugging information
-and the binaries of the process being debugged. You can just fetch
-debug symbols from the server.
-
-To run a ``debuginfod`` server, you need to do the following:
-
-- Ensure that ``debuginfod`` is present in :term:`DISTRO_FEATURES`
- (it already is in ``OpenEmbedded-core`` defaults and ``poky`` reference distribution).
- If not, set in your distro config file or in ``local.conf``::
-
- DISTRO_FEATURES:append = " debuginfod"
-
- This distro feature enables the server and client library in ``elfutils``,
- and enables ``debuginfod`` support in clients (at the moment, ``gdb`` and ``binutils``).
-
-- Run the following commands to launch the ``debuginfod`` server on the host::
-
- $ oe-debuginfod
-
-- To use ``debuginfod`` on the target, you need to know the ip:port where
- ``debuginfod`` is listening on the host (port defaults to 8002), and export
- that into the shell environment, for example in ``qemu``::
-
- root@qemux86-64:~# export DEBUGINFOD_URLS="http://192.168.7.1:8002/"
-
-- Then debug info fetching should simply work when running the target ``gdb``,
- ``readelf`` or ``objdump``, for example::
-
- root@qemux86-64:~# gdb /bin/cat
- ...
- Reading symbols from /bin/cat...
- Downloading separate debug info for /bin/cat...
- Reading symbols from /home/root/.cache/debuginfod_client/923dc4780cfbc545850c616bffa884b6b5eaf322/debuginfo...
-
-- It's also possible to use ``debuginfod-find`` to just query the server::
-
- root@qemux86-64:~# debuginfod-find debuginfo /bin/ls
- /home/root/.cache/debuginfod_client/356edc585f7f82d46f94fcb87a86a3fe2d2e60bd/debuginfo
-
-
-Using the gdbserver method
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-gdbserver runs on the remote target and does not load any debugging
-information from the debugged process. Instead, the debugging
-information is processed by a GDB instance running on a remote computer,
-the host GDB. The host GDB then sends control commands to gdbserver to
-make it stop or start the debugged program, as well as read or write
-memory regions of that debugged program. All the debugging information
-loaded and processed as well as all the heavy debugging is done by the
-host GDB. Offloading these processes gives the gdbserver running on the
-target a chance to remain small and fast.
-
-Because the host GDB is responsible for loading the debugging
-information and for doing the necessary processing to make actual
-debugging happen, you have to make sure the host can access the
-unstripped binaries complete with their debugging information and also
-be sure the target is compiled with no optimizations. The host GDB must
-also have local access to all the libraries used by the debugged
-program. Because gdbserver does not need any local debugging
-information, the binaries on the remote target can remain stripped.
-However, the binaries must also be compiled without optimization so they
-match the host's binaries.
-
-To remain consistent with GDB documentation and terminology, the binary
-being debugged on the remote target machine is referred to as the
-"inferior" binary. For documentation on GDB see the `GDB
-site <https://sourceware.org/gdb/documentation/>`__.
-
-The following steps show you how to debug using the GNU project
-debugger.
-
-1. *Configure your build system to construct the companion debug
- filesystem:*
-
- In your ``local.conf`` file, set the following::
-
- IMAGE_GEN_DEBUGFS = "1"
- IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
-
- These options cause the
- OpenEmbedded build system to generate a special companion filesystem
- fragment, which contains the source files and debug symbols matching
- your deployable filesystem. The build system does this by looking at
- what is in the deployed filesystem and pulling in the corresponding
- ``-dbg`` packages.
-
- The companion debug filesystem is not a complete filesystem, but only
- contains the debug fragments. This filesystem must be combined with
- the full filesystem for debugging. Subsequent steps in this procedure
- show how to combine the partial filesystem with the full filesystem.
-
-2. *Configure the system to include gdbserver in the target filesystem:*
-
- Make the following addition in either your ``local.conf`` file or in
- an image recipe::
-
- IMAGE_INSTALL:append = " gdbserver"
-
- The change makes
- sure the ``gdbserver`` package is included.
-
-3. *Build the environment:*
-
- Use the following command to construct the image and the companion
- Debug Filesystem::
-
- $ bitbake image
-
- Build the cross GDB component and
- make it available for debugging. Build the SDK that matches the
- image. Building the SDK is best for a production build that can be
- used later for debugging, especially during long term maintenance::
-
- $ bitbake -c populate_sdk image
-
- Alternatively, you can build the minimal toolchain components that
- match the target. Doing so creates a smaller than typical SDK and
- only contains a minimal set of components with which to build simple
- test applications, as well as run the debugger::
-
- $ bitbake meta-toolchain
-
- A final method is to build GDB itself within the build system::
-
- $ bitbake gdb-cross-<architecture>
-
- Doing so produces a temporary copy of
- ``cross-gdb`` you can use for debugging during development. While
- this is the quickest approach, the two previous methods in this step
- are better when considering long-term maintenance strategies.
-
- .. note::
-
- If you run ``bitbake gdb-cross``, the OpenEmbedded build system suggests
- the actual recipe to use (e.g. ``gdb-cross-i586``). The suggestion is
- usually the actual name you want to use.
-
-4. *Set up the* ``debugfs``\ *:*
-
- Run the following commands to set up the ``debugfs``::
-
- $ mkdir debugfs
- $ cd debugfs
- $ tar xvfj build-dir/tmp-glibc/deploy/images/machine/image.rootfs.tar.bz2
- $ tar xvfj build-dir/tmp-glibc/deploy/images/machine/image-dbg.rootfs.tar.bz2
-
-5. *Set up GDB:*
-
- Install the SDK (if you built one) and then source the correct
- environment file. Sourcing the environment file puts the SDK in your
- ``PATH`` environment variable.
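-
- For example, installing the SDK to its default location and then sourcing
- its environment setup script might look like the following (a sketch; the
- installer and script names shown are hypothetical and depend on your
- image, release, and architecture)::
-
- $ ./poky-glibc-x86_64-core-image-minimal-core2-64-toolchain-4.0.sh
- $ source /opt/poky/4.0/environment-setup-core2-64-poky-linux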
-
- If you are using the build system, GDB is located in
- `build-dir`\ ``/tmp/sysroots/``\ `host`\ ``/usr/bin/``\ `architecture`\ ``/``\ `architecture`\ ``-gdb``.
-
-6. *Boot the target:*
-
- For information on how to run QEMU, see the `QEMU
- Documentation <https://wiki.qemu.org/Documentation/GettingStartedDevelopers>`__.
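-
- If your target is a QEMU machine built by the build system, booting it
- can be as simple as the following (a sketch, assuming the ``qemux86-64``
- machine and a previously built image)::
-
- $ runqemu qemux86-64 nographic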
-
- .. note::
-
- Be sure to verify that your host can access the target via TCP.
-
-7. *Debug a program:*
-
- Debugging a program involves running gdbserver on the target and then
- running GDB on the host. The example in this step debugs ``gzip``:
-
- .. code-block:: shell
-
- root@qemux86:~# gdbserver localhost:1234 /bin/gzip --help
-
- For
- additional gdbserver options, see the `GDB Server
- Documentation <https://www.gnu.org/software/gdb/documentation/>`__.
-
- After running gdbserver on the target, you need to run GDB on the
- host, configure it, and connect to the target. Use these commands::
-
- $ cd directory-holding-the-debugfs-directory
- $ arch-gdb
- (gdb) set sysroot debugfs
- (gdb) set substitute-path /usr/src/debug debugfs/usr/src/debug
- (gdb) target remote IP-of-target:1234
-
- At this
- point, everything should automatically load (i.e. matching binaries,
- symbols and headers).
-
- .. note::
-
- The GDB ``set`` commands in the previous example can be placed into the
- user's ``~/.gdbinit`` file. Upon starting, GDB automatically runs whatever
- commands are in that file.
-
-8. *Deploying without a full image rebuild:*
-
- In many cases, during development you want a quick method to deploy a
- new binary to the target and debug it, without waiting for a full
- image build.
-
- One approach to solving this situation is to just build the component
- you want to debug. Once you have built the component, copy the
- executable directly to both the target and the host ``debugfs``.
-
- If the binary is processed through the debug splitting in
- OpenEmbedded, you should also copy the debug items (i.e. ``.debug``
- contents and corresponding ``/usr/src/debug`` files) from the work
- directory. Here is an example::
-
- $ bitbake bash
- $ bitbake -c devshell bash
- $ cd ..
- $ scp packages-split/bash/bin/bash target:/bin/bash
- $ cp -a packages-split/bash-dbg/\* path/debugfs
-
-Debugging with the GNU Project Debugger (GDB) on the Target
------------------------------------------------------------
-
-The previous section addressed using GDB remotely for debugging
-purposes, which is the most usual case due to the inherent hardware
-limitations on many embedded devices. However, debugging in the target
-hardware itself is also possible with more powerful devices. This
-section describes what you need to do in order to support using GDB to
-debug on the target hardware.
-
-To support this kind of debugging, you need to do the following:
-
-- Ensure that GDB is on the target. You can do this by adding "gdb" to
- :term:`IMAGE_INSTALL`::
-
- IMAGE_INSTALL:append = " gdb"
-
- Alternatively, you can add "tools-debug" to :term:`IMAGE_FEATURES`::
-
- IMAGE_FEATURES:append = " tools-debug"
-
-- Ensure that debug symbols are present. You can make sure these
- symbols are present by installing ``-dbg``::
-
- IMAGE_INSTALL:append = " packagename-dbg"
-
- Alternatively, you can do the following to include
- all the debug symbols::
-
- IMAGE_FEATURES:append = " dbg-pkgs"
-
-.. note::
-
- To improve the debug information accuracy, you can reduce the level
- of optimization used by the compiler. For example, when adding the
- following line to your ``local.conf`` file, you will reduce optimization
- from :term:`FULL_OPTIMIZATION` of "-O2" to :term:`DEBUG_OPTIMIZATION`
- of "-O -fno-omit-frame-pointer"::
-
- DEBUG_BUILD = "1"
-
- Consider that this will reduce the application's performance and is
- recommended only for debugging purposes.
-
-Other Debugging Tips
---------------------
-
-Here are some other tips that you might find useful:
-
-- When adding new packages, it is worth watching for undesirable items
- making their way into compiler command lines. For example, you do not
- want references to local system files like ``/usr/lib/`` or
- ``/usr/include/``.
-
-- If you want to remove the ``psplash`` boot splashscreen, add
- ``psplash=false`` to the kernel command line. Doing so prevents
- ``psplash`` from loading and thus allows you to see the console. It
- is also possible to switch out of the splashscreen by switching the
- virtual console (e.g. Fn+Left or Fn+Right on a Zaurus).
-
-- Removing :term:`TMPDIR` (usually
- ``tmp/``, within the
- :term:`Build Directory`) can often fix
- temporary build issues. Removing :term:`TMPDIR` is usually a relatively
- cheap operation, because task output will be cached in
- :term:`SSTATE_DIR` (usually
- ``sstate-cache/``, which is also in the Build Directory).
-
- .. note::
-
- Removing :term:`TMPDIR` might be a workaround rather than a fix.
- Consequently, trying to determine the underlying cause of an issue before
- removing the directory is a good idea.
-
-- Understanding how a feature is used in practice within existing
- recipes can be very helpful. It is recommended that you configure
- some method that allows you to quickly search through files.
-
- Using GNU Grep, you can use the following shell function to
- recursively search through common recipe-related files, skipping
- binary files, ``.git`` directories, and the Build Directory (assuming
- its name starts with "build")::
-
- g() {
- grep -Ir \
- --exclude-dir=.git \
- --exclude-dir='build*' \
- --include='*.bb*' \
- --include='*.inc*' \
- --include='*.conf*' \
- --include='*.py*' \
- "$@"
- }
-
- Following are some usage examples::
-
- $ g FOO # Search recursively for "FOO"
- $ g -i foo # Search recursively for "foo", ignoring case
- $ g -w FOO # Search recursively for "FOO" as a word, ignoring e.g. "FOOBAR"
-
- If figuring
- out how some feature works requires a lot of searching, it might
- indicate that the documentation should be extended or improved. In
- such cases, consider filing a documentation bug using the Yocto
- Project implementation of
- :yocto_bugs:`Bugzilla <>`. For information on
- how to submit a bug against the Yocto Project, see the Yocto Project
- Bugzilla :yocto_wiki:`wiki page </Bugzilla_Configuration_and_Bug_Tracking>`
- and the
- ":ref:`dev-manual/common-tasks:submitting a defect against the yocto project`"
- section.
-
- .. note::
-
- The manuals might not be the right place to document variables
- that are purely internal and have a limited scope (e.g. internal
- variables used to implement a single ``.bbclass`` file).
-
-Making Changes to the Yocto Project
-===================================
-
-Because the Yocto Project is an open-source, community-based project,
-you can effect changes to the project. This section presents procedures
-that show you how to submit a defect against the project and how to
-submit a change.
-
-Submitting a Defect Against the Yocto Project
----------------------------------------------
-
-Use the Yocto Project implementation of
-`Bugzilla <https://www.bugzilla.org/about/>`__ to submit a defect (bug)
-against the Yocto Project. For additional information on this
-implementation of Bugzilla see the ":ref:`Yocto Project
-Bugzilla <resources-bugtracker>`" section in the
-Yocto Project Reference Manual. For more detail on any of the following
-steps, see the Yocto Project
-:yocto_wiki:`Bugzilla wiki page </Bugzilla_Configuration_and_Bug_Tracking>`.
-
-Use the following general steps to submit a bug:
-
-1. Open the Yocto Project implementation of :yocto_bugs:`Bugzilla <>`.
-
-2. Click "File a Bug" to enter a new bug.
-
-3. Choose the appropriate "Classification", "Product", and "Component"
- for which the bug was found. Bugs for the Yocto Project fall into
- one of several classifications, which in turn break down into
- several products and components. For example, for a bug against the
- ``meta-intel`` layer, you would choose "Build System, Metadata &
- Runtime", "BSPs", and "bsps-meta-intel", respectively.
-
-4. Choose the "Version" of the Yocto Project for which you found the
- bug (e.g. &DISTRO;).
-
-5. Determine and select the "Severity" of the bug. The severity
- indicates how the bug impacted your work.
-
-6. Choose the "Hardware" that the bug impacts.
-
-7. Choose the "Architecture" that the bug impacts.
-
-8. Choose a "Documentation change" item for the bug. Fixing a bug might
- or might not affect the Yocto Project documentation. If you are
- unsure of the impact to the documentation, select "Don't Know".
-
-9. Provide a brief "Summary" of the bug. Try to limit your summary to
- just a line or two and be sure to capture the essence of the bug.
-
-10. Provide a detailed "Description" of the bug. You should provide as
- much detail as you can about the context, behavior, output, and so
- forth that surrounds the bug. You can even attach supporting files
- for output from logs by using the "Add an attachment" button.
-
-11. Click the "Submit Bug" button to submit the bug. A new Bugzilla number
- is assigned to the bug and the defect is logged in the bug tracking
- system.
-
-Once you file a bug, the bug is processed by the Yocto Project Bug
-Triage Team and further details concerning the bug are assigned (e.g.
-priority and owner). You are the "Submitter" of the bug and any further
-categorization, progress, or comments on the bug result in Bugzilla
-sending you an automated email concerning the particular change or
-progress to the bug.
-
-Submitting a Change to the Yocto Project
-----------------------------------------
-
-Contributions to the Yocto Project and OpenEmbedded are very welcome.
-Because the system is extremely configurable and flexible, we recognize
-that developers will want to extend, configure or optimize it for their
-specific uses.
-
-The Yocto Project uses a mailing list and a patch-based workflow that is
-similar to the Linux kernel but contains important differences. In
-general, there is a mailing list through which you can submit patches. You
-should send patches to the appropriate mailing list so that they can be
-reviewed and merged by the appropriate maintainer. The specific mailing
-list you need to use depends on the location of the code you are
-changing. Each component (e.g. layer) should have a ``README`` file that
-indicates where to send the changes and which process to follow.
-
-You can send the patch to the mailing list using whichever approach you
-feel comfortable with to generate the patch. Once sent, the patch is
-usually reviewed by the community at large. If somebody has concerns
-with the patch, they will usually voice their concern over the mailing
-list. If a patch does not receive any negative reviews, the maintainer
-of the affected layer typically takes the patch, tests it, and then
-based on successful testing, merges the patch.
-
-The "poky" repository, which is the Yocto Project's reference build
-environment, is a hybrid repository that contains several individual
-pieces (e.g. BitBake, Metadata, documentation, and so forth) built using
-the combo-layer tool. The upstream location used for submitting changes
-varies by component:
-
-- *Core Metadata:* Send your patch to the
- :oe_lists:`openembedded-core </g/openembedded-core>`
- mailing list. For example, a change to anything under the ``meta`` or
- ``scripts`` directories should be sent to this mailing list.
-
-- *BitBake:* For changes to BitBake (i.e. anything under the
- ``bitbake`` directory), send your patch to the
- :oe_lists:`bitbake-devel </g/bitbake-devel>`
- mailing list.
-
-- *"meta-\*" trees:* These trees contain Metadata. Use the
- :yocto_lists:`poky </g/poky>` mailing list.
-
-- *Documentation*: For changes to the Yocto Project documentation, use the
- :yocto_lists:`docs </g/docs>` mailing list.
-
-For changes to other layers hosted in the Yocto Project source
-repositories (i.e. ``yoctoproject.org``) and tools use the
-:yocto_lists:`Yocto Project </g/yocto/>` general mailing list.
-
-.. note::
-
- Sometimes a layer's documentation specifies to use a particular
- mailing list. If so, use that list.
-
-For additional recipes that do not fit into the core Metadata, you
-should determine which layer the recipe should go into and submit the
-change in the manner recommended by the documentation (e.g. the
-``README`` file) supplied with the layer. If in doubt, please ask on the
-Yocto general mailing list or on the openembedded-devel mailing list.
-
-You can also push a change upstream and request a maintainer to pull the
-change into the component's upstream repository. You do this by pushing
-to a contribution repository that is upstream. See the
-":ref:`overview-manual/development-environment:git workflows and the yocto project`"
-section in the Yocto Project Overview and Concepts Manual for additional
-concepts on working in the Yocto Project development environment.
-
-Maintainers commonly use ``-next`` branches to test submissions prior to
-merging patches. Thus, you can get an idea of the status of a patch based on
-whether the patch has been merged into one of these branches. The commonly
-used testing branches for OpenEmbedded-Core are as follows:
-
-- *openembedded-core "master-next" branch:* This branch is part of the
- :oe_git:`openembedded-core </openembedded-core/>` repository and contains
- proposed changes to the core metadata.
-
-- *poky "master-next" branch:* This branch is part of the
- :yocto_git:`poky </poky/>` repository and combines proposed
- changes to bitbake, the core metadata and the poky distro.
-
-Similarly, stable branches maintained by the project may have corresponding
-``-next`` branches which collect proposed changes. For example,
-``&DISTRO_NAME_NO_CAP;-next`` and ``&DISTRO_NAME_NO_CAP_MINUS_ONE;-next``
-branches in both the "openembedded-core" and "poky" repositories.
-
-Other layers may have similar testing branches but there is no formal
-requirement or standard for these so please check the documentation for the
-layers you are contributing to.
-
-The following sections provide procedures for submitting a change.
-
-Preparing Changes for Submission
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-1. *Make Your Changes Locally:* Make your changes in your local Git
- repository. You should make small, controlled, isolated changes.
- Keeping changes small and isolated aids review, makes
- merging/rebasing easier and keeps the change history clean should
- anyone need to refer to it in future.
-
-2. *Stage Your Changes:* Stage your changes by using the ``git add``
- command on each file you changed.
-
-3. *Commit Your Changes:* Commit the change by using the ``git commit``
- command. Make sure your commit information follows standards by
- following these accepted conventions:
-
- - Be sure to include a "Signed-off-by:" line in the same style as
- required by the Linux kernel. This can be done by using the
- ``git commit -s`` command. Adding this line signifies that you,
- the submitter, have agreed to the Developer's Certificate of
- Origin 1.1 as follows:
-
- .. code-block:: none
-
- Developer's Certificate of Origin 1.1
-
- By making a contribution to this project, I certify that:
-
- (a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
- (b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
- (c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
- (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-
- - Provide a single-line summary of the change and, if more
- explanation is needed, provide more detail in the body of the
- commit. This summary is typically viewable in the "shortlist" of
- changes. Thus, providing something short and descriptive that
- gives the reader a summary of the change is useful when viewing a
- list of many commits. You should prefix this short description
- with the recipe name (if changing a recipe), or else with the
- short form path to the file being changed.
-
- - For the body of the commit message, provide detailed information
- that describes what you changed, why you made the change, and the
- approach you used. It might also be helpful if you mention how you
- tested the change. Provide as much detail as you can in the body
- of the commit message.
-
- .. note::
-
- You do not need to provide a more detailed explanation of a
- change if the change is minor to the point of the single line
- summary providing all the information.
-
- - If the change addresses a specific bug or issue that is associated
- with a bug-tracking ID, include a reference to that ID in your
- detailed description. For example, the Yocto Project uses a
- specific convention for bug references - any commit that addresses
- a specific bug should use the following form for the detailed
- description. Be sure to use the actual bug-tracking ID from
- Bugzilla for bug-id::
-
- Fixes [YOCTO #bug-id]
-
- detailed description of change
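-
- Putting these conventions together, a complete commit message might
- look like the following (an illustrative sketch only; the recipe name,
- bug number, and author shown are hypothetical)::
-
- foo: fix parallel build failure
-
- The tools helper target was missing a dependency on a generated
- header, which caused intermittent failures in highly parallel builds.
- Add the missing dependency.
-
- Fixes [YOCTO #12345]
-
- Signed-off-by: Jane Developer <jane@example.com>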
-
-Using Email to Submit a Patch
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Depending on the components changed, you need to submit the email to a
-specific mailing list. For some guidance on which mailing list to use,
-see the
-:ref:`list <dev-manual/common-tasks:submitting a change to the yocto project>`
-at the beginning of this section. For a description of all the available
-mailing lists, see the ":ref:`Mailing Lists <resources-mailinglist>`" section in the
-Yocto Project Reference Manual.
-
-Here is the general procedure on how to submit a patch through email
-without using the scripts once the steps in
-:ref:`dev-manual/common-tasks:preparing changes for submission` have been followed:
-
-1. *Format the Commit:* Format the commit into an email message. To
- format commits, use the ``git format-patch`` command. When you
- provide the command, you must include a revision list or a number of
- patches as part of the command. For example, either of these two
- commands takes your most recent single commit and formats it as an
- email message in the current directory::
-
- $ git format-patch -1
-
- or ::
-
- $ git format-patch HEAD~
-
- After the command is run, the current directory contains a numbered
- ``.patch`` file for the commit.
-
- If you provide several commits as part of the command, the
- ``git format-patch`` command produces a series of numbered files in
- the current directory – one for each commit. If you have more than
- one patch, you should also use the ``--cover-letter`` option with the
- command, which generates a cover letter as the first "patch" in the
- series. You can then edit the cover letter to provide a description
- for the series of patches. For information on the
- ``git format-patch`` command, see ``GIT_FORMAT_PATCH(1)`` displayed
- using the ``man git-format-patch`` command.
-
- .. note::
-
- If you are or will be a frequent contributor to the Yocto Project
- or to OpenEmbedded, you might consider requesting a contrib area
- and the necessary associated rights.
-
-2. *Send the patches via email:* Send the patches to the recipients and
- relevant mailing lists by using the ``git send-email`` command.
-
- .. note::
-
- In order to use ``git send-email``, you must have the proper Git packages
- installed on your host.
- For Ubuntu, Debian, and Fedora the package is ``git-email``.
-
- The ``git send-email`` command sends email by using a local or remote
- Mail Transport Agent (MTA) such as ``msmtp``, ``sendmail``, or
- through a direct ``smtp`` configuration in your Git ``~/.gitconfig``
- file. If you are submitting patches through email only, it is very
- important that you submit them without any whitespace or HTML
- formatting that either you or your mailer introduces. The maintainer
- that receives your patches needs to be able to save and apply them
- directly from your emails. A good way to verify that what you are
- sending will be applicable by the maintainer is to do a dry run and
- send them to yourself and then save and apply them as the maintainer
- would.
-
- The ``git send-email`` command is the preferred method for sending
- your patches using email since there is no risk of compromising
- whitespace in the body of the message, which can occur when you use
- your own mail client. The command also has several options that let
- you specify recipients and perform further editing of the email
- message. For information on how to use the ``git send-email``
- command, see ``GIT-SEND-EMAIL(1)`` displayed using the
- ``man git-send-email`` command.
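-
- As a rough sketch, a minimal SMTP setup in your ``~/.gitconfig`` file
- might look like the following (the server and user values are examples
- only; use the settings for your own mail provider)::
-
- [sendemail]
- smtpServer = smtp.example.com
- smtpUser = you@example.com
- smtpEncryption = tls
- smtpServerPort = 587
-
- The patches produced by ``git format-patch`` could then be sent with a
- command such as the following (the list address is just an example; use
- the list appropriate for the component you are changing)::
-
- $ git send-email --to=openembedded-core@lists.openembedded.org *.patch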
-
-The Yocto Project uses a `Patchwork instance <https://patchwork.openembedded.org/>`__
-to track the status of patches submitted to the various mailing lists and to
-support automated patch testing. Each submitted patch is checked for common
-mistakes and deviations from the expected patch format and submitters are
-notified by patchtest if such mistakes are found. This process helps to
-reduce the burden of patch review on maintainers.
-
-.. note::
-
- This system is imperfect and changes can sometimes get lost in the flow.
- Asking about the status of a patch or change is reasonable if the change
- has been idle for a while with no feedback.
-
-Using Scripts to Push a Change Upstream and Request a Pull
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-For larger patch series it is preferable to send a pull request which not
-only includes the patch but also a pointer to a branch that can be pulled
-from. This involves making a local branch for your changes, pushing this
-branch to an accessible repository and then using the ``create-pull-request``
-and ``send-pull-request`` scripts from openembedded-core to create and send a
-patch series with a link to the branch for review.
-
-Follow this procedure to push a change to an upstream "contrib" Git
-repository once the steps in :ref:`dev-manual/common-tasks:preparing changes for submission` have
-been followed:
-
-.. note::
-
- You can find general Git information on how to push a change upstream
- in the
- `Git Community Book <https://git-scm.com/book/en/v2/Distributed-Git-Distributed-Workflows>`__.
-
-1. *Push Your Commits to a "Contrib" Upstream:* If you have arranged for
- permissions to push to an upstream contrib repository, push the
- change to that repository::
-
- $ git push upstream_remote_repo local_branch_name
-
- For example, suppose you have permissions to push
- into the upstream ``meta-intel-contrib`` repository and you are
- working in a local branch named `your_name`\ ``/README``. The following
- command pushes your local commits to the ``meta-intel-contrib``
- upstream repository and puts the commit in a branch named
- `your_name`\ ``/README``::
-
- $ git push meta-intel-contrib your_name/README
-
-2. *Determine Who to Notify:* Determine the maintainer or the mailing
- list that you need to notify for the change.
-
- Before submitting any change, you need to be sure who the maintainer
- is or what mailing list that you need to notify. Use either these
- methods to find out:
-
- - *Maintenance File:* Examine the ``maintainers.inc`` file, which is
- located in the :term:`Source Directory` at
- ``meta/conf/distro/include``, to see who is responsible for code.
-
- - *Search by File:* Using :ref:`overview-manual/development-environment:git`, you can
- enter the following command to bring up a short list of all
- commits against a specific file::
-
- git shortlog -- filename
-
- Just provide the name of the file for which you are interested. The
- information returned is not ordered by history but does include a
- list of everyone who has committed grouped by name. From the list,
- you can see who is responsible for the bulk of the changes against
- the file.
-
- - *Examine the List of Mailing Lists:* For a list of the Yocto
- Project and related mailing lists, see the ":ref:`Mailing
- lists <resources-mailinglist>`" section in
- the Yocto Project Reference Manual.
-
-3. *Make a Pull Request:* Notify the maintainer or the mailing list that
- you have pushed a change by making a pull request.
-
- The Yocto Project provides two scripts that conveniently let you
- generate and send pull requests to the Yocto Project. These scripts
- are ``create-pull-request`` and ``send-pull-request``. You can find
- these scripts in the ``scripts`` directory within the
- :term:`Source Directory` (e.g.
- ``poky/scripts``).
-
- Using these scripts correctly formats the requests without
- introducing any whitespace or HTML formatting. The maintainer that
- receives your patches either directly or through the mailing list
- needs to be able to save and apply them directly from your emails.
- Using these scripts is the preferred method for sending patches.
-
- First, create the pull request. For example, the following command
- runs the script, specifies the upstream repository in the contrib
- directory into which you pushed the change, and provides a subject
- line in the created patch files::
-
- $ poky/scripts/create-pull-request -u meta-intel-contrib -s "Updated Manual Section Reference in README"
-
- Running this script forms ``*.patch`` files in a folder named
- ``pull-``\ `PID` in the current directory. One of the patch files is a
- cover letter.
-
- Before running the ``send-pull-request`` script, you must edit the
- cover letter patch to insert information about your change. After
- editing the cover letter, send the pull request. For example, the
- following command runs the script and specifies the patch directory
- and email address. In this example, the email address is a mailing
- list::
-
- $ poky/scripts/send-pull-request -p ~/meta-intel/pull-10565 -t meta-intel@lists.yoctoproject.org
-
- You need to follow the prompts as the script is interactive.
-
- .. note::
-
- For help on using these scripts, simply provide the ``-h``
- argument as follows::
-
- $ poky/scripts/create-pull-request -h
- $ poky/scripts/send-pull-request -h
-
-Responding to Patch Review
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You may get feedback on your submitted patches from other community members
-or from the automated patchtest service. If issues are identified in your
-patch then it is usually necessary to address these before the patch will be
-accepted into the project. In this case you should amend the patch according
-to the feedback and submit an updated version to the relevant mailing list,
-copying in the reviewers who provided feedback to the previous version of the
-patch.
-
-The patch should be amended using ``git commit --amend`` or perhaps ``git
-rebase`` for more expert git users. You should also modify the ``[PATCH]``
-tag in the email subject line when sending the revised patch to mark the new
-iteration as ``[PATCH v2]``, ``[PATCH v3]``, etc as appropriate. This can be
-done by passing the ``-v`` argument to ``git format-patch`` with a version
-number.
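-
-For example, after amending the commit, a second version of a single
-patch can be regenerated with::
-
- $ git format-patch -v2 -1
-
-This produces a patch file whose subject line is prefixed with
-"[PATCH v2]", which can then be sent with ``git send-email`` as before.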
-
-Lastly please ensure that you also test your revised changes. In particular
-please don't just edit the patch file written out by ``git format-patch`` and
-resend it.
-
-Submitting Changes to Stable Release Branches
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The process for proposing changes to a Yocto Project stable branch differs
-from the steps described above. Changes to a stable branch must address
-identified bugs or CVEs and should be made carefully in order to avoid the
-risk of introducing new bugs or breaking backwards compatibility. Typically
-bug fixes must already be accepted into the master branch before they can be
-backported to a stable branch unless the bug in question does not affect the
-master branch or the fix on the master branch is unsuitable for backporting.
-
-The list of stable branches along with the status and maintainer for each
-branch can be obtained from the
-:yocto_wiki:`Releases wiki page </Releases>`.
-
-.. note::
-
- Changes will not typically be accepted for branches which are marked as
- End-Of-Life (EOL).
-
-With this in mind, the steps to submit a change for a stable branch are as
-follows:
-
-1. *Identify the bug or CVE to be fixed:* This information should be
- collected so that it can be included in your submission.
-
- See :ref:`dev-manual/common-tasks:checking for vulnerabilities`
- for details about CVE tracking.
-
-2. *Check if the fix is already present in the master branch:* This will
- result in the most straightforward path into the stable branch for the
- fix.
-
- a. *If the fix is present in the master branch - Submit a backport request
- by email:* You should send an email to the relevant stable branch
- maintainer and the mailing list with details of the bug or CVE to be
- fixed, the commit hash on the master branch that fixes the issue and
- the stable branches which you would like this fix to be backported to.
-
- b. *If the fix is not present in the master branch - Submit the fix to the
- master branch first:* This will ensure that the fix passes through the
- project's usual patch review and test processes before being accepted.
- It will also ensure that bugs are not left unresolved in the master
- branch itself. Once the fix is accepted in the master branch a backport
- request can be submitted as above.
-
- c. *If the fix is unsuitable for the master branch - Submit a patch
- directly for the stable branch:* This method should be considered as a
- last resort. It is typically necessary when the master branch is using
- a newer version of the software which includes an upstream fix for the
- issue or when the issue has been fixed on the master branch in a way
- that introduces backwards incompatible changes. In this case follow the
- steps in :ref:`dev-manual/common-tasks:preparing changes for submission` and
- :ref:`dev-manual/common-tasks:using email to submit a patch` but modify the subject header of your patch
- email to include the name of the stable branch which you are
- targeting. This can be done using the ``--subject-prefix`` argument to
- ``git format-patch``, for example to submit a patch to the dunfell
- branch use
- ``git format-patch --subject-prefix='&DISTRO_NAME_NO_CAP_MINUS_ONE;][PATCH' ...``.
-
-Working With Licenses
-=====================
-
-As mentioned in the ":ref:`overview-manual/development-environment:licensing`"
-section in the Yocto Project Overview and Concepts Manual, open source
-projects are open to the public and they consequently have different
-licensing structures in place. This section describes the mechanism by
-which the :term:`OpenEmbedded Build System`
-tracks changes to
-licensing text and covers how to maintain open source license compliance
-during your project's lifecycle. The section also describes how to
-enable commercially licensed recipes, which by default are disabled.
-
-Tracking License Changes
-------------------------
-
-The license of an upstream project might change in the future. In order
-to prevent these changes going unnoticed, the
-:term:`LIC_FILES_CHKSUM`
-variable tracks changes to the license text. The checksums are validated
-at the end of the configure step, and if the checksums do not match, the
-build will fail.
-
-Specifying the ``LIC_FILES_CHKSUM`` Variable
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :term:`LIC_FILES_CHKSUM` variable contains checksums of the license text
-in the source code for the recipe. Following is an example of how to
-specify :term:`LIC_FILES_CHKSUM`::
-
- LIC_FILES_CHKSUM = "file://COPYING;md5=xxxx \
- file://licfile1.txt;beginline=5;endline=29;md5=yyyy \
- file://licfile2.txt;endline=50;md5=zzzz \
- ..."
-
-.. note::
-
- - When using "beginline" and "endline", realize that line numbering
- begins with one and not zero. Also, the included lines are
- inclusive (i.e. lines five through and including 29 in the
- previous example for ``licfile1.txt``).
-
- - When a license check fails, the selected license text is included
- as part of the QA message. Using this output, you can determine
- the exact start and finish for the needed license text.
-
-The build system uses the :term:`S`
-variable as the default directory when searching files listed in
-:term:`LIC_FILES_CHKSUM`. The previous example employs the default
-directory.
-
-Consider this next example::
-
- LIC_FILES_CHKSUM = "file://src/ls.c;beginline=5;endline=16;\
- md5=bb14ed3c4cda583abc85401304b5cd4e"
- LIC_FILES_CHKSUM = "file://${WORKDIR}/license.html;md5=5c94767cedb5d6987c902ac850ded2c6"
-
-The first line locates a file in ``${S}/src/ls.c`` and isolates lines
-five through 16 as license text. The second line refers to a file in
-:term:`WORKDIR`.
-
-Note that the :term:`LIC_FILES_CHKSUM` variable is mandatory for all recipes,
-unless the :term:`LICENSE` variable is set to "CLOSED".
-
-Explanation of Syntax
-~~~~~~~~~~~~~~~~~~~~~
-
-As mentioned in the previous section, the :term:`LIC_FILES_CHKSUM` variable
-lists all the important files that contain the license text for the
-source code. It is possible to specify a checksum for an entire file, or
-a specific section of a file (specified by beginning and ending line
-numbers with the "beginline" and "endline" parameters, respectively).
-The latter is useful for source files with a license notice header,
-README documents, and so forth. If you do not use the "beginline"
-parameter, then it is assumed that the text begins on the first line of
-the file. Similarly, if you do not use the "endline" parameter, it is
-assumed that the license text ends with the last line of the file.
-
-The "md5" parameter stores the md5 checksum of the license text. If the
-license text changes in any way as compared to this parameter then a
-mismatch occurs. This mismatch triggers a build failure and notifies the
-developer. Notification allows the developer to review and address the
-license text changes. Also note that if a mismatch occurs during the
-build, the correct md5 checksum is placed in the build log and can be
-easily copied to the recipe.
-
-There is no limit to how many files you can specify using the
-:term:`LIC_FILES_CHKSUM` variable. Generally, however, every project
-requires a few specifications for license tracking. Many projects have a
-"COPYING" file that stores the license information for all the source
-code files. This practice allows you to just track the "COPYING" file as
-long as it is kept up to date.
-
-.. note::
-
- - If you specify an empty or invalid "md5" parameter,
- :term:`BitBake` returns an md5
- mis-match error and displays the correct "md5" parameter value
- during the build. The correct parameter is also captured in the
- build log.
-
- - If the whole file contains only license text, you do not need to
- use the "beginline" and "endline" parameters.
-
-Enabling Commercially Licensed Recipes
---------------------------------------
-
-By default, the OpenEmbedded build system disables components that have
-commercial or other special licensing requirements. Such requirements
-are defined on a recipe-by-recipe basis through the
-:term:`LICENSE_FLAGS` variable
-definition in the affected recipe. For instance, the
-``poky/meta/recipes-multimedia/gstreamer/gst-plugins-ugly`` recipe
-contains the following statement::
-
- LICENSE_FLAGS = "commercial"
-
-Here is a
-slightly more complicated example that contains both an explicit recipe
-name and version (after variable expansion)::
-
- LICENSE_FLAGS = "license_${PN}_${PV}"
-
-In order for a component restricted by a
-:term:`LICENSE_FLAGS` definition to be enabled and included in an image, it
-needs to have a matching entry in the global
-:term:`LICENSE_FLAGS_ACCEPTED`
-variable, which is a variable typically defined in your ``local.conf``
-file. For example, to enable the
-``poky/meta/recipes-multimedia/gstreamer/gst-plugins-ugly`` package, you
-could add either the string "commercial_gst-plugins-ugly" or the more
-general string "commercial" to :term:`LICENSE_FLAGS_ACCEPTED`. See the
-":ref:`dev-manual/common-tasks:license flag matching`" section for a full
-explanation of how :term:`LICENSE_FLAGS` matching works. Here is the
-example::
-
- LICENSE_FLAGS_ACCEPTED = "commercial_gst-plugins-ugly"
-
-Likewise, to additionally enable the package built from the recipe
-containing ``LICENSE_FLAGS = "license_${PN}_${PV}"``, and assuming that
-the actual recipe name was ``emgd_1.10.bb``, the following string would
-enable that package as well as the original ``gst-plugins-ugly``
-package::
-
- LICENSE_FLAGS_ACCEPTED = "commercial_gst-plugins-ugly license_emgd_1.10"
-
-As a convenience, you do not need to specify the
-complete license string for every package. You can use
-an abbreviated form, which consists of just the first portion or
-portions of the license string before the initial underscore character
-or characters. A partial string will match any license that contains the
-given string as the first portion of its license. For example, the
-following value will also match both of the packages
-previously mentioned as well as any other packages that have licenses
-starting with "commercial" or "license".
-::
-
- LICENSE_FLAGS_ACCEPTED = "commercial license"
-
-License Flag Matching
-~~~~~~~~~~~~~~~~~~~~~
-
-License flag matching allows you to control what recipes the
-OpenEmbedded build system includes in the build. Fundamentally, the
-build system attempts to match :term:`LICENSE_FLAGS` strings found in
-recipes against strings found in :term:`LICENSE_FLAGS_ACCEPTED`.
-A match causes the build system to include a recipe in the
-build, while failure to find a match causes the build system to exclude
-a recipe.
-
-In general, license flag matching is simple. However, understanding some
-concepts will help you correctly and effectively use matching.
-
-Before a flag defined by a particular recipe is tested against the
-entries of :term:`LICENSE_FLAGS_ACCEPTED`, the expanded
-string ``_${PN}`` is appended to the flag. This expansion makes each
-:term:`LICENSE_FLAGS` value recipe-specific. After expansion, the
-string is then matched against the entries. Thus, specifying
-``LICENSE_FLAGS = "commercial"`` in recipe "foo", for example, results
-in the string ``"commercial_foo"``. And, to create a match, that string
-must appear among the entries of :term:`LICENSE_FLAGS_ACCEPTED`.
-
-Judicious use of the :term:`LICENSE_FLAGS` strings and the contents of the
-:term:`LICENSE_FLAGS_ACCEPTED` variable allows you a lot of flexibility for
-including or excluding recipes based on licensing. For example, you can
-broaden the matching capabilities by using license flags string subsets
-in :term:`LICENSE_FLAGS_ACCEPTED`.
-
-.. note::
-
- When using a string subset, be sure to use the part of the expanded
- string that precedes the appended underscore character (e.g.
- ``usethispart_1.3``, ``usethispart_1.4``, and so forth).
-
-For example, simply specifying the string "commercial" in the
-:term:`LICENSE_FLAGS_ACCEPTED` variable matches any expanded
-:term:`LICENSE_FLAGS` definition that starts with the string
-"commercial" such as "commercial_foo" and "commercial_bar", which
-are the strings the build system automatically generates for
-hypothetical recipes named "foo" and "bar" assuming those recipes simply
-specify the following::
-
- LICENSE_FLAGS = "commercial"
-
-Thus, you can choose to exhaustively enumerate each license flag in the
-list and allow only specific recipes into the image, or you can use a
-string subset that causes a broader range of matches to allow a range of
-recipes into the image.
-
-This scheme works even if the :term:`LICENSE_FLAGS` string already has
-``_${PN}`` appended. For example, the build system turns the license
-flag "commercial_1.2_foo" into "commercial_1.2_foo_foo" and would match
-both the general "commercial" and the specific "commercial_1.2_foo"
-strings found in the :term:`LICENSE_FLAGS_ACCEPTED` variable, as expected.
-
-Here are some other scenarios:
-
-- You can specify a versioned string in the recipe such as
- "commercial_foo_1.2" in a "foo" recipe. The build system expands this
- string to "commercial_foo_1.2_foo". Combine this license flag with a
- :term:`LICENSE_FLAGS_ACCEPTED` variable that has the string
- "commercial" and you match the flag along with any other flag that
- starts with the string "commercial".
-
-- Under the same circumstances, you can add "commercial_foo" in the
- :term:`LICENSE_FLAGS_ACCEPTED` variable and the build system not only
- matches "commercial_foo_1.2" but also matches any license flag with
- the string "commercial_foo", regardless of the version.
-
-- You can be very specific and use both the package and version parts
- in the :term:`LICENSE_FLAGS_ACCEPTED` list (e.g.
- "commercial_foo_1.2") to specifically match a versioned recipe.
-
-Other Variables Related to Commercial Licenses
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are other helpful variables related to commercial license handling,
-defined in the
-``poky/meta/conf/distro/include/default-distrovars.inc`` file::
-
- COMMERCIAL_AUDIO_PLUGINS ?= ""
- COMMERCIAL_VIDEO_PLUGINS ?= ""
-
-If you
-want to enable these components, you can do so by making sure you have
-statements similar to the following in your ``local.conf`` configuration
-file::
-
- COMMERCIAL_AUDIO_PLUGINS = "gst-plugins-ugly-mad \
- gst-plugins-ugly-mpegaudioparse"
- COMMERCIAL_VIDEO_PLUGINS = "gst-plugins-ugly-mpeg2dec \
- gst-plugins-ugly-mpegstream gst-plugins-bad-mpegvideoparse"
- LICENSE_FLAGS_ACCEPTED = "commercial_gst-plugins-ugly commercial_gst-plugins-bad commercial_qmmp"
-
-
-Of course, you could also create a matching list for those
-components using the more general "commercial" in the
-:term:`LICENSE_FLAGS_ACCEPTED` variable, but that would also enable all
-the other packages with :term:`LICENSE_FLAGS`
-containing "commercial", which you may or may not want::
-
- LICENSE_FLAGS_ACCEPTED = "commercial"
-
-Specifying audio and video plugins as part of the
-``COMMERCIAL_AUDIO_PLUGINS`` and ``COMMERCIAL_VIDEO_PLUGINS`` statements
-(along with the enabling :term:`LICENSE_FLAGS_ACCEPTED` entries) includes the
-plugins or components in the built images, thus adding support for media
-formats or components.
-
-Maintaining Open Source License Compliance During Your Product's Lifecycle
---------------------------------------------------------------------------
-
-One of the concerns for a development organization using open source
-software is how to maintain compliance with the various open source
-licenses during the lifecycle of the product. While this section does
-not provide legal advice or comprehensively cover all scenarios, it does
-present methods that you can use to assist you in meeting the compliance
-requirements during a software release.
-
-With hundreds of different open source licenses that the Yocto Project
-tracks, it is difficult to know the requirements of each and every
-license. However, you can begin to cover the requirements of the major
-FLOSS licenses by addressing the following main areas of concern:
-
-- Source code must be provided.
-
-- License text for the software must be provided.
-
-- Compilation scripts and modifications to the source code must be
- provided.
-
-- SPDX files can be provided.
-
-There are other requirements beyond the scope of these areas and the
-methods described in this section (e.g. the mechanism through which
-source code is distributed).
-
-As different organizations have different methods of complying with open
-source licensing, this section is not meant to imply that there is only
-one single way to meet your compliance obligations, but rather to
-describe one method of achieving compliance. The remainder of this
-section describes supported methods for meeting the previously mentioned
-requirements. Once you take steps to meet these requirements, and
-prior to releasing images, sources, and the build system, you should
-audit all artifacts to ensure completeness.
-
-.. note::
-
- The Yocto Project generates a license manifest during image creation
- that is located in ``${DEPLOY_DIR}/licenses/``\ `image_name`\ ``-``\ `datestamp`
- to assist with any audits.
-
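-For example, assuming a hypothetical image name and datestamp, you can
-review the generated manifest from the :term:`Build Directory` before
-starting an audit::
-
-   $ cat tmp/deploy/licenses/<image_name>-<datestamp>/license.manifest
-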
-Providing the Source Code
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Compliance activities should begin before you generate the final image.
-The first thing you should look at is the requirement that tops the list
-for most compliance groups - providing the source. The Yocto Project has
-a few ways of meeting this requirement.
-
-One of the easiest ways to meet this requirement is to provide the
-entire :term:`DL_DIR` used by the
-build. This method, however, has a few issues. The most obvious is the
-size of the directory since it includes all sources used in the build
-and not just the source used in the released image. It will include
-toolchain source, and other artifacts, which you would not generally
-release. However, the more serious issue for most companies is
-accidental release of proprietary software. The Yocto Project provides
-an :ref:`archiver <ref-classes-archiver>` class to
-help avoid some of these concerns.
-
-Before you employ :term:`DL_DIR` or the :ref:`archiver <ref-classes-archiver>` class, you need to
-decide how you choose to provide source. The source ``archiver`` class
-can generate tarballs and SRPMs and can create them with various levels
-of compliance in mind.
-
-One way of doing this (but certainly not the only way) is to release
-just the source as a tarball. You can do this by adding the following to
-the ``local.conf`` file found in the
-:term:`Build Directory`::
-
- INHERIT += "archiver"
- ARCHIVER_MODE[src] = "original"
-
-During the creation of your
-image, the source from all recipes that deploy packages to the image is
-placed within subdirectories of ``DEPLOY_DIR/sources`` based on the
-:term:`LICENSE` for each recipe.
-Releasing the entire directory enables you to comply with requirements
-concerning providing the unmodified source. It is important to note that
-the size of the directory can get large.
-
-A way to help mitigate the size issue is to only release tarballs for
-licenses that require the release of source. Let us assume you are only
-concerned with GPL code as identified by running the following script:
-
-.. code-block:: shell
-
-   #!/bin/bash
-   # Script to archive a subset of packages matching specific license(s)
-   # Source and license files are copied into sub folders of package folder
-   # Must be run from build folder
-   src_release_dir="source-release"
-   mkdir -p $src_release_dir
- for a in tmp/deploy/sources/*; do
- for d in $a/*; do
- # Get package name from path
- p=`basename $d`
- p=${p%-*}
- p=${p%-*}
- # Only archive GPL packages (update *GPL* regex for your license check)
- numfiles=`ls tmp/deploy/licenses/$p/*GPL* 2> /dev/null | wc -l`
- if [ $numfiles -ge 1 ]; then
- echo Archiving $p
- mkdir -p $src_release_dir/$p/source
- cp $d/* $src_release_dir/$p/source 2> /dev/null
- mkdir -p $src_release_dir/$p/license
- cp tmp/deploy/licenses/$p/* $src_release_dir/$p/license 2> /dev/null
- fi
- done
- done
-
-At this point, you
-could create a tarball from the ``source-release`` directory produced by
-the script and provide that to the end user. This method would be a step
-toward achieving compliance with section 3a of GPLv2 and with section 6
-of GPLv3.
-
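-If it helps your release process, you could then bundle that directory
-into a single archive, for example::
-
-   $ tar -czf source-release.tar.gz source-release/
-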
-Providing License Text
-~~~~~~~~~~~~~~~~~~~~~~
-
-One requirement that is often overlooked is inclusion of license text.
-This requirement also needs to be dealt with prior to generating the
-final image. Some licenses require the license text to accompany the
-binary. You can achieve this by adding the following to your
-``local.conf`` file::
-
- COPY_LIC_MANIFEST = "1"
- COPY_LIC_DIRS = "1"
- LICENSE_CREATE_PACKAGE = "1"
-
-Adding these statements to the
-configuration file ensures that the licenses collected during package
-generation are included on your image.
-
-.. note::
-
- Setting all three variables to "1" results in the image having two
- copies of the same license file. One copy resides in
- ``/usr/share/common-licenses`` and the other resides in
- ``/usr/share/license``.
-
- The reason for this behavior is because
- :term:`COPY_LIC_DIRS` and
- :term:`COPY_LIC_MANIFEST`
- add a copy of the license when the image is built but do not offer a
- path for adding licenses for newly installed packages to an image.
- :term:`LICENSE_CREATE_PACKAGE`
- adds a separate package and an upgrade path for adding licenses to an
- image.
-
-As the source ``archiver`` class has already archived the original
-unmodified source that contains the license files, you would have
-already met the requirements for inclusion of the license information
-with source as defined by the GPL and other open source licenses.
-
-Providing Compilation Scripts and Source Code Modifications
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-At this point, we have addressed all we need to prior to generating the
-image. The next two requirements are addressed during the final
-packaging of the release.
-
-By releasing the version of the OpenEmbedded build system and the layers
-used during the build, you will be providing both compilation scripts
-and the source code modifications in one step.
-
-If the deployment team has a :ref:`overview-manual/concepts:bsp layer`
-and a distro layer, and those layers are used to patch, compile,
-package, or modify (in any way)
-any open source software included in your released images, you might be
-required to release those layers under section 3 of GPLv2 or section 1
-of GPLv3. One way of doing that is with a clean checkout of the version
-of the Yocto Project and layers used during your build. Here is an
-example:
-
-.. code-block:: shell
-
- # We built using the dunfell branch of the poky repo
- $ git clone -b dunfell git://git.yoctoproject.org/poky
- $ cd poky
- # We built using the release_branch for our layers
- $ git clone -b release_branch git://git.mycompany.com/meta-my-bsp-layer
- $ git clone -b release_branch git://git.mycompany.com/meta-my-software-layer
- # clean up the .git repos
- $ find . -name ".git" -type d -exec rm -rf {} \;
-
-One
-thing a development organization might want to consider for end-user
-convenience is to modify ``meta-poky/conf/bblayers.conf.sample`` to
-ensure that when the end user utilizes the released build system to
-build an image, the development organization's layers are included in
-the ``bblayers.conf`` file automatically::
-
- # POKY_BBLAYERS_CONF_VERSION is increased each time build/conf/bblayers.conf
- # changes incompatibly
- POKY_BBLAYERS_CONF_VERSION = "2"
-
- BBPATH = "${TOPDIR}"
- BBFILES ?= ""
-
- BBLAYERS ?= " \
- ##OEROOT##/meta \
- ##OEROOT##/meta-poky \
- ##OEROOT##/meta-yocto-bsp \
- ##OEROOT##/meta-mylayer \
- "
-
-Creating and
-providing an archive of the :term:`Metadata`
-layers (recipes, configuration files, and so forth) enables you to meet
-your requirements to include the scripts to control compilation as well
-as any modifications to the original source.
-
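-As a sketch of one possible final step, you could then bundle the cleaned
-checkout into a single archive for distribution (the archive name is just
-an example)::
-
-   $ cd ..
-   $ tar -czf release-metadata.tar.gz poky/
-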
-Providing SPDX Files
-~~~~~~~~~~~~~~~~~~~~
-
-SPDX file generation is integrated in a layer named ``meta-spdxscanner``,
-which provides several kinds of scanners. To enable this functionality,
-follow these steps:
-
-1. Add meta-spdxscanner layer into ``bblayers.conf``.
-
-2. Refer to the README in ``meta-spdxscanner`` to set up the environment
-   (e.g. set up a Fossology server) needed for the scanner.
-
-3. ``meta-spdxscanner`` provides several classes to create SPDX files.
-   Choose the one you want to use and enable the SPDX task by adding the
-   necessary configuration options to the ``local.conf`` file in your
-   :term:`Build Directory`. Here is an example showing how to generate
-   SPDX files during the build using ``fossology-python.bbclass``::
-
-      # Select fossology-python.bbclass.
-      INHERIT += "fossology-python"
-      # fossology-python.bbclass requires an access token, so after setting
-      # up a Fossology server, you have to create a token.
-      TOKEN = "eyJ0eXAiO..."
-      # fossology-python.bbclass also needs the Fossology server location.
-      FOSSOLOGY_SERVER = "http://xx.xx.xx.xx:8081/repo"
-      # Optional: upload the source code to a specific folder.
-      FOLDER_NAME = "xxxx"
-      # Optional: store SPDX files somewhere other than tmp/deploy/spdx.
-      SPDX_DEPLOY_DIR = "${DEPLOY_DIR}"
-
-For more usage information refer to :yocto_git:`the meta-spdxscanner repository
-</meta-spdxscanner/>`.
-
-Compliance Limitations with Executables Built from Static Libraries
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When package A is added to an image via the :term:`RDEPENDS` or :term:`RRECOMMENDS`
-mechanisms as well as explicitly included in the image recipe with
-:term:`IMAGE_INSTALL`, and depends on a statically linked library recipe B
-(``DEPENDS += "B"``), package B will neither appear in the generated license
-manifest nor in the generated source tarballs. This occurs as the
-:ref:`license <ref-classes-license>` and :ref:`archiver <ref-classes-archiver>`
-classes assume that only packages included via :term:`RDEPENDS` or :term:`RRECOMMENDS`
-end up in the image.
-
-As a result, potential obligations regarding license compliance for package B
-may not be met.
-
-The Yocto Project doesn't enable static libraries by default, in part because
-of this issue. Before a solution to this limitation is found, you need to
-keep in mind that if your root filesystem is built from static libraries,
-you will need to manually ensure that your deliveries are compliant
-with the licenses of these libraries.
-
-Copying Non Standard Licenses
------------------------------
-
-Some packages, such as the linux-firmware package, have many licenses
-that are not in any way common. You can avoid adding a lot of these
-types of license files, which are only applicable to a specific
-package, by using the
-:term:`NO_GENERIC_LICENSE`
-variable. Using this variable also avoids QA errors when you use a
-non-common, non-CLOSED license in a recipe.
-
-Here is an example that uses the ``LICENSE.Abilis.txt`` file as
-the license from the fetched source::
-
- NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENSE.Abilis.txt"
-
-Checking for Vulnerabilities
-============================
-
-Vulnerabilities in images
--------------------------
-
-The Yocto Project has an infrastructure to track and address unfixed
-known security vulnerabilities, as tracked by the public
-`Common Vulnerabilities and Exposures (CVE) <https://en.wikipedia.org/wiki/Common_Vulnerabilities_and_Exposures>`__
-database.
-
-To know which packages are vulnerable to known security vulnerabilities,
-add the following setting to your configuration::
-
- INHERIT += "cve-check"
-
-This way, at build time, BitBake will warn you about known CVEs
-as in the example below::
-
- WARNING: flex-2.6.4-r0 do_cve_check: Found unpatched CVE (CVE-2019-6293), for more information check /poky/build/tmp/work/core2-64-poky-linux/flex/2.6.4-r0/temp/cve.log
- WARNING: libarchive-3.5.1-r0 do_cve_check: Found unpatched CVE (CVE-2021-36976), for more information check /poky/build/tmp/work/core2-64-poky-linux/libarchive/3.5.1-r0/temp/cve.log
-
-It is also possible to check the CVE status of individual packages as follows::
-
- bitbake -c cve_check flex libarchive
-
-Note that OpenEmbedded-Core keeps a list of known unfixed CVE issues which can
-be ignored. You can pass this list to the check as follows::
-
- bitbake -c cve_check libarchive -R conf/distro/include/cve-extra-exclusions.inc
-
-Enabling vulnerability tracking in recipes
-------------------------------------------
-
-The :term:`CVE_PRODUCT` variable defines the name used to match the recipe name
-against the name in the upstream `NIST CVE database <https://nvd.nist.gov/>`__.
-
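-For example, if a recipe name does not match the product name used in the
-NIST database, you can override it in the recipe. The value below is only
-an illustration; it can also take the ``vendor:product`` form::
-
-   CVE_PRODUCT = "my_product_name"
-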
-Editing recipes to fix vulnerabilities
---------------------------------------
-
-To fix a given known vulnerability, you need to add a patch file to your recipe. Here's
-an example from the :oe_layerindex:`ffmpeg recipe</layerindex/recipe/47350>`::
-
- SRC_URI = "https://www.ffmpeg.org/releases/${BP}.tar.xz \
- file://0001-libavutil-include-assembly-with-full-path-from-sourc.patch \
- file://fix-CVE-2020-20446.patch \
- file://fix-CVE-2020-20453.patch \
- file://fix-CVE-2020-22015.patch \
- file://fix-CVE-2020-22021.patch \
- file://fix-CVE-2020-22033-CVE-2020-22019.patch \
-              file://fix-CVE-2021-33815.patch \
-              "
-
-The :ref:`cve-check <ref-classes-cve-check>` class defines two ways of
-supplying a patch for a given CVE. The first
-way is to use a patch filename that matches the below pattern::
-
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
-As shown in the example above, multiple CVE IDs can appear in a patch filename,
-but the :ref:`cve-check <ref-classes-cve-check>` class will only consider
-the last CVE ID in the filename as patched.
-
-The second way to recognize a patched CVE ID is when a line matching the
-below pattern is found in any patch file provided by the recipe::
-
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
-This allows a single patch file to address multiple CVE IDs at the same time.
-
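-For example, the header of a patch covering two (hypothetical) CVE IDs
-could carry such a line, which the class would then detect::
-
-   Fix out-of-bounds read in the demuxer
-
-   CVE: CVE-2021-0001 CVE-2021-0002
-
-   Signed-off-by: Jane Developer <jane@example.com>
-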
-Of course, another way to fix vulnerabilities is to upgrade to a version
-of the package which is not impacted, typically a more recent one.
-The NIST database knows which versions are vulnerable and which ones
-are not.
-
-Last but not least, you can choose to ignore vulnerabilities through
-the :term:`CVE_CHECK_SKIP_RECIPE` and :term:`CVE_CHECK_IGNORE`
-variables.
-
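-A minimal configuration sketch (the recipe name and CVE ID below are
-placeholders) could look like this::
-
-   # In a configuration file: skip CVE checking entirely for one recipe
-   CVE_CHECK_SKIP_RECIPE += "my-recipe"
-
-   # In the recipe itself: ignore a CVE assessed as not applicable
-   CVE_CHECK_IGNORE += "CVE-2021-0001"
-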
-Implementation details
-----------------------
-
-Here's what the :ref:`cve-check <ref-classes-cve-check>` class does to
-find unpatched CVE IDs.
-
-First the code goes through each patch file provided by a recipe. If a valid CVE ID
-is found in the name of the file, the corresponding CVE is considered as patched.
-Don't forget that if multiple CVE IDs are found in the filename, only the last
-one is considered. Then, the code looks for ``CVE: CVE-ID`` lines in the patch
-file. The found CVE IDs are also considered as patched.
-
-Next, the code looks up all the CVE IDs in the NIST database for all the
-products defined in :term:`CVE_PRODUCT`. Then, for each CVE found:
-
- - If the package name (:term:`PN`) is part of
- :term:`CVE_CHECK_SKIP_RECIPE`, it is considered as patched.
-
- - If the CVE ID is part of :term:`CVE_CHECK_IGNORE`, it is
- considered as patched too.
-
- - If the CVE ID is part of the patched CVE for the recipe, it is
- already considered as patched.
-
- - Otherwise, the code checks whether the recipe version (:term:`PV`)
- is within the range of versions impacted by the CVE. If so, the CVE
- is considered as unpatched.
-
-The CVE database is stored in :term:`DL_DIR` and can be inspected using
-the ``sqlite3`` command as follows::
-
- sqlite3 downloads/CVE_CHECK/nvdcve_1.1.db .dump | grep CVE-2021-37462
-
-Using the Error Reporting Tool
-==============================
-
-The error reporting tool allows you to submit errors encountered during
-builds to a central database. Outside of the build environment, you can
-use a web interface to browse errors, view statistics, and query for
-errors. The tool works using a client-server system where the client
-portion is integrated with the installed Yocto Project
-:term:`Source Directory` (e.g. ``poky``).
-The server receives the information collected and saves it in a
-database.
-
-There is a live instance of the error reporting server at
-https://errors.yoctoproject.org.
-When you want to get help with build failures, you can submit all of the
-information on the failure easily and then point to the URL in your bug
-report or send an email to the mailing list.
-
-.. note::
-
- If you send error reports to this server, the reports become publicly
- visible.
-
-Enabling and Using the Tool
----------------------------
-
-By default, the error reporting tool is disabled. You can enable it by
-inheriting the
-:ref:`report-error <ref-classes-report-error>`
-class by adding the following statement to the end of your
-``local.conf`` file in your
-:term:`Build Directory`.
-::
-
- INHERIT += "report-error"
-
-By default, the error reporting feature stores information in
-``${``\ :term:`LOG_DIR`\ ``}/error-report``.
-However, you can specify a directory to use by adding the following to
-your ``local.conf`` file::
-
- ERR_REPORT_DIR = "path"
-
-Enabling error
-reporting causes the build process to collect the errors and store them
-in a file as previously described. When the build system encounters an
-error, it includes a command as part of the console output. You can run
-the command to send the error file to the server. For example, the
-following command sends the errors to an upstream server::
-
- $ send-error-report /home/brandusa/project/poky/build/tmp/log/error-report/error_report_201403141617.txt
-
-In the previous example, the errors are sent to a public database
-available at https://errors.yoctoproject.org, which is used by the
-entire community. If you specify a particular server, you can send the
-errors to a different database. Use the following command for more
-information on available options::
-
- $ send-error-report --help
-
-When sending the error file, you are prompted to review the data being
-sent as well as to provide a name and optional email address. Once you
-satisfy these prompts, the command returns a link from the server that
-corresponds to your entry in the database. For example, here is a
-typical link: https://errors.yoctoproject.org/Errors/Details/9522/
-
-Following the link takes you to a web interface where you can browse,
-query the errors, and view statistics.
-
-Disabling the Tool
-------------------
-
-To disable the error reporting feature, simply remove or comment out the
-following statement from the end of your ``local.conf`` file in your
-:term:`Build Directory`.
-::
-
- INHERIT += "report-error"
-
-Setting Up Your Own Error Reporting Server
-------------------------------------------
-
-If you want to set up your own error reporting server, you can obtain
-the code from the Git repository at :yocto_git:`/error-report-web/`.
-Instructions on how to set it up are in the README document.
-
-Using Wayland and Weston
-========================
-
-`Wayland <https://en.wikipedia.org/wiki/Wayland_(display_server_protocol)>`__
-is a computer display server protocol that provides a method for
-compositing window managers to communicate directly with applications
-and video hardware and expects them to communicate with input hardware
-using other libraries. Using Wayland with supporting targets can result
-in better control over graphics frame rendering than an application
-might otherwise achieve.
-
-The Yocto Project provides the Wayland protocol libraries and the
-reference
-`Weston <https://en.wikipedia.org/wiki/Wayland_(display_server_protocol)#Weston>`__
-compositor as part of its release. You can find the integrated packages
-in the ``meta`` layer of the :term:`Source Directory`.
-Specifically, you
-can find the recipes that build both Wayland and Weston at
-``meta/recipes-graphics/wayland``.
-
-You can build both the Wayland and Weston packages for use only with
-targets that accept the `Mesa 3D and Direct Rendering
-Infrastructure <https://en.wikipedia.org/wiki/Mesa_(computer_graphics)>`__,
-which is also known as Mesa DRI. This implies that you cannot build and
-use the packages if your target uses, for example, the Intel Embedded
-Media and Graphics Driver (Intel EMGD) that overrides Mesa DRI.
-
-.. note::
-
- Due to lack of EGL support, Weston 1.0.3 will not run directly on the
- emulated QEMU hardware. However, this version of Weston will run
- under X emulation without issues.
-
-This section describes what you need to do to implement Wayland and use
-the Weston compositor when building an image for a supporting target.
-
-Enabling Wayland in an Image
-----------------------------
-
-To enable Wayland, you need to enable it to be built and enable it to be
-included (installed) in the image.
-
-Building Wayland
-~~~~~~~~~~~~~~~~
-
-To cause Mesa to build the ``wayland-egl`` platform and Weston to build
-Wayland with Kernel Mode Setting
-(`KMS <https://wiki.archlinux.org/index.php/Kernel_Mode_Setting>`__)
-support, include the "wayland" flag in the
-:term:`DISTRO_FEATURES`
-statement in your ``local.conf`` file::
-
- DISTRO_FEATURES:append = " wayland"
-
-.. note::
-
- If X11 has been enabled elsewhere, Weston will build Wayland with X11
-   support.
-
-Installing Wayland and Weston
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To install the Wayland feature into an image, you must include the
-following
-:term:`CORE_IMAGE_EXTRA_INSTALL`
-statement in your ``local.conf`` file::
-
- CORE_IMAGE_EXTRA_INSTALL += "wayland weston"
-
-Running Weston
---------------
-
-To run Weston inside X11, enabling it as described earlier and building
-a Sato image is sufficient. If you are running your image under Sato, a
-Weston Launcher appears in the "Utility" category.
-
-Alternatively, you can run Weston through the command-line interpreter
-(CLI), which is better suited for development work. To run Weston under
-the CLI, you need to do the following after your image is built:
-
-1. Run these commands to export ``XDG_RUNTIME_DIR``::
-
- mkdir -p /tmp/$USER-weston
- chmod 0700 /tmp/$USER-weston
- export XDG_RUNTIME_DIR=/tmp/$USER-weston
-
-2. Launch Weston in the shell::
-
- weston
diff --git a/documentation/dev-manual/custom-distribution.rst b/documentation/dev-manual/custom-distribution.rst
new file mode 100644
index 0000000000..47faed0d04
--- /dev/null
+++ b/documentation/dev-manual/custom-distribution.rst
@@ -0,0 +1,109 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Creating Your Own Distribution
+******************************
+
+When you build an image using the Yocto Project and do not alter any
+distribution :term:`Metadata`, you are
+creating a Poky distribution. If you wish to gain more control over
+package alternative selections, compile-time options, and other
+low-level configurations, you can create your own distribution.
+
+To create your own distribution, the basic steps consist of creating
+your own distribution layer, creating your own distribution
+configuration file, and then adding any needed code and Metadata to the
+layer. The following steps provide some more detail:
+
+- *Create a layer for your new distro:* Create your distribution layer
+ so that you can keep your Metadata and code for the distribution
+ separate. It is strongly recommended that you create and use your own
+ layer for configuration and code. Using your own layer as compared to
+ just placing configurations in a ``local.conf`` configuration file
+ makes it easier to reproduce the same build configuration when using
+ multiple build machines. See the
+ ":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
+ section for information on how to quickly set up a layer.
+
+- *Create the distribution configuration file:* The distribution
+ configuration file needs to be created in the ``conf/distro``
+ directory of your layer. You need to name it using your distribution
+ name (e.g. ``mydistro.conf``).
+
+ .. note::
+
+ The :term:`DISTRO` variable in your ``local.conf`` file determines the
+ name of your distribution.
+
+ You can split out parts of your configuration file into include files
+ and then "require" them from within your distribution configuration
+ file. Be sure to place the include files in the
+ ``conf/distro/include`` directory of your layer. A common example
+ usage of include files would be to separate out the selection of
+ desired version and revisions for individual recipes.
+
+ Your configuration file needs to set the following required
+ variables:
+
+ - :term:`DISTRO_NAME`
+
+ - :term:`DISTRO_VERSION`
+
+   The following variables are optional and you typically set them
+ from the distribution configuration file:
+
+ - :term:`DISTRO_FEATURES`
+
+ - :term:`DISTRO_EXTRA_RDEPENDS`
+
+ - :term:`DISTRO_EXTRA_RRECOMMENDS`
+
+ - :term:`TCLIBC`
+
+ .. tip::
+
+ If you want to base your distribution configuration file on the
+ very basic configuration from OE-Core, you can use
+ ``conf/distro/defaultsetup.conf`` as a reference and just include
+ variables that differ as compared to ``defaultsetup.conf``.
+ Alternatively, you can create a distribution configuration file
+ from scratch using the ``defaultsetup.conf`` file or configuration files
+ from another distribution such as Poky as a reference.
+
+- *Provide miscellaneous variables:* Be sure to define any other
+ variables for which you want to create a default or enforce as part
+ of the distribution configuration. You can include nearly any
+ variable from the ``local.conf`` file. The variables you use are not
+ limited to the list in the previous bulleted item.
+
+- *Point to Your distribution configuration file:* In your ``local.conf``
+ file in the :term:`Build Directory`, set your :term:`DISTRO` variable to
+ point to your distribution's configuration file. For example, if your
+ distribution's configuration file is named ``mydistro.conf``, then
+ you point to it as follows::
+
+ DISTRO = "mydistro"
+
+- *Add more to the layer if necessary:* Use your layer to hold other
+ information needed for the distribution:
+
+ - Add recipes for installing distro-specific configuration files
+ that are not already installed by another recipe. If you have
+ distro-specific configuration files that are included by an
+ existing recipe, you should add an append file (``.bbappend``) for
+ those. For general information and recommendations on how to add
+ recipes to your layer, see the
+ ":ref:`dev-manual/layers:creating your own layer`" and
+ ":ref:`dev-manual/layers:following best practices when creating layers`"
+ sections.
+
+ - Add any image recipes that are specific to your distribution.
+
+ - Add a ``psplash`` append file for a branded splash screen, using
+ the :term:`SPLASH_IMAGES` variable.
+
+ - Add any other append files to make custom changes that are
+ specific to individual recipes.
+
+ For information on append files, see the
+ ":ref:`dev-manual/layers:appending other layers metadata with your layer`"
+ section.
diff --git a/documentation/dev-manual/custom-template-configuration-directory.rst b/documentation/dev-manual/custom-template-configuration-directory.rst
new file mode 100644
index 0000000000..90ed3ed92e
--- /dev/null
+++ b/documentation/dev-manual/custom-template-configuration-directory.rst
@@ -0,0 +1,72 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Creating a Custom Template Configuration Directory
+**************************************************
+
+If you are producing your own customized version of the build system for
+use by other users, you might want to customize the message shown by the
+setup script or you might want to change the template configuration
+files (i.e. ``local.conf`` and ``bblayers.conf``) that are created in a
+new build directory.
+
+The OpenEmbedded build system uses the environment variable
+``TEMPLATECONF`` to locate the directory from which it gathers
+configuration information that ultimately ends up in the
+:term:`Build Directory` ``conf`` directory.
+By default, ``TEMPLATECONF`` is set as follows in the ``poky``
+repository::
+
+ TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf}
+
+This is the
+directory used by the build system to find templates from which to build
+some key configuration files. If you look at this directory, you will
+see the ``bblayers.conf.sample``, ``local.conf.sample``, and
+``conf-notes.txt`` files. The build system uses these files to form the
+respective ``bblayers.conf`` file, ``local.conf`` file, and display the
+list of BitBake targets when running the setup script.
+
+To override these default configuration files with configurations you
+want used within every new Build Directory, simply set the
+``TEMPLATECONF`` variable to your directory. The ``TEMPLATECONF``
+variable is set in the ``.templateconf`` file, which is in the top-level
+:term:`Source Directory` folder
+(e.g. ``poky``). Edit the ``.templateconf`` so that it can locate your
+directory.
+
+Best practices dictate that you should keep your template configuration
+directory in your custom distribution layer. For example, suppose you
+have a layer named ``meta-mylayer`` located in your home directory and
+you want your template configuration directory named ``myconf``.
+Changing the ``.templateconf`` as follows causes the OpenEmbedded build
+system to look in your directory and base its configuration files on the
+``*.sample`` configuration files it finds. The final configuration files
+(i.e. ``local.conf`` and ``bblayers.conf``) ultimately still end up in
+your Build Directory, but they are based on your ``*.sample`` files.
+::
+
+ TEMPLATECONF=${TEMPLATECONF:-meta-mylayer/myconf}
+
+Aside from the ``*.sample`` configuration files, the ``conf-notes.txt``
+file also resides in the default ``meta-poky/conf`` directory. The script
+that sets up the build environment (i.e.
+:ref:`structure-core-script`) uses this file to
+display BitBake targets as part of the script output. Customizing this
+``conf-notes.txt`` file is a good way to make sure your list of custom
+targets appears as part of the script's output.
+
+Here is the default list of targets displayed as a result of running
+either of the setup scripts::
+
+ You can now run 'bitbake <target>'
+
+ Common targets are:
+ core-image-minimal
+ core-image-sato
+ meta-toolchain
+ meta-ide-support
+
+Changing the listed common targets is as easy as editing your version of
+``conf-notes.txt`` in your custom template configuration directory and
+making sure you have ``TEMPLATECONF`` set to your directory.
+
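+For example, a customized ``conf-notes.txt`` could read as follows, where
+the image names are hypothetical::
+
+   You can now run 'bitbake <target>'
+
+   Common targets are:
+       my-image-minimal
+       my-image-dev
+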
diff --git a/documentation/dev-manual/customizing-images.rst b/documentation/dev-manual/customizing-images.rst
new file mode 100644
index 0000000000..17dd2d2b0c
--- /dev/null
+++ b/documentation/dev-manual/customizing-images.rst
@@ -0,0 +1,223 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Customizing Images
+******************
+
+You can customize images to satisfy particular requirements. This
+section describes several methods and provides guidelines for each.
+
+Customizing Images Using ``local.conf``
+=======================================
+
+Probably the easiest way to customize an image is to add a package by
+way of the ``local.conf`` configuration file. Because it is limited to
+local use, this method generally only allows you to add packages and is
+not as flexible as creating your own customized image. When you add
+packages using local variables this way, you need to realize that these
+variable changes are in effect for every build and consequently affect
+all images, which might not be what you require.
+
+To add a package to your image using the local configuration file, use
+the :term:`IMAGE_INSTALL` variable with the ``:append`` operator::
+
+ IMAGE_INSTALL:append = " strace"
+
+Use of the syntax is important; specifically, the leading space
+after the opening quote and before the package name, which is
+``strace`` in this example. This space is required since the ``:append``
+operator does not add the space.
+
+Furthermore, you must use ``:append`` instead of the ``+=`` operator if
+you want to avoid ordering issues. Because ``:append`` unconditionally
+appends to the variable, it avoids ordering problems caused by the
+variable being set in image recipes and ``.bbclass`` files with
+operators like ``?=``, and ensures the operation takes effect.
+
+As shown in its simplest use, ``IMAGE_INSTALL:append`` affects all
+images. It is possible to extend the syntax so that the variable applies
+to a specific image only. Here is an example::
+
+ IMAGE_INSTALL:append:pn-core-image-minimal = " strace"
+
+This example adds ``strace`` to the ``core-image-minimal`` image only.
+
+You can add packages using a similar approach through the
+:term:`CORE_IMAGE_EXTRA_INSTALL` variable. If you use this variable, only
+``core-image-*`` images are affected.
+
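+For example, the following ``local.conf`` line adds ``strace`` to
+``core-image-*`` images only::
+
+   CORE_IMAGE_EXTRA_INSTALL += "strace"
+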
+Customizing Images Using Custom ``IMAGE_FEATURES`` and ``EXTRA_IMAGE_FEATURES``
+===============================================================================
+
+Another method for customizing your image is to enable or disable
+high-level image features by using the
+:term:`IMAGE_FEATURES` and
+:term:`EXTRA_IMAGE_FEATURES`
+variables. Although the functions for both variables are nearly
+equivalent, best practices dictate using :term:`IMAGE_FEATURES` from within
+a recipe and using :term:`EXTRA_IMAGE_FEATURES` from within your
+``local.conf`` file, which is found in the :term:`Build Directory`.
+
+To understand how these features work, the best reference is
+:ref:`meta/classes/image.bbclass <ref-classes-image>`.
+This class lists out the available
+:term:`IMAGE_FEATURES` of which most map to package groups while some, such
+as ``debug-tweaks`` and ``read-only-rootfs``, resolve as general
+configuration settings.
+
+In summary, the file looks at the contents of the :term:`IMAGE_FEATURES`
+variable and then maps or configures the feature accordingly. Based on
+this information, the build system automatically adds the appropriate
+packages or configurations to the
+:term:`IMAGE_INSTALL` variable.
+Effectively, you are enabling extra features by extending the class or
+creating a custom class for use with specialized image ``.bb`` files.
+
+Use the :term:`EXTRA_IMAGE_FEATURES` variable from within your local
+configuration file. Using a separate area from which to enable features
+with this variable helps you avoid overwriting the features in the image
+recipe that are enabled with :term:`IMAGE_FEATURES`. The value of
+:term:`EXTRA_IMAGE_FEATURES` is added to :term:`IMAGE_FEATURES` within
+``meta/conf/bitbake.conf``.
+
+To illustrate how you can use these variables to modify your image,
+consider an example that selects the SSH server. The Yocto Project ships
+with two SSH servers you can use with your images: Dropbear and OpenSSH.
+Dropbear is a minimal SSH server appropriate for resource-constrained
+environments, while OpenSSH is a well-known standard SSH server
+implementation. By default, the ``core-image-sato`` image is configured
+to use Dropbear. The ``core-image-full-cmdline`` and ``core-image-lsb``
+images both include OpenSSH. The ``core-image-minimal`` image does not
+contain an SSH server.
+
+You can customize your image and change these defaults. Edit the
+:term:`IMAGE_FEATURES` variable in your recipe or use the
+:term:`EXTRA_IMAGE_FEATURES` in your ``local.conf`` file so that it
+configures the image you are working with to include
+``ssh-server-dropbear`` or ``ssh-server-openssh``.
+
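+For example, a minimal ``local.conf`` sketch that selects the OpenSSH
+server for your images could be::
+
+   EXTRA_IMAGE_FEATURES += "ssh-server-openssh"
+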
+.. note::
+
+ See the ":ref:`ref-manual/features:image features`" section in the Yocto
+ Project Reference Manual for a complete list of image features that ship
+ with the Yocto Project.
+
+Customizing Images Using Custom .bb Files
+=========================================
+
+You can also customize an image by creating a custom recipe that defines
+additional software as part of the image. The following example shows
+the form for the two lines you need::
+
+ IMAGE_INSTALL = "packagegroup-core-x11-base package1 package2"
+ inherit core-image
+
+Defining the software using a custom recipe gives you total control over
+the contents of the image. It is important to use the correct names of
+packages in the :term:`IMAGE_INSTALL` variable. You must use the
+OpenEmbedded notation and not the Debian notation for the names (e.g.
+``glibc-dev`` instead of ``libc6-dev``).
+
+The other method for creating a custom image is to base it on an
+existing image. For example, if you want to create an image based on
+``core-image-sato`` but add the additional package ``strace`` to the
+image, copy the ``meta/recipes-sato/images/core-image-sato.bb`` to a new
+``.bb`` and add the following line to the end of the copy::
+
+ IMAGE_INSTALL += "strace"
+
+Customizing Images Using Custom Package Groups
+==============================================
+
+For complex custom images, the best approach for customizing an image is
+to create a custom package group recipe that is used to build the image
+or images. A good example of a package group recipe is
+``meta/recipes-core/packagegroups/packagegroup-base.bb``.
+
+If you examine that recipe, you see that the :term:`PACKAGES` variable lists
+the package group packages to produce. The ``inherit packagegroup``
+statement sets appropriate default values and automatically adds
+``-dev``, ``-dbg``, and ``-ptest`` complementary packages for each
+package specified in the :term:`PACKAGES` statement.
+
+.. note::
+
+ The ``inherit packagegroup`` line should be located near the top of the
+ recipe, certainly before the :term:`PACKAGES` statement.
+
+For each package you specify in :term:`PACKAGES`, you can use :term:`RDEPENDS`
+and :term:`RRECOMMENDS` entries to provide a list of packages the parent
+task package should contain. You can see examples of these further down
+in the ``packagegroup-base.bb`` recipe.
+
+Here is a short, fabricated example showing the same basic pieces for a
+hypothetical packagegroup defined in ``packagegroup-custom.bb``, where
+the variable :term:`PN` is the standard way to abbreviate the reference to
+the full packagegroup name ``packagegroup-custom``::
+
+ DESCRIPTION = "My Custom Package Groups"
+
+ inherit packagegroup
+
+ PACKAGES = "\
+ ${PN}-apps \
+ ${PN}-tools \
+ "
+
+ RDEPENDS:${PN}-apps = "\
+ dropbear \
+ portmap \
+ psplash"
+
+ RDEPENDS:${PN}-tools = "\
+ oprofile \
+ oprofileui-server \
+ lttng-tools"
+
+ RRECOMMENDS:${PN}-tools = "\
+ kernel-module-oprofile"
+
+In the previous example, two package group packages are created with
+their dependencies and their recommended package dependencies listed:
+``packagegroup-custom-apps``, and ``packagegroup-custom-tools``. To
+build an image using these package group packages, you need to add
+``packagegroup-custom-apps`` and/or ``packagegroup-custom-tools`` to
+:term:`IMAGE_INSTALL`. For other forms of image dependencies see the other
+areas of this section.
+
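+For example, a sketch of such an addition in an image recipe or
+``local.conf`` could look like this::
+
+   IMAGE_INSTALL:append = " packagegroup-custom-apps packagegroup-custom-tools"
+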
+Customizing an Image Hostname
+=============================
+
+By default, the configured hostname (i.e. ``/etc/hostname``) in an image
+is the same as the machine name. For example, if
+:term:`MACHINE` equals "qemux86", the
+configured hostname written to ``/etc/hostname`` is "qemux86".
+
+You can customize this name by altering the value of the "hostname"
+variable in the ``base-files`` recipe using either an append file or a
+configuration file. Use the following in an append file::
+
+ hostname = "myhostname"
+
+Use the following in a configuration file::
+
+ hostname:pn-base-files = "myhostname"
+
+Changing the default value of the variable "hostname" can be useful in
+certain situations. For example, suppose you need to do extensive
+testing on an image and you would like to easily identify the image
+under test from existing images with typical default hostnames. In this
+situation, you could change the default hostname to "testme", which
+results in all the images using the name "testme". Once testing is
+complete and you do not need to rebuild the image for test any longer,
+you can easily reset the default hostname.
+
+Another point of interest is that if you unset the variable, the image
+will have no default hostname in the filesystem. Here is an example that
+unsets the variable in a configuration file::
+
+ hostname:pn-base-files = ""
+
+Having no default hostname in the filesystem is suitable for
+environments that use dynamic hostnames such as virtual machines.
+
diff --git a/documentation/dev-manual/debugging.rst b/documentation/dev-manual/debugging.rst
new file mode 100644
index 0000000000..74f5772554
--- /dev/null
+++ b/documentation/dev-manual/debugging.rst
@@ -0,0 +1,1252 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Debugging Tools and Techniques
+******************************
+
+The exact method for debugging build failures depends on the nature of
+the problem and on the system's area from which the bug originates.
+Standard debugging practices such as comparison against the last known
+working version with examination of the changes and the re-application
+of steps to identify the one causing the problem are valid for the Yocto
+Project just as they are for any other system. Even though it is
+impossible to detail every possible potential failure, this section
+provides some general tips to aid in debugging given a variety of
+situations.
+
+.. note::
+
+ A useful feature for debugging is the error reporting tool.
+ Configuring the Yocto Project to use this tool causes the
+ OpenEmbedded build system to produce error reporting commands as part
+ of the console output. You can enter the commands after the build
+   completes to log error information into a common database that can
+ help you figure out what might be going wrong. For information on how
+ to enable and use this feature, see the
+ ":ref:`dev-manual/error-reporting-tool:using the error reporting tool`"
+ section.
+
+The following list shows the debugging topics in the remainder of this
+section:
+
+- ":ref:`dev-manual/debugging:viewing logs from failed tasks`" describes
+ how to find and view logs from tasks that failed during the build
+ process.
+
+- ":ref:`dev-manual/debugging:viewing variable values`" describes how to
+ use the BitBake ``-e`` option to examine variable values after a
+ recipe has been parsed.
+
+- ":ref:`dev-manual/debugging:viewing package information with \`\`oe-pkgdata-util\`\``"
+ describes how to use the ``oe-pkgdata-util`` utility to query
+ :term:`PKGDATA_DIR` and
+ display package-related information for built packages.
+
+- ":ref:`dev-manual/debugging:viewing dependencies between recipes and tasks`"
+ describes how to use the BitBake ``-g`` option to display recipe
+ dependency information used during the build.
+
+- ":ref:`dev-manual/debugging:viewing task variable dependencies`" describes
+ how to use the ``bitbake-dumpsig`` command in conjunction with key
+ subdirectories in the :term:`Build Directory` to determine variable
+ dependencies.
+
+- ":ref:`dev-manual/debugging:running specific tasks`" describes
+ how to use several BitBake options (e.g. ``-c``, ``-C``, and ``-f``)
+ to run specific tasks in the build chain. It can be useful to run
+   tasks "out-of-order" when trying to isolate build issues.
+
+- ":ref:`dev-manual/debugging:general BitBake problems`" describes how
+ to use BitBake's ``-D`` debug output option to reveal more about what
+ BitBake is doing during the build.
+
+- ":ref:`dev-manual/debugging:building with no dependencies`"
+ describes how to use the BitBake ``-b`` option to build a recipe
+ while ignoring dependencies.
+
+- ":ref:`dev-manual/debugging:recipe logging mechanisms`"
+ describes how to use the many recipe logging functions to produce
+ debugging output and report errors and warnings.
+
+- ":ref:`dev-manual/debugging:debugging parallel make races`"
+ describes how to debug situations where the build consists of several
+ parts that are run simultaneously and when the output or result of
+ one part is not ready for use with a different part of the build that
+ depends on that output.
+
+- ":ref:`dev-manual/debugging:debugging with the gnu project debugger (gdb) remotely`"
+ describes how to use GDB to allow you to examine running programs, which can
+ help you fix problems.
+
+- ":ref:`dev-manual/debugging:debugging with the gnu project debugger (gdb) on the target`"
+ describes how to use GDB directly on target hardware for debugging.
+
+- ":ref:`dev-manual/debugging:other debugging tips`" describes
+ miscellaneous debugging tips that can be useful.
+
+Viewing Logs from Failed Tasks
+==============================
+
+You can find the log for a task in the file
+``${``\ :term:`WORKDIR`\ ``}/temp/log.do_``\ `taskname`.
+For example, the log for the
+:ref:`ref-tasks-compile` task of the
+QEMU minimal image for the x86 machine (``qemux86``) might be in
+``tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/temp/log.do_compile``.
+To see the commands :term:`BitBake` ran
+to generate a log, look at the corresponding ``run.do_``\ `taskname` file
+in the same directory.
+
+``log.do_``\ `taskname` and ``run.do_``\ `taskname` are actually symbolic
+links to ``log.do_``\ `taskname`\ ``.``\ `pid` and
+``run.do_``\ `taskname`\ ``.``\ `pid`, where `pid` is the PID the task had
+when it ran. The symlinks always point to the files corresponding to the
+most recent run.
+
+Viewing Variable Values
+=======================
+
+Sometimes you need to know the value of a variable as a result of
+BitBake's parsing step. This could be because some unexpected behavior
+occurred in your project. Perhaps an attempt to :ref:`modify a variable
+<bitbake-user-manual/bitbake-user-manual-metadata:modifying existing
+variables>` did not work out as expected.
+
+BitBake's ``-e`` option is used to display variable values after
+parsing. The following command displays the variable values after the
+configuration files (i.e. ``local.conf``, ``bblayers.conf``,
+``bitbake.conf`` and so forth) have been parsed::
+
+ $ bitbake -e
+
+The following command displays variable values after a specific recipe has
+been parsed. The variables include those from the configuration as well::
+
+ $ bitbake -e recipename
+
+.. note::
+
+ Each recipe has its own private set of variables (datastore).
+ Internally, after parsing the configuration, a copy of the resulting
+ datastore is made prior to parsing each recipe. This copying implies
+ that variables set in one recipe will not be visible to other
+ recipes.
+
+ Likewise, each task within a recipe gets a private datastore based on
+ the recipe datastore, which means that variables set within one task
+ will not be visible to other tasks.
+
+In the output of ``bitbake -e``, each variable is preceded by a
+description of how the variable got its value, including temporary
+values that were later overridden. This description also includes
+variable flags (varflags) set on the variable. The output can be very
+helpful during debugging.
+
+Variables that are exported to the environment are preceded by
+``export`` in the output of ``bitbake -e``. See the following example::
+
+ export CC="i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/ulf/poky/build/tmp/sysroots/qemux86"
+
+In addition to variable values, the output of the ``bitbake -e`` and
+``bitbake -e recipename`` commands includes the following information:
+
+- The output starts with a tree listing all configuration files and
+ classes included globally, recursively listing the files they include
+ or inherit in turn. Much of the behavior of the OpenEmbedded build
+ system (including the behavior of the :ref:`ref-manual/tasks:normal recipe build tasks`) is
+ implemented in the :ref:`ref-classes-base` class and the
+ classes it inherits, rather than being built into BitBake itself.
+
+- After the variable values, all functions appear in the output. For
+ shell functions, variables referenced within the function body are
+ expanded. If a function has been modified using overrides or using
+ override-style operators like ``:append`` and ``:prepend``, then the
+ final assembled function body appears in the output.
+
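+Because the output is large, it is common to filter it for a single
+variable. For example, the following sketch shows one way to inspect the
+final value of :term:`SRC_URI` for a recipe::
+
+   $ bitbake -e busybox | grep "^SRC_URI="
+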
+Viewing Package Information with ``oe-pkgdata-util``
+====================================================
+
+You can use the ``oe-pkgdata-util`` command-line utility to query
+:term:`PKGDATA_DIR` and display
+various package-related information. Note that the utility can only
+display information about packages that have already been built.
+
+Here are a few of the available ``oe-pkgdata-util`` subcommands.
+
+.. note::
+
+ You can use the standard \* and ? globbing wildcards as part of
+ package names and paths.
+
+- ``oe-pkgdata-util list-pkgs [pattern]``: Lists all packages
+ that have been built, optionally limiting the match to packages that
+ match pattern.
+
+- ``oe-pkgdata-util list-pkg-files package ...``: Lists the
+ files and directories contained in the given packages.
+
+ .. note::
+
+ A different way to view the contents of a package is to look at
+ the
+ ``${``\ :term:`WORKDIR`\ ``}/packages-split``
+ directory of the recipe that generates the package. This directory
+ is created by the
+ :ref:`ref-tasks-package` task
+ and has one subdirectory for each package the recipe generates,
+ which contains the files stored in that package.
+
+ If you want to inspect the ``${WORKDIR}/packages-split``
+ directory, make sure that :ref:`ref-classes-rm-work` is not
+ enabled when you build the recipe.
+
+- ``oe-pkgdata-util find-path path ...``: Lists the names of
+ the packages that contain the given paths. For example, the following
+ tells us that ``/usr/share/man/man1/make.1`` is contained in the
+ ``make-doc`` package::
+
+ $ oe-pkgdata-util find-path /usr/share/man/man1/make.1
+ make-doc: /usr/share/man/man1/make.1
+
+- ``oe-pkgdata-util lookup-recipe package ...``: Lists the name
+ of the recipes that produce the given packages.
+
+For more information on the ``oe-pkgdata-util`` command, use the help
+facility::
+
+ $ oe-pkgdata-util --help
+ $ oe-pkgdata-util subcommand --help
+
+Viewing Dependencies Between Recipes and Tasks
+==============================================
+
+Sometimes it can be hard to see why BitBake wants to build other recipes
+before the one you have specified. Dependency information can help you
+understand why a recipe is built.
+
+To generate dependency information for a recipe, run the following
+command::
+
+ $ bitbake -g recipename
+
+This command writes the following files in the current directory:
+
+- ``pn-buildlist``: A list of recipes/targets involved in building
+ `recipename`. "Involved" here means that at least one task from the
+ recipe needs to run when building `recipename` from scratch. Targets
+ that are in
+ :term:`ASSUME_PROVIDED`
+ are not listed.
+
+- ``task-depends.dot``: A graph showing dependencies between tasks.
+
+The graphs are in :wikipedia:`DOT <DOT_%28graph_description_language%29>`
+format and can be converted to images (e.g. using the ``dot`` tool from
+`Graphviz <https://www.graphviz.org/>`__).
+
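+For example, assuming Graphviz is installed on your build host, you could
+render the task graph as follows::
+
+   $ dot -Tsvg task-depends.dot -o task-depends.svg
+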
+.. note::
+
+ - DOT files use a plain text format. The graphs generated using the
+ ``bitbake -g`` command are often so large as to be difficult to
+ read without special pruning (e.g. with BitBake's ``-I`` option)
+ and processing. Despite the form and size of the graphs, the
+ corresponding ``.dot`` files can still be possible to read and
+ provide useful information.
+
+ As an example, the ``task-depends.dot`` file contains lines such
+ as the following::
+
+ "libxslt.do_configure" -> "libxml2.do_populate_sysroot"
+
+ The above example line reveals that the
+ :ref:`ref-tasks-configure`
+ task in ``libxslt`` depends on the
+ :ref:`ref-tasks-populate_sysroot`
+ task in ``libxml2``, which is a normal
+ :term:`DEPENDS` dependency
+ between the two recipes.
+
+ - For an example of how ``.dot`` files can be processed, see the
+ ``scripts/contrib/graph-tool`` Python script, which finds and
+ displays paths between graph nodes.
+
+You can use a different method to view dependency information by using
+the following command::
+
+ $ bitbake -g -u taskexp recipename
+
+This command
+displays a GUI window from which you can view build-time and runtime
+dependencies for the recipes involved in building `recipename`.
+
+Viewing Task Variable Dependencies
+==================================
+
+As mentioned in the
+":ref:`bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)`"
+section of the BitBake User Manual, BitBake tries to automatically determine
+what variables a task depends on so that it can rerun the task if any values of
+the variables change. This determination is usually reliable. However, if you
+do things like construct variable names at runtime, then you might have to
+manually declare dependencies on those variables using ``vardeps`` as described
+in the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`"
+section of the BitBake User Manual.
+
+If you are unsure whether a variable dependency is being picked up
+automatically for a given task, you can list the variable dependencies
+BitBake has determined by doing the following:
+
+#. Build the recipe containing the task::
+
+ $ bitbake recipename
+
+#. Inside the :term:`STAMPS_DIR`
+ directory, find the signature data (``sigdata``) file that
+ corresponds to the task. The ``sigdata`` files contain a pickled
+ Python database of all the metadata that went into creating the input
+ checksum for the task. As an example, for the
+ :ref:`ref-tasks-fetch` task of the
+ ``db`` recipe, the ``sigdata`` file might be found in the following
+ location::
+
+ ${BUILDDIR}/tmp/stamps/i586-poky-linux/db/6.0.30-r1.do_fetch.sigdata.7c048c18222b16ff0bcee2000ef648b1
+
+ For tasks that are accelerated through the shared state
+ (:ref:`sstate <overview-manual/concepts:shared state cache>`) cache, an
+ additional ``siginfo`` file is written into
+ :term:`SSTATE_DIR` along with
+ the cached task output. The ``siginfo`` files contain exactly the
+ same information as ``sigdata`` files.
+
+#. Run ``bitbake-dumpsig`` on the ``sigdata`` or ``siginfo`` file. Here
+ is an example::
+
+ $ bitbake-dumpsig ${BUILDDIR}/tmp/stamps/i586-poky-linux/db/6.0.30-r1.do_fetch.sigdata.7c048c18222b16ff0bcee2000ef648b1
+
+ In the output of the above command, you will find a line like the
+ following, which lists all the (inferred) variable dependencies for
+ the task. This list also includes indirect dependencies from
+ variables depending on other variables, recursively::
+
+ Task dependencies: ['PV', 'SRCREV', 'SRC_URI', 'SRC_URI[sha256sum]', 'base_do_fetch']
+
+ .. note::
+
+ Functions (e.g. ``base_do_fetch``) also count as variable dependencies.
+ These functions in turn depend on the variables they reference.
+
+ The output of ``bitbake-dumpsig`` also includes the value each
+ variable had, a list of dependencies for each variable, and
+ :term:`BB_BASEHASH_IGNORE_VARS`
+ information.
+
+Debugging signature construction and unexpected task executions
+===============================================================
+
+There is a ``bitbake-diffsigs`` command for comparing two
+``siginfo`` or ``sigdata`` files. This command can be helpful when
+trying to figure out what changed between two versions of a task. If you
+call ``bitbake-diffsigs`` with just one file, the command behaves like
+``bitbake-dumpsig``.
+
+You can also use BitBake to dump out the signature construction
+information without executing tasks by using either of the following
+BitBake command-line options::
+
+   --dump-signatures=SIGNATURE_HANDLER
+ -S SIGNATURE_HANDLER
+
+
+.. note::
+
+ Two common values for `SIGNATURE_HANDLER` are "none" and "printdiff", which
+ dump only the signature or compare the dumped signature with the most recent one,
+ respectively. "printdiff" will try to establish the most recent
+ signature match (e.g. in the sstate cache) and then compare the matched
+ signatures to determine where the two stamp trees diverge. This can be
+ used to determine why tasks need to be re-run in situations where that
+ is not expected.
+
+Using BitBake with either of these options causes BitBake to dump out
+``sigdata`` files in the ``stamps`` directory for every task it would
+have executed instead of building the specified target package.
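+
+As an example, the following command (using an assumed image target) asks
+BitBake to explain why tasks would rerun by comparing against the most
+recent matching signatures::
+
+   $ bitbake core-image-minimal -S printdiff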
+
+Viewing Metadata Used to Create the Input Signature of a Shared State Task
+==========================================================================
+
+Seeing what metadata went into creating the input signature of a shared
+state (sstate) task can be a useful debugging aid. This information is
+available in signature information (``siginfo``) files in
+:term:`SSTATE_DIR`. For
+information on how to view and interpret information in ``siginfo``
+files, see the
+":ref:`dev-manual/debugging:viewing task variable dependencies`" section.
+
+For conceptual information on shared state, see the
+":ref:`overview-manual/concepts:shared state`"
+section in the Yocto Project Overview and Concepts Manual.
+
+Invalidating Shared State to Force a Task to Run
+================================================
+
+The OpenEmbedded build system uses
+:ref:`checksums <overview-manual/concepts:checksums (signatures)>` and
+:ref:`overview-manual/concepts:shared state` cache to avoid unnecessarily
+rebuilding tasks. Collectively, this scheme is known as "shared state
+code".
+
+As with all schemes, this one has some drawbacks. It is possible that
+you could make implicit changes to your code that the checksum
+calculations do not take into account. These implicit changes affect a
+task's output but do not trigger the shared state code into rebuilding a
+recipe. Consider an example during which a tool changes its output.
+Assume that the output of ``rpmdeps`` changes. The result of the change
+should be that all the ``package`` and ``package_write_rpm`` shared
+state cache items become invalid. However, because the change to the
+output is external to the code and therefore implicit, the associated
+shared state cache items do not become invalidated. In this case, the
+build process uses the cached items rather than running the task again.
+Obviously, these types of implicit changes can cause problems.
+
+To avoid these problems during the build, you need to understand the
+effects of any changes you make. Realize that changes you make directly
+to a function are automatically factored into the checksum calculation.
+Thus, these explicit changes invalidate the associated area of shared
+state cache. However, you need to be aware of any implicit changes that
+are not obvious changes to the code and could affect the output of a
+given task.
+
+When you identify an implicit change, you can easily take steps to
+invalidate the cache and force the tasks to run. The steps you can take
+are as simple as changing a function's comments in the source code. For
+example, to invalidate package shared state files, change the comment
+statements of
+:ref:`ref-tasks-package` or the
+comments of one of the functions it calls. Even though the change is
+purely cosmetic, it causes the checksum to be recalculated and forces
+the build system to run the task again.
+
+.. note::
+
+ For an example of a commit that makes a cosmetic change to invalidate
+ shared state, see this
+ :yocto_git:`commit </poky/commit/meta/classes/package.bbclass?id=737f8bbb4f27b4837047cb9b4fbfe01dfde36d54>`.
+
+Running Specific Tasks
+======================
+
+Any given recipe consists of a set of tasks. The standard BitBake
+behavior in most cases is: :ref:`ref-tasks-fetch`, :ref:`ref-tasks-unpack`, :ref:`ref-tasks-patch`,
+:ref:`ref-tasks-configure`, :ref:`ref-tasks-compile`, :ref:`ref-tasks-install`, :ref:`ref-tasks-package`,
+:ref:`do_package_write_* <ref-tasks-package_write_deb>`, and :ref:`ref-tasks-build`. The default task is
+:ref:`ref-tasks-build` and any tasks on which it depends build first. Some tasks,
+such as :ref:`ref-tasks-devshell`, are not part of the default build chain. If you
+wish to run a task that is not part of the default build chain, you can
+use the ``-c`` option in BitBake. Here is an example::
+
+ $ bitbake matchbox-desktop -c devshell
+
+The ``-c`` option respects task dependencies, which means that all other
+tasks (including tasks from other recipes) that the specified task
+depends on will be run before the task. Even when you manually specify a
+task to run with ``-c``, BitBake will only run the task if it considers
+it "out of date". See the
+":ref:`overview-manual/concepts:stamp files and the rerunning of tasks`"
+section in the Yocto Project Overview and Concepts Manual for how
+BitBake determines whether a task is "out of date".
+
+If you want to force an up-to-date task to be rerun (e.g. because you
+made manual modifications to the recipe's
+:term:`WORKDIR` that you want to try
+out), then you can use the ``-f`` option.
+
+.. note::
+
+ The reason ``-f`` is never required when running the
+ :ref:`ref-tasks-devshell` task is because the
+ [\ :ref:`nostamp <bitbake-user-manual/bitbake-user-manual-metadata:variable flags>`\ ]
+ variable flag is already set for the task.
+
+The following example shows one way you can use the ``-f`` option::
+
+ $ bitbake matchbox-desktop
+ .
+ .
+ make some changes to the source code in the work directory
+ .
+ .
+ $ bitbake matchbox-desktop -c compile -f
+ $ bitbake matchbox-desktop
+
+This sequence first builds and then recompiles ``matchbox-desktop``. The
+last command reruns all tasks (basically the packaging tasks) after the
+compile. BitBake recognizes that the :ref:`ref-tasks-compile` task was rerun and
+therefore understands that the other tasks also need to be run again.
+
+Another, shorter way to rerun a task and all
+:ref:`ref-manual/tasks:normal recipe build tasks`
+that depend on it is to use the ``-C`` option.
+
+.. note::
+
+ This option is upper-cased and is separate from the ``-c``
+ option, which is lower-cased.
+
+Using this option invalidates the given task and then runs the
+:ref:`ref-tasks-build` task, which is
+the default task if no task is given, and the tasks on which it depends.
+You could replace the final two commands in the previous example with
+the following single command::
+
+ $ bitbake matchbox-desktop -C compile
+
+Internally, the ``-f`` and ``-C`` options work by tainting (modifying)
+the input checksum of the specified task. This tainting indirectly
+causes the task and its dependent tasks to be rerun through the normal
+task dependency mechanisms.
+
+.. note::
+
+ BitBake explicitly keeps track of which tasks have been tainted in
+ this fashion, and will print warnings such as the following for
+ builds involving such tasks:
+
+ .. code-block:: none
+
+ WARNING: /home/ulf/poky/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.1.bb.do_compile is tainted from a forced run
+
+
+ The purpose of the warning is to let you know that the work directory
+ and build output might not be in the clean state they would be in for
+ a "normal" build, depending on what actions you took. To get rid of
+ such warnings, you can remove the work directory and rebuild the
+ recipe, as follows::
+
+ $ bitbake matchbox-desktop -c clean
+ $ bitbake matchbox-desktop
+
+
+You can view a list of tasks in a given package by running the
+:ref:`ref-tasks-listtasks` task as follows::
+
+ $ bitbake matchbox-desktop -c listtasks
+
+The results appear as output to the console and are also in
+the file ``${WORKDIR}/temp/log.do_listtasks``.
+
+General BitBake Problems
+========================
+
+You can see debug output from BitBake by using the ``-D`` option. The
+debug output gives more information about what BitBake is doing and the
+reason behind it. Each ``-D`` option you use increases the logging
+level. The most common usage is ``-DDD``.
+
+The output from ``bitbake -DDD -v targetname`` can reveal why BitBake
+chose a certain version of a package or why BitBake picked a certain
+provider. This command could also help you in a situation where you
+think BitBake did something unexpected.
+
+Building with No Dependencies
+=============================
+
+To build a specific recipe (``.bb`` file), you can use the following
+command form::
+
+ $ bitbake -b somepath/somerecipe.bb
+
+This command form does
+not check for dependencies. Consequently, you should use it only when
+you know existing dependencies have been met.
+
+.. note::
+
+ You can also specify fragments of the filename. In this case, BitBake
+ checks for a unique match.
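+
+ For example, the following hypothetical command relies on BitBake finding
+ exactly one recipe file whose name matches the fragment::
+
+    $ bitbake -b matchbox-desktop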
+
+Recipe Logging Mechanisms
+=========================
+
+The Yocto Project provides several logging functions for producing
+debugging output and reporting errors and warnings. For Python
+functions, the following logging functions are available. All of these functions
+log to ``${T}/log.do_``\ `task`, and can also log to standard output
+(stdout) with the right settings:
+
+- ``bb.plain(msg)``: Writes msg as is to the log while also
+ logging to stdout.
+
+- ``bb.note(msg)``: Writes "NOTE: msg" to the log. Also logs to
+ stdout if BitBake is called with "-v".
+
+- ``bb.debug(level, msg)``: Writes "DEBUG: msg" to the log. Also logs to
+ stdout if the log level is greater than or equal to level. See the
+ ":ref:`bitbake-user-manual/bitbake-user-manual-intro:usage and syntax`"
+ section of the BitBake User Manual for more information.
+
+- ``bb.warn(msg)``: Writes "WARNING: msg" to the log while also
+ logging to stdout.
+
+- ``bb.error(msg)``: Writes "ERROR: msg" to the log while also
+ logging to standard out (stdout).
+
+ .. note::
+
+ Calling this function does not cause the task to fail.
+
+- ``bb.fatal(msg)``: This logging function is similar to
+ ``bb.error(msg)`` but also causes the calling task to fail.
+
+ .. note::
+
+ ``bb.fatal()`` raises an exception, which means you do not need to put a
+ "return" statement after the function.
+
+The same logging functions are also available in shell functions, under
+the names ``bbplain``, ``bbnote``, ``bbdebug``, ``bbwarn``, ``bberror``,
+and ``bbfatal``. The :ref:`ref-classes-logging` class
+implements these functions. See that class in the ``meta/classes``
+folder of the :term:`Source Directory` for information.
+
+Logging With Python
+-------------------
+
+When creating recipes using Python and inserting code that handles build
+logs, keep in mind the goal is to have informative logs while keeping
+the console as "silent" as possible. Also, if you want status messages
+in the log, use the "debug" loglevel.
+
+Here is an example written in Python. The code handles logging for
+a function that determines the number of tasks needed to be run. See the
+":ref:`ref-tasks-listtasks`"
+section for additional information::
+
+ python do_listtasks() {
+ bb.debug(2, "Starting to figure out the task list")
+ if noteworthy_condition:
+ bb.note("There are 47 tasks to run")
+ bb.debug(2, "Got to point xyz")
+ if warning_trigger:
+ bb.warn("Detected warning_trigger, this might be a problem later.")
+ if recoverable_error:
+ bb.error("Hit recoverable_error, you really need to fix this!")
+ if fatal_error:
+ bb.fatal("fatal_error detected, unable to print the task list")
+ bb.plain("The tasks present are abc")
+ bb.debug(2, "Finished figuring out the tasklist")
+ }
+
+Logging With Bash
+-----------------
+
+When creating recipes using Bash and inserting code that handles build
+logs, you have the same goals --- informative with minimal console output.
+The syntax you use for recipes written in Bash is similar to that of
+recipes written in Python described in the previous section.
+
+Here is an example written in Bash. The code logs the progress of
+the ``do_my_function`` function::
+
+ do_my_function() {
+ bbdebug 2 "Running do_my_function"
+ if [ exceptional_condition ]; then
+ bbnote "Hit exceptional_condition"
+ fi
+ bbdebug 2 "Got to point xyz"
+ if [ warning_trigger ]; then
+ bbwarn "Detected warning_trigger, this might cause a problem later."
+ fi
+ if [ recoverable_error ]; then
+ bberror "Hit recoverable_error, correcting"
+ fi
+ if [ fatal_error ]; then
+ bbfatal "fatal_error detected"
+ fi
+ bbdebug 2 "Completed do_my_function"
+ }
+
+
+Debugging Parallel Make Races
+=============================
+
+A parallel ``make`` race occurs when the build consists of several parts
+that are run simultaneously and a situation occurs when the output or
+result of one part is not ready for use with a different part of the
+build that depends on that output. Parallel make races are annoying and
+can sometimes be difficult to reproduce and fix. However, there are some simple
+tips and tricks that can help you debug and fix them. This section
+presents a real-world example of an error encountered on the Yocto
+Project autobuilder and the process used to fix it.
+
+.. note::
+
+ If you cannot properly fix a ``make`` race condition, you can work around it
+ by clearing either the :term:`PARALLEL_MAKE` or :term:`PARALLEL_MAKEINST`
+ variables.
+
+The Failure
+-----------
+
+For this example, assume that you are building an image that depends on
+the "neard" package. And, during the build, BitBake runs into problems
+and creates the following output.
+
+.. note::
+
+ This example log file has longer lines artificially broken to make
+ the listing easier to read.
+
+If you examine the output or the log file, you see the failure during
+``make``:
+
+.. code-block:: none
+
+ | DEBUG: SITE files ['endian-little', 'bit-32', 'ix86-common', 'common-linux', 'common-glibc', 'i586-linux', 'common']
+ | DEBUG: Executing shell function do_compile
+ | NOTE: make -j 16
+ | make --no-print-directory all-am
+ | /bin/mkdir -p include/near
+ | /bin/mkdir -p include/near
+ | /bin/mkdir -p include/near
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/types.h include/near/types.h
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/log.h include/near/log.h
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/plugin.h include/near/plugin.h
+ | /bin/mkdir -p include/near
+ | /bin/mkdir -p include/near
+ | /bin/mkdir -p include/near
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/tag.h include/near/tag.h
+ | /bin/mkdir -p include/near
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/adapter.h include/near/adapter.h
+ | /bin/mkdir -p include/near
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/ndef.h include/near/ndef.h
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/tlv.h include/near/tlv.h
+ | /bin/mkdir -p include/near
+ | /bin/mkdir -p include/near
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/setting.h include/near/setting.h
+ | /bin/mkdir -p include/near
+ | /bin/mkdir -p include/near
+ | /bin/mkdir -p include/near
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/device.h include/near/device.h
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/nfc_copy.h include/near/nfc_copy.h
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/snep.h include/near/snep.h
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/version.h include/near/version.h
+ | ln -s /home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/
+ 0.14-r0/neard-0.14/include/dbus.h include/near/dbus.h
+ | ./src/genbuiltin nfctype1 nfctype2 nfctype3 nfctype4 p2p > src/builtin.h
+ | i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/pokybuild/yocto-autobuilder/nightly-x86/
+ build/build/tmp/sysroots/qemux86 -DHAVE_CONFIG_H -I. -I./include -I./src -I./gdbus -I/home/pokybuild/
+ yocto-autobuilder/nightly-x86/build/build/tmp/sysroots/qemux86/usr/include/glib-2.0
+ -I/home/pokybuild/yocto-autobuilder/nightly-x86/build/build/tmp/sysroots/qemux86/usr/
+ lib/glib-2.0/include -I/home/pokybuild/yocto-autobuilder/nightly-x86/build/build/
+ tmp/sysroots/qemux86/usr/include/dbus-1.0 -I/home/pokybuild/yocto-autobuilder/
+ nightly-x86/build/build/tmp/sysroots/qemux86/usr/lib/dbus-1.0/include -I/home/pokybuild/yocto-autobuilder/
+ nightly-x86/build/build/tmp/sysroots/qemux86/usr/include/libnl3
+ -DNEAR_PLUGIN_BUILTIN -DPLUGINDIR=\""/usr/lib/near/plugins"\"
+ -DCONFIGDIR=\""/etc/neard\"" -O2 -pipe -g -feliminate-unused-debug-types -c
+ -o tools/snep-send.o tools/snep-send.c
+ | In file included from tools/snep-send.c:16:0:
+ | tools/../src/near.h:41:23: fatal error: near/dbus.h: No such file or directory
+ | #include <near/dbus.h>
+ | ^
+ | compilation terminated.
+ | make[1]: *** [tools/snep-send.o] Error 1
+ | make[1]: *** Waiting for unfinished jobs....
+ | make: *** [all] Error 2
+ | ERROR: oe_runmake failed
+
+Reproducing the Error
+---------------------
+
+Because race conditions are intermittent, they do not manifest
+themselves every time you do the build. In fact, most times the build
+will complete without problems even though the potential race condition
+exists. Thus, once the error surfaces, you need a way to reproduce it.
+
+In this example, compiling the "neard" package is causing the problem.
+So the first thing to do is build "neard" locally. Before you start the
+build, set the
+:term:`PARALLEL_MAKE` variable
+in your ``local.conf`` file to a high number (e.g. "-j 20"). Using a
+high value for :term:`PARALLEL_MAKE` increases the chances of the race
+condition showing up::
+
+ $ bitbake neard
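+
+In ``local.conf``, the corresponding setting (using the value suggested
+above) would look like this::
+
+   PARALLEL_MAKE = "-j 20"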
+
+Once the local build for "neard" completes, start a ``devshell`` build::
+
+ $ bitbake neard -c devshell
+
+For information on how to use a ``devshell``, see the
+":ref:`dev-manual/development-shell:using a development shell`" section.
+
+In the ``devshell``, do the following::
+
+ $ make clean
+ $ make tools/snep-send.o
+
+The ``devshell`` commands make the failure clearly visible. In this case,
+a dependency is missing from the ``neard`` Makefile target. Here is some
+abbreviated sample output with the
+missing dependency clearly visible at the end::
+
+ i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/scott-lenovo/......
+ .
+ .
+ .
+ tools/snep-send.c
+ In file included from tools/snep-send.c:16:0:
+ tools/../src/near.h:41:23: fatal error: near/dbus.h: No such file or directory
+ #include <near/dbus.h>
+ ^
+ compilation terminated.
+ make: *** [tools/snep-send.o] Error 1
+ $
+
+
+Creating a Patch for the Fix
+----------------------------
+
+Because there is a missing dependency for the Makefile target, you need
+to patch the ``Makefile.am`` file, which is generated from
+``Makefile.in``. You can use Quilt to create the patch::
+
+ $ quilt new parallelmake.patch
+ Patch patches/parallelmake.patch is now on top
+ $ quilt add Makefile.am
+ File Makefile.am added to patch patches/parallelmake.patch
+
+For more information on using Quilt, see the
+":ref:`dev-manual/quilt:using quilt in your workflow`" section.
+
+At this point you need to make the edits to ``Makefile.am`` to add the
+missing dependency. For our example, you have to add the following line
+to the file::
+
+ tools/snep-send.$(OBJEXT): include/near/dbus.h
+
+Once you have edited the file, use the ``refresh`` command to create the
+patch::
+
+ $ quilt refresh
+ Refreshed patch patches/parallelmake.patch
+
+Once the patch file is created, you need to add it back to the originating
+recipe folder. Here is an example assuming a top-level
+:term:`Source Directory` named ``poky``::
+
+ $ cp patches/parallelmake.patch poky/meta/recipes-connectivity/neard/neard
+
+The final thing you need to do to implement the fix in the build is to
+update the "neard" recipe (i.e. ``neard-0.14.bb``) so that the
+:term:`SRC_URI` statement includes
+the patch file. The recipe file is in the folder above the patch. Here
+is what the edited :term:`SRC_URI` statement would look like::
+
+ SRC_URI = "${KERNELORG_MIRROR}/linux/network/nfc/${BPN}-${PV}.tar.xz \
+ file://neard.in \
+ file://neard.service.in \
+ file://parallelmake.patch \
+ "
+
+With the patch complete and moved to the correct folder and the
+:term:`SRC_URI` statement updated, you can exit the ``devshell``::
+
+ $ exit
+
+Testing the Build
+-----------------
+
+With everything in place, you can get back to trying the build again
+locally::
+
+ $ bitbake neard
+
+This build should succeed.
+
+Now you can open up a ``devshell`` again and repeat the clean and make
+operations as follows::
+
+ $ bitbake neard -c devshell
+ $ make clean
+ $ make tools/snep-send.o
+
+The build should work without issue.
+
+As with all solved problems, if they originated upstream, you need to
+submit the fix for the recipe in OE-Core and upstream so that the
+problem is taken care of at its source. See the
+":doc:`../contributor-guide/submit-changes`" section for more information.
+
+Debugging With the GNU Project Debugger (GDB) Remotely
+======================================================
+
+GDB allows you to examine running programs, which in turn helps you to
+understand and fix problems. It also allows you to perform post-mortem
+style analysis of program crashes. GDB is available as a package within
+the Yocto Project and is installed in SDK images by default. See the
+":ref:`ref-manual/images:Images`" chapter in the Yocto
+Project Reference Manual for a description of these images. You can find
+information on GDB at https://sourceware.org/gdb/.
+
+.. note::
+
+ For best results, install debug (``-dbg``) packages for the applications you
+ are going to debug. Doing so makes extra debug symbols available that give
+ you more meaningful output.
+
+Sometimes, due to memory or disk space constraints, it is not possible
+to use GDB directly on the remote target to debug applications. These
+constraints arise because GDB needs to load the debugging information
+and the binaries of the process being debugged. Additionally, GDB needs
+to perform many computations to locate information such as function
+names, variable names and values, stack traces and so forth --- even
+before starting the debugging process. These extra computations place
+more load on the target system and can alter the characteristics of the
+program being debugged.
+
+To help get past the previously mentioned constraints, there are two
+methods you can use: running a debuginfod server and using gdbserver.
+
+Using the debuginfod server method
+----------------------------------
+
+``debuginfod`` from ``elfutils`` is a way to distribute ``debuginfo`` files.
+Running a ``debuginfod`` server makes debug symbols readily available,
+which means you don't need to download debugging information
+and the binaries of the process being debugged. You can just fetch
+debug symbols from the server.
+
+To run a ``debuginfod`` server, you need to do the following:
+
+- Ensure that ``debuginfod`` is present in :term:`DISTRO_FEATURES`
+ (it already is in the ``OpenEmbedded-core`` defaults and the ``poky``
+ reference distribution). If not, set it in your distro configuration file
+ or in ``local.conf``::
+
+ DISTRO_FEATURES:append = " debuginfod"
+
+ This distro feature enables the server and client library in ``elfutils``,
+ and enables ``debuginfod`` support in clients (at the moment, ``gdb`` and ``binutils``).
+
+- Run the following commands to launch the ``debuginfod`` server on the host::
+
+ $ oe-debuginfod
+
+- To use ``debuginfod`` on the target, you need to know the IP address and
+ port on which ``debuginfod`` is listening on the host (the port defaults to
+ 8002), and export that into the shell environment, for example in ``qemu``::
+
+ root@qemux86-64:~# export DEBUGINFOD_URLS="http://192.168.7.1:8002/"
+
+- Then debug info fetching should simply work when running the target ``gdb``,
+ ``readelf`` or ``objdump``, for example::
+
+ root@qemux86-64:~# gdb /bin/cat
+ ...
+ Reading symbols from /bin/cat...
+ Downloading separate debug info for /bin/cat...
+ Reading symbols from /home/root/.cache/debuginfod_client/923dc4780cfbc545850c616bffa884b6b5eaf322/debuginfo...
+
+- It's also possible to use ``debuginfod-find`` to just query the server::
+
+ root@qemux86-64:~# debuginfod-find debuginfo /bin/ls
+ /home/root/.cache/debuginfod_client/356edc585f7f82d46f94fcb87a86a3fe2d2e60bd/debuginfo
+
+
+Using the gdbserver method
+--------------------------
+
+gdbserver runs on the remote target and does not load any debugging
+information from the debugged process. Instead, the debugging information
+is processed by a GDB instance running on a remote computer: the host GDB.
+The host GDB then sends control commands to gdbserver to make it stop or
+start the debugged program, as well as read or write memory regions of
+that debugged program. All the debugging information is loaded and
+processed, and all the heavy debugging work is done, by the host GDB.
+Offloading this work gives the gdbserver running on the target a chance
+to remain small and fast.
+
+Because the host GDB is responsible for loading the debugging
+information and for doing the necessary processing to make actual
+debugging happen, you have to make sure the host can access the
+unstripped binaries complete with their debugging information and also
+be sure the target is compiled with no optimizations. The host GDB must
+also have local access to all the libraries used by the debugged
+program. Because gdbserver does not need any local debugging
+information, the binaries on the remote target can remain stripped.
+However, the binaries must also be compiled without optimization so they
+match the host's binaries.
+
+To remain consistent with GDB documentation and terminology, the binary
+being debugged on the remote target machine is referred to as the
+"inferior" binary. For documentation on GDB see the `GDB
+site <https://sourceware.org/gdb/documentation/>`__.
+
+The following steps show you how to debug using the GNU project
+debugger.
+
+#. *Configure your build system to construct the companion debug
+ filesystem:*
+
+ In your ``local.conf`` file, set the following::
+
+ IMAGE_GEN_DEBUGFS = "1"
+ IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
+
+ These options cause the
+ OpenEmbedded build system to generate a special companion filesystem
+ fragment, which contains the matching source and debug symbols to
+ your deployable filesystem. The build system does this by looking at
+ what is in the deployed filesystem, and pulling the corresponding
+ ``-dbg`` packages.
+
+ The companion debug filesystem is not a complete filesystem, but only
+ contains the debug fragments. This filesystem must be combined with
+ the full filesystem for debugging. Subsequent steps in this procedure
+ show how to combine the partial filesystem with the full filesystem.
+
+#. *Configure the system to include gdbserver in the target filesystem:*
+
+ Make the following addition in your ``local.conf`` file::
+
+ EXTRA_IMAGE_FEATURES:append = " tools-debug"
+
+ The change makes
+ sure the ``gdbserver`` package is included.
+
+#. *Build the environment:*
+
+ Use the following command to construct the image and the companion
+ Debug Filesystem::
+
+ $ bitbake image
+
+ Build the cross GDB component and
+ make it available for debugging. Build the SDK that matches the
+ image. Building the SDK is best for a production build that can be
+ used later for debugging, especially during long term maintenance::
+
+ $ bitbake -c populate_sdk image
+
+ Alternatively, you can build the minimal toolchain components that
+ match the target. Doing so creates a smaller than typical SDK and
+ only contains a minimal set of components with which to build simple
+ test applications, as well as run the debugger::
+
+ $ bitbake meta-toolchain
+
+ A final method is to build GDB itself within the build system::
+
+ $ bitbake gdb-cross-<architecture>
+
+ Doing so produces a temporary copy of
+ ``cross-gdb`` you can use for debugging during development. While
+ this is the quickest approach, the two previous methods in this step
+ are better when considering long-term maintenance strategies.
+
+ .. note::
+
+ If you run ``bitbake gdb-cross``, the OpenEmbedded build system suggests
+ the actual recipe name (e.g. ``gdb-cross-i586``). The suggestion is
+ usually the actual name you want to use.
+
+#. *Set up the* ``debugfs``\ *:*
+
+ Run the following commands to set up the ``debugfs``::
+
+ $ mkdir debugfs
+ $ cd debugfs
+ $ tar xvfj build-dir/tmp/deploy/images/machine/image.rootfs.tar.bz2
+ $ tar xvfj build-dir/tmp/deploy/images/machine/image-dbg.rootfs.tar.bz2
+
+#. *Set up GDB:*
+
+ Install the SDK (if you built one) and then source the correct
+ environment file. Sourcing the environment file puts the SDK in your
+ ``PATH`` environment variable and sets ``$GDB`` to the SDK's debugger.
+
+ If you are using the build system, GDB is located in
+ `build-dir`\ ``/tmp/sysroots/``\ `host`\ ``/usr/bin/``\ `architecture`\ ``/``\ `architecture`\ ``-gdb``
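+
+ For example, with a hypothetical SDK installation directory and target
+ architecture, this setup might look like the following::
+
+    $ . /opt/poky/4.0/environment-setup-i586-poky-linux
+    $ $GDB --version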
+
+#. *Boot the target:*
+
+ For information on how to run QEMU, see the `QEMU
+ Documentation <https://wiki.qemu.org/Documentation/GettingStartedDevelopers>`__.
+
+ .. note::
+
+ Be sure to verify that your host can access the target via TCP.
+
+#. *Debug a program:*
+
+ Debugging a program involves running gdbserver on the target and then
+ running GDB on the host. The example in this step debugs ``gzip``:
+
+ .. code-block:: shell
+
+ root@qemux86:~# gdbserver localhost:1234 /bin/gzip --help
+
+ For
+ additional gdbserver options, see the `GDB Server
+ Documentation <https://www.gnu.org/software/gdb/documentation/>`__.
+
+ After running gdbserver on the target, you need to run GDB on the
+ host, configure it, and connect to the target. Use these commands::
+
+ $ cd directory-holding-the-debugfs-directory
+ $ arch-gdb
+ (gdb) set sysroot debugfs
+ (gdb) set substitute-path /usr/src/debug debugfs/usr/src/debug
+ (gdb) target remote IP-of-target:1234
+
+ At this
+ point, everything should automatically load (i.e. matching binaries,
+ symbols and headers).
+
+ .. note::
+
+ The GDB ``set`` commands in the previous example can be placed into the
+ user's ``~/.gdbinit`` file. Upon starting, GDB automatically runs whatever
+ commands are in that file.
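+
+ For example, a minimal ``~/.gdbinit`` along those lines (with a
+ placeholder path standing in for your ``debugfs`` directory) could
+ contain::
+
+    set sysroot /path/to/debugfs
+    set substitute-path /usr/src/debug /path/to/debugfs/usr/src/debug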
+
+#. *Deploying without a full image rebuild:*
+
+ In many cases, during development you want a quick method to deploy a
+ new binary to the target and debug it, without waiting for a full
+ image build.
+
+ One approach to solving this situation is to just build the component
+ you want to debug. Once you have built the component, copy the
+ executable directly to both the target and the host ``debugfs``.
+
+ If the binary is processed through the debug splitting in
+ OpenEmbedded, you should also copy the debug items (i.e. ``.debug``
+ contents and corresponding ``/usr/src/debug`` files) from the work
+ directory. Here is an example::
+
+ $ bitbake bash
+ $ bitbake -c devshell bash
+ $ cd ..
+ $ scp packages-split/bash/bin/bash target:/bin/bash
+ $ cp -a packages-split/bash-dbg/\* path/debugfs
+
+Debugging with the GNU Project Debugger (GDB) on the Target
+===========================================================
+
+The previous section addressed using GDB remotely for debugging
+purposes, which is the most usual case due to the inherent hardware
+limitations on many embedded devices. However, debugging in the target
+hardware itself is also possible with more powerful devices. This
+section describes what you need to do in order to support using GDB to
+debug on the target hardware.
+
+To support this kind of debugging, you need do the following:
+
+- Ensure that GDB is on the target. You can do this by making
+ the following addition to your ``local.conf`` file::
+
+ EXTRA_IMAGE_FEATURES:append = " tools-debug"
+
+- Ensure that debug symbols are present. You can do so by adding the
+ corresponding ``-dbg`` package to :term:`IMAGE_INSTALL`::
+
+ IMAGE_INSTALL:append = " packagename-dbg"
+
+ Alternatively, you can add the following to ``local.conf`` to include
+ all the debug symbols::
+
+ EXTRA_IMAGE_FEATURES:append = " dbg-pkgs"
+
+.. note::
+
+ To improve the debug information accuracy, you can reduce the level
+ of optimization used by the compiler. For example, when adding the
+ following line to your ``local.conf`` file, you will reduce optimization
+ from :term:`FULL_OPTIMIZATION` of "-O2" to :term:`DEBUG_OPTIMIZATION`
+ of "-O -fno-omit-frame-pointer"::
+
+ DEBUG_BUILD = "1"
+
+ Consider that this will reduce the application's performance and is
+ recommended only for debugging purposes.
+
+Other Debugging Tips
+====================
+
+Here are some other tips that you might find useful:
+
+- When adding new packages, it is worth watching for undesirable items
+ making their way into compiler command lines. For example, you do not
+ want references to local system files like ``/usr/lib/`` or
+ ``/usr/include/``.
+
+- If you want to remove the ``psplash`` boot splashscreen, add
+ ``psplash=false`` to the kernel command line. Doing so prevents
+ ``psplash`` from loading and thus allows you to see the console. It
+ is also possible to switch out of the splashscreen by switching the
+ virtual console (e.g. Fn+Left or Fn+Right on a Zaurus).
+
+- Removing :term:`TMPDIR` (usually ``tmp/``, within the
+ :term:`Build Directory`) can often fix temporary build issues. Removing
+ :term:`TMPDIR` is usually a relatively cheap operation, because task output
+ will be cached in :term:`SSTATE_DIR` (usually ``sstate-cache/``, which is
+ also in the :term:`Build Directory`).
+
+ .. note::
+
+ Removing :term:`TMPDIR` might be a workaround rather than a fix.
+ Consequently, trying to determine the underlying cause of an issue before
+ removing the directory is a good idea.
+
+- Understanding how a feature is used in practice within existing
+ recipes can be very helpful. It is recommended that you configure
+ some method that allows you to quickly search through files.
+
+ Using GNU Grep, you can use the following shell function to
+ recursively search through common recipe-related files, skipping
+ binary files, ``.git`` directories, and the :term:`Build Directory`
+ (assuming its name starts with "build")::
+
+ g() {
+ grep -Ir \
+ --exclude-dir=.git \
+ --exclude-dir='build*' \
+ --include='*.bb*' \
+ --include='*.inc*' \
+ --include='*.conf*' \
+ --include='*.py*' \
+ "$@"
+ }
+
+ Here are some usage examples::
+
+ $ g FOO # Search recursively for "FOO"
+ $ g -i foo # Search recursively for "foo", ignoring case
+ $ g -w FOO # Search recursively for "FOO" as a word, ignoring e.g. "FOOBAR"
+
+ If figuring
+ out how some feature works requires a lot of searching, it might
+ indicate that the documentation should be extended or improved. In
+ such cases, consider filing a documentation bug using the Yocto
+ Project implementation of
+ :yocto_bugs:`Bugzilla <>`. For information on
+ how to submit a bug against the Yocto Project, see the Yocto Project
+ Bugzilla :yocto_wiki:`wiki page </Bugzilla_Configuration_and_Bug_Tracking>`
+ and the ":doc:`../contributor-guide/report-defect`" section.
+
+ .. note::
+
+ The manuals might not be the right place to document variables
+ that are purely internal and have a limited scope (e.g. internal
+ variables used to implement a single ``.bbclass`` file).
+
diff --git a/documentation/dev-manual/development-shell.rst b/documentation/dev-manual/development-shell.rst
new file mode 100644
index 0000000000..be26bcffc7
--- /dev/null
+++ b/documentation/dev-manual/development-shell.rst
@@ -0,0 +1,82 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Using a Development Shell
+*************************
+
+When debugging certain commands or even when just editing packages,
+``devshell`` can be a useful tool. When you invoke ``devshell``, all
+tasks up to and including
+:ref:`ref-tasks-patch` are run for the
+specified target. Then, a new terminal is opened and you are placed in
+``${``\ :term:`S`\ ``}``, the source
+directory. In the new terminal, all the OpenEmbedded build-related
+environment variables are still defined so you can use commands such as
+``configure`` and ``make``. The commands execute just as if the
+OpenEmbedded build system were executing them. Consequently, working
+this way can be helpful when debugging a build or preparing software to
+be used with the OpenEmbedded build system.
+
+Here is an example that uses ``devshell`` on a target named
+``matchbox-desktop``::
+
+ $ bitbake matchbox-desktop -c devshell
+
+This command spawns a terminal with a shell prompt within the
+OpenEmbedded build environment. The
+:term:`OE_TERMINAL` variable
+controls what type of shell is opened.
+
+For spawned terminals, the following occurs:
+
+- The ``PATH`` variable includes the cross-toolchain.
+
+- The ``pkgconfig`` variables find the correct ``.pc`` files.
+
+- The ``configure`` command finds the Yocto Project site files as well
+ as any other necessary files.
+
+Within this environment, you can run configure or compile commands as if
+they were being run by the OpenEmbedded build system itself. As noted
+earlier, the working directory also automatically changes to the Source
+Directory (:term:`S`).
+
+To manually run a specific task using ``devshell``, run the
+corresponding ``run.*`` script in the
+``${``\ :term:`WORKDIR`\ ``}/temp``
+directory (e.g., ``run.do_configure.``\ `pid`). If a task's script does
+not exist, which would be the case if the task was skipped by way of the
+sstate cache, you can create the task by first running it outside of the
+``devshell``::
+
+ $ bitbake -c task
+
+.. note::
+
+ - Execution of a task's ``run.*`` script and BitBake's execution of
+ a task are identical. In other words, running the script re-runs
+ the task just as it would be run using the ``bitbake -c`` command.
+
+ - Any ``run.*`` file that does not have a ``.pid`` extension is a
+ symbolic link (symlink) to the most recent version of that file.
+
+Remember that the ``devshell`` is a mechanism that allows you to get
+into the BitBake task execution environment. And as such, all commands
+must be called just as BitBake would call them. That means you need to
+provide the appropriate options for cross-compilation and so forth as
+applicable.
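+
+For example, to cross-compile a single source file from inside the
+``devshell``, you might rely on the exported :term:`CC` and :term:`CFLAGS`
+variables (the file name here is just an illustration)::
+
+   $ ${CC} ${CFLAGS} -c example.c -o example.o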
+
+When you are finished using ``devshell``, exit the shell or close the
+terminal window.
+
+.. note::
+
+ - It is worth remembering that when using ``devshell`` you need to
+ use the full compiler name such as ``arm-poky-linux-gnueabi-gcc``
+ instead of just using ``gcc``. The same applies to other
+ applications such as ``binutils``, ``libtool`` and so forth.
+ BitBake sets up environment variables such as :term:`CC` to assist
+ applications such as ``make`` in finding the correct tools.
+
+ - It is also worth noting that ``devshell`` still works over X11
+ forwarding and similar situations.
+
diff --git a/documentation/dev-manual/device-manager.rst b/documentation/dev-manual/device-manager.rst
new file mode 100644
index 0000000000..49fc785fec
--- /dev/null
+++ b/documentation/dev-manual/device-manager.rst
@@ -0,0 +1,74 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+.. _device-manager:
+
+Selecting a Device Manager
+**************************
+
+The Yocto Project provides multiple ways to manage the device manager
+(``/dev``):
+
+- Persistent and Pre-Populated ``/dev``: For this case, the ``/dev``
+ directory is persistent and the required device nodes are created
+ during the build.
+
+- Use ``devtmpfs`` with a Device Manager: For this case, the ``/dev``
+ directory is provided by the kernel as an in-memory file system and
+ is automatically populated by the kernel at runtime. Additional
+ configuration of device nodes is done in user space by a device
+ manager like ``udev`` or ``busybox-mdev``.
+
+Using Persistent and Pre-Populated ``/dev``
+===========================================
+
+To use the static method for device population, you need to set the
+:term:`USE_DEVFS` variable to "0"
+as follows::
+
+ USE_DEVFS = "0"
+
+The content of the resulting ``/dev`` directory is defined in a Device
+Table file. The
+:term:`IMAGE_DEVICE_TABLES`
+variable defines the Device Table to use and should be set in the
+machine or distro configuration file. Alternatively, you can set this
+variable in your ``local.conf`` configuration file.
+
+If you do not define the :term:`IMAGE_DEVICE_TABLES` variable, the default
+``device_table-minimal.txt`` is used. Here is an example that selects a
+different device table::
+
+ IMAGE_DEVICE_TABLES = "device_table-mymachine.txt"
+
+The population is handled by the ``makedevs`` utility during image
+creation, as illustrated below.
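+
+Each line of a device table describes one node using the ``makedevs``
+format (name, type, mode, uid, gid, major, minor, start, increment, count).
+The following lines are a hypothetical excerpt, not necessarily identical
+to ``device_table-minimal.txt``::
+
+   /dev          d  755  0  0  -  -  -  -  -
+   /dev/console  c  662  0  0  5  1  -  -  -
+   /dev/null     c  666  0  0  1  3  -  -  -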
+
+Using ``devtmpfs`` and a Device Manager
+=======================================
+
+To use the dynamic method for device population, you need to use (or be
+sure to set) the :term:`USE_DEVFS`
+variable to "1", which is the default::
+
+ USE_DEVFS = "1"
+
+With this
+setting, the resulting ``/dev`` directory is populated by the kernel
+using ``devtmpfs``. Make sure the corresponding kernel configuration
+variable ``CONFIG_DEVTMPFS`` is set when you build a Linux kernel.
+
+All devices created by ``devtmpfs`` will be owned by ``root`` and have
+permissions ``0600``.
+
+To have more control over the device nodes, you can use a device manager like
+``udev`` or ``busybox-mdev``. You choose the device manager by defining the
+:term:`VIRTUAL-RUNTIME_dev_manager <VIRTUAL-RUNTIME>` variable in your machine
+or distro configuration file. Alternatively, you can set this variable in
+your ``local.conf`` configuration file::
+
+ VIRTUAL-RUNTIME_dev_manager = "udev"
+
+ # Some alternative values
+ # VIRTUAL-RUNTIME_dev_manager = "busybox-mdev"
+ # VIRTUAL-RUNTIME_dev_manager = "systemd"
+
diff --git a/documentation/dev-manual/disk-space.rst b/documentation/dev-manual/disk-space.rst
new file mode 100644
index 0000000000..6d1638a302
--- /dev/null
+++ b/documentation/dev-manual/disk-space.rst
@@ -0,0 +1,61 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Conserving Disk Space
+*********************
+
+Conserving Disk Space During Builds
+===================================
+
+To help conserve disk space during builds, you can add the following
+statement to your project's ``local.conf`` configuration file found in
+the :term:`Build Directory`::
+
+ INHERIT += "rm_work"
+
+Adding this statement deletes the work directory used for
+building a recipe once the recipe is built. For more information on
+"rm_work", see the :ref:`ref-classes-rm-work` class in the
+Yocto Project Reference Manual.
+
+When you inherit this class and build a ``core-image-sato`` image for a
+``qemux86-64`` machine from an Ubuntu 22.04 x86-64 system, you end up with a
+final disk usage of 22 Gbytes instead of &MIN_DISK_SPACE; Gbytes. However,
+&MIN_DISK_SPACE_RM_WORK; Gbytes of initial free disk space are still needed to
+create temporary files before they can be deleted.
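+
+If you need to keep the work directory for particular recipes (for example,
+while debugging them), the :ref:`ref-classes-rm-work` class honors an
+exclusion list; here is a sketch with an assumed recipe name::
+
+   RM_WORK_EXCLUDE += "busybox"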
+
+Purging Obsolete Shared State Cache Files
+=========================================
+
+After multiple build iterations, the Shared State (sstate) cache can contain
+multiple cache files for a given package, consuming a substantial amount of
+disk space. However, only the most recent ones are likely to be reused.
+
+The following command is a quick way to purge all the cache files which
+haven't been used for at least a specified number of days::
+
+ find build/sstate-cache -type f -mtime +$DAYS -delete
+
+The above command relies on the fact that BitBake touches the sstate cache
+files as it accesses them, when it has write access to the cache.
+
+For a read-only cache, where BitBake cannot update the modification times,
+you could use ``-atime`` instead of ``-mtime``, provided the partition isn't
+mounted with the ``noatime`` option.
+
+For more advanced needs, OpenEmbedded-Core also offers a more elaborate
+command. It has the ability to purge all but the newest cache files on each
+architecture, and also to remove files that it considers unreachable by
+exploring a set of build configurations. However, this command
+requires a full build environment to be available and doesn't work well
+covering multiple releases. It also won't work in limited environments
+such as a BSD-based NAS::
+
+ sstate-cache-management.sh --remove-duplicated --cache-dir=build/sstate-cache
+
+This command will ask you to confirm the deletions it identifies.
+Run ``sstate-cache-management.sh`` for more details about this script.
+
+.. note::
+
+ As this command is much more cautious and selective, removing only cache files,
+ it will execute much slower than the simple ``find`` command described above.
+ Therefore, it may not be your best option to trim huge cache directories.
diff --git a/documentation/dev-manual/efficiently-fetching-sources.rst b/documentation/dev-manual/efficiently-fetching-sources.rst
new file mode 100644
index 0000000000..fd9ad6aa69
--- /dev/null
+++ b/documentation/dev-manual/efficiently-fetching-sources.rst
@@ -0,0 +1,68 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Efficiently Fetching Source Files During a Build
+================================================
+
+The OpenEmbedded build system works with source files located through
+the :term:`SRC_URI` variable. When
+you build something using BitBake, a big part of the operation is
+locating and downloading all the source tarballs. For images,
+downloading all the source for various packages can take a significant
+amount of time.
+
+This section shows you how you can use mirrors to speed up fetching
+source files and how you can pre-fetch files, all of which leads to more
+efficient use of resources and time.
+
+Setting up Effective Mirrors
+----------------------------
+
+A good deal of what goes into a Yocto Project build is simply downloading
+all of the source tarballs. Maybe you have been working with another
+build system for which you have built up a
+sizable directory of source tarballs. Or, perhaps someone else has such
+a directory for which you have read access. If so, you can save time by
+adding statements to your configuration file so that the build process
+checks local directories first for existing tarballs before checking the
+Internet.
+
+Here is an efficient way to set it up in your ``local.conf`` file::
+
+ SOURCE_MIRROR_URL ?= "file:///home/you/your-download-dir/"
+ INHERIT += "own-mirrors"
+ BB_GENERATE_MIRROR_TARBALLS = "1"
+ # BB_NO_NETWORK = "1"
+
+In the previous example, the
+:term:`BB_GENERATE_MIRROR_TARBALLS`
+variable causes the OpenEmbedded build system to generate tarballs of
+the Git repositories and store them in the
+:term:`DL_DIR` directory. Due to
+performance reasons, generating and storing these tarballs is not the
+build system's default behavior.
+
+You can also use the
+:term:`PREMIRRORS` variable. For
+an example, see the variable's glossary entry in the Yocto Project
+Reference Manual.
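+
+A minimal sketch of such a setup, assuming a local mirror served over HTTP
+(the URL is a placeholder), could look like this in ``local.conf``::
+
+   PREMIRRORS:prepend = "\
+       git://.*/.* http://local-mirror.example.com/sources/ \
+       ftp://.*/.* http://local-mirror.example.com/sources/ \
+       http://.*/.* http://local-mirror.example.com/sources/ \
+       https://.*/.* http://local-mirror.example.com/sources/"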
+
+Getting Source Files and Suppressing the Build
+----------------------------------------------
+
+Another technique you can use to ready yourself for a successive string
+of build operations is to pre-fetch all the source files without
+actually starting a build. This technique lets you work through any
+download issues and ultimately gathers all the source files into your
+download directory :ref:`structure-build-downloads`,
+whose location is defined by :term:`DL_DIR`.
+
+Use the following BitBake command form to fetch all the necessary
+sources without starting the build::
+
+ $ bitbake target --runall=fetch
+
+This
+variation of the BitBake command guarantees that you have all the
+sources for that BitBake target should you disconnect from the Internet
+and want to do the build later offline.
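+
+Once the sources are fetched, you can optionally assert that the later build
+really is offline by disabling network access in ``local.conf``::
+
+   BB_NO_NETWORK = "1"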
+
diff --git a/documentation/dev-manual/error-reporting-tool.rst b/documentation/dev-manual/error-reporting-tool.rst
new file mode 100644
index 0000000000..84f3d9cd1e
--- /dev/null
+++ b/documentation/dev-manual/error-reporting-tool.rst
@@ -0,0 +1,84 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Using the Error Reporting Tool
+******************************
+
+The error reporting tool allows you to submit errors encountered during
+builds to a central database. Outside of the build environment, you can
+use a web interface to browse errors, view statistics, and query for
+errors. The tool works using a client-server system where the client
+portion is integrated with the installed Yocto Project
+:term:`Source Directory` (e.g. ``poky``).
+The server receives the information collected and saves it in a
+database.
+
+There is a live instance of the error reporting server at
+https://errors.yoctoproject.org.
+When you want to get help with build failures, you can submit all of the
+information on the failure easily and then point to the URL in your bug
+report or send an email to the mailing list.
+
+.. note::
+
+ If you send error reports to this server, the reports become publicly
+ visible.
+
+Enabling and Using the Tool
+===========================
+
+By default, the error reporting tool is disabled. You can enable it by
+inheriting the :ref:`ref-classes-report-error` class by adding the
+following statement to the end of your ``local.conf`` file in your
+:term:`Build Directory`::
+
+ INHERIT += "report-error"
+
+By default, the error reporting feature stores information in
+``${``\ :term:`LOG_DIR`\ ``}/error-report``.
+However, you can specify a directory to use by adding the following to
+your ``local.conf`` file::
+
+ ERR_REPORT_DIR = "path"
+
+Enabling error
+reporting causes the build process to collect the errors and store them
+in a file as previously described. When the build system encounters an
+error, it includes a command as part of the console output. You can run
+the command to send the error file to the server. For example, the
+following command sends the errors to an upstream server::
+
+ $ send-error-report /home/brandusa/project/poky/build/tmp/log/error-report/error_report_201403141617.txt
+
+In the previous example, the errors are sent to a public database
+available at https://errors.yoctoproject.org, which is used by the
+entire community. If you specify a particular server, you can send the
+errors to a different database. Use the following command for more
+information on available options::
+
+ $ send-error-report --help
+
+When sending the error file, you are prompted to review the data being
+sent as well as to provide a name and optional email address. Once you
+satisfy these prompts, the command returns a link from the server that
+corresponds to your entry in the database. For example, here is a
+typical link: https://errors.yoctoproject.org/Errors/Details/9522/
+
+Following the link takes you to a web interface where you can browse,
+query the errors, and view statistics.
+
+Disabling the Tool
+==================
+
+To disable the error reporting feature, simply remove or comment out the
+following statement from the end of your ``local.conf`` file in your
+:term:`Build Directory`::
+
+ INHERIT += "report-error"
+
+Setting Up Your Own Error Reporting Server
+==========================================
+
+If you want to set up your own error reporting server, you can obtain
+the code from the Git repository at :yocto_git:`/error-report-web/`.
+Instructions on how to set it up are in the README document.
+
diff --git a/documentation/dev-manual/external-scm.rst b/documentation/dev-manual/external-scm.rst
new file mode 100644
index 0000000000..97a7e63e36
--- /dev/null
+++ b/documentation/dev-manual/external-scm.rst
@@ -0,0 +1,67 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Using an External SCM
+*********************
+
+If you're working on a recipe that pulls from an external Source Code
+Manager (SCM), it is possible to have the OpenEmbedded build system
+notice new recipe changes added to the SCM and then build the resulting
+packages that depend on the new recipes by using the latest versions.
+This only works for SCMs from which it is possible to get a sensible
+revision number for changes. Currently, you can do this with Apache
+Subversion (SVN), Git, and Bazaar (BZR) repositories.
+
+To enable this behavior, the :term:`PV` of
+the recipe needs to reference
+:term:`SRCPV`. Here is an example::
+
+ PV = "1.2.3+git${SRCPV}"
+
+Then, you can add the following to your
+``local.conf``::
+
+ SRCREV:pn-PN = "${AUTOREV}"
+
+:term:`PN` is the name of the recipe for
+which you want to enable automatic source revision updating.
+
+If you do not want to update your local configuration file, you can add
+the following directly to the recipe to finish enabling the feature::
+
+ SRCREV = "${AUTOREV}"
+
+The Yocto Project provides a distribution named ``poky-bleeding``, whose
+configuration file contains the line::
+
+ require conf/distro/include/poky-floating-revisions.inc
+
+This line pulls in the
+listed include file that contains numerous lines of exactly that form::
+
+ #SRCREV:pn-opkg-native ?= "${AUTOREV}"
+ #SRCREV:pn-opkg-sdk ?= "${AUTOREV}"
+ #SRCREV:pn-opkg ?= "${AUTOREV}"
+ #SRCREV:pn-opkg-utils-native ?= "${AUTOREV}"
+ #SRCREV:pn-opkg-utils ?= "${AUTOREV}"
+ SRCREV:pn-gconf-dbus ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-common ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-config-gtk ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-desktop ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-keyboard ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-panel-2 ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-themes-extra ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-terminal ?= "${AUTOREV}"
+ SRCREV:pn-matchbox-wm ?= "${AUTOREV}"
+ SRCREV:pn-settings-daemon ?= "${AUTOREV}"
+ SRCREV:pn-screenshot ?= "${AUTOREV}"
+ . . .
+
+These lines allow you to
+experiment with building a distribution that tracks the latest
+development source for numerous packages.
+
+.. note::
+
+ The ``poky-bleeding`` distribution is not tested on a regular basis. Keep
+ this in mind if you use it.
+
diff --git a/documentation/dev-manual/external-toolchain.rst b/documentation/dev-manual/external-toolchain.rst
new file mode 100644
index 0000000000..238f8cf467
--- /dev/null
+++ b/documentation/dev-manual/external-toolchain.rst
@@ -0,0 +1,40 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Optionally Using an External Toolchain
+**************************************
+
+You might want to use an external toolchain as part of your development.
+If this is the case, the fundamental steps you need to accomplish are as
+follows:
+
+- Understand where the installed toolchain resides. For cases where you
+ need to build the external toolchain, you would need to take separate
+ steps to build and install the toolchain.
+
+- Make sure you add the layer that contains the toolchain to your
+ ``bblayers.conf`` file through the
+ :term:`BBLAYERS` variable.
+
+- Set the :term:`EXTERNAL_TOOLCHAIN` variable in your ``local.conf`` file
+ to the location in which you installed the toolchain.
+
+The toolchain configuration is very flexible and customizable. It
+is primarily controlled with the :term:`TCMODE` variable. This variable
+controls which ``tcmode-*.inc`` file to include from the
+``meta/conf/distro/include`` directory within the :term:`Source Directory`.
+
+The default value of :term:`TCMODE` is "default", which tells the
+OpenEmbedded build system to use its internally built toolchain (i.e.
+``tcmode-default.inc``). However, other patterns are accepted. In
+particular, "external-\*" refers to external toolchains. One example is
+the Mentor Graphics Sourcery G++ Toolchain. Support for this toolchain resides
+in the separate ``meta-sourcery`` layer at
+https://github.com/MentorEmbedded/meta-sourcery/.
+See its ``README`` file for details about how to use this layer.
+
+Another example of external toolchain layer is
+:yocto_git:`meta-arm-toolchain </meta-arm/tree/meta-arm-toolchain/>`
+supporting GNU toolchains released by ARM.
+
+You can find further information by reading about the :term:`TCMODE` variable
+in the Yocto Project Reference Manual's variable glossary.
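+
+Putting these pieces together, a hypothetical ``local.conf`` fragment for an
+external toolchain could look like the following (the :term:`TCMODE` value
+and install path depend on the toolchain layer you use)::
+
+   TCMODE = "external-sourcery"
+   EXTERNAL_TOOLCHAIN = "/opt/toolchains/sourcery-g++"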
diff --git a/documentation/dev-manual/gobject-introspection.rst b/documentation/dev-manual/gobject-introspection.rst
new file mode 100644
index 0000000000..f7206e6fae
--- /dev/null
+++ b/documentation/dev-manual/gobject-introspection.rst
@@ -0,0 +1,155 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Enabling GObject Introspection Support
+**************************************
+
+`GObject introspection <https://gi.readthedocs.io/en/latest/>`__
+is the standard mechanism for accessing GObject-based software from
+runtime environments. GObject is a feature of the GLib library that
+provides an object framework for the GNOME desktop and related software.
+GObject Introspection adds information to GObject that allows objects
+created within it to be represented across different programming
+languages. If you want to construct GStreamer pipelines using Python, or
+control UPnP infrastructure using JavaScript and GUPnP, GObject
+introspection is the only way to do it.
+
+This section describes the Yocto Project support for generating and
+packaging GObject introspection data. GObject introspection data is a
+description of the API provided by libraries built on top of the GLib
+framework, and, in particular, that framework's GObject mechanism.
+GObject Introspection Repository (GIR) files go to ``-dev`` packages, while
+``typelib`` files go to main packages, as they are packaged together with
+the libraries that are introspected.
+
+The data is generated when building such a library, by linking the
+library with a small executable binary that asks the library to describe
+itself, and then executing the binary and processing its output.
+
+Generating this data in a cross-compilation environment is difficult
+because the library is produced for the target architecture, but its
+code needs to be executed on the build host. This problem is solved with
+the OpenEmbedded build system by running the code through QEMU, which
+allows precisely that. Unfortunately, QEMU does not always work
+perfectly, as mentioned in the ":ref:`dev-manual/gobject-introspection:known issues`"
+section.
+
+Enabling the Generation of Introspection Data
+=============================================
+
+Enabling the generation of introspection data (GIR files) in your
+library package involves the following:
+
+#. Inherit the :ref:`ref-classes-gobject-introspection` class.
+
+#. Make sure introspection is not disabled anywhere in the recipe or
+ from anything the recipe includes. Also, make sure that
+ "gobject-introspection-data" is not in
+ :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED`
+ and that "qemu-usermode" is not in
+ :term:`MACHINE_FEATURES_BACKFILL_CONSIDERED`.
+   If either of these features appears in the corresponding variable, no
+   introspection data will be generated.
+
+#. Try to build the recipe. If you encounter build errors that look like
+ something is unable to find ``.so`` libraries, check where these
+ libraries are located in the source tree and add the following to the
+ recipe::
+
+ GIR_EXTRA_LIBS_PATH = "${B}/something/.libs"
+
+ .. note::
+
+ See recipes in the ``oe-core`` repository that use that
+ :term:`GIR_EXTRA_LIBS_PATH` variable as an example.
+
+#. Look for any other errors, which probably mean that introspection
+ support in a package is not entirely standard, and thus breaks down
+ in a cross-compilation environment. For such cases, custom-made fixes
+ are needed. A good place to ask and receive help in these cases is
+ the :ref:`Yocto Project mailing
+ lists <resources-mailinglist>`.
+
+.. note::
+
+   A library that no longer builds against the latest Yocto Project
+   release and prints introspection-related errors is a good candidate
+   for the previous procedure.
+
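+As a minimal sketch (the recipe name and library path are hypothetical), a
+recipe that enables introspection typically only needs to inherit the class
+and, if the helper binary cannot find the freshly built shared objects, to
+set :term:`GIR_EXTRA_LIBS_PATH`::
+
+   # Hypothetical fragment from a recipe such as hypothetical-lib_1.0.bb
+   inherit gobject-introspection
+
+   # Only needed if the introspection helper cannot find the built .so files
+   #GIR_EXTRA_LIBS_PATH = "${B}/src/.libs"
+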
+Disabling the Generation of Introspection Data
+==============================================
+
+You might find that you do not want to generate introspection data. Or,
+perhaps QEMU does not work on your build host and target architecture
+combination. If so, you can use either of the following methods to
+disable GIR file generation:
+
+- Add the following to your distro configuration::
+
+ DISTRO_FEATURES_BACKFILL_CONSIDERED = "gobject-introspection-data"
+
+ Adding this statement disables generating introspection data using
+ QEMU but will still enable building introspection tools and libraries
+ (i.e. building them does not require the use of QEMU).
+
+- Add the following to your machine configuration::
+
+ MACHINE_FEATURES_BACKFILL_CONSIDERED = "qemu-usermode"
+
+ Adding this statement disables the use of QEMU when building packages for your
+ machine. Currently, this feature is used only by introspection
+ recipes and has the same effect as the previously described option.
+
+ .. note::
+
+ Future releases of the Yocto Project might have other features
+ affected by this option.
+
+If you disable introspection data, you can still obtain it through other
+means such as copying the data from a suitable sysroot, or by generating
+it on the target hardware. The OpenEmbedded build system does not
+currently provide specific support for these techniques.
+
+Testing that Introspection Works in an Image
+============================================
+
+Use the following procedure to test if generating introspection data is
+working in an image:
+
+#. Make sure that "gobject-introspection-data" is not in
+ :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED`
+ and that "qemu-usermode" is not in
+ :term:`MACHINE_FEATURES_BACKFILL_CONSIDERED`.
+
+#. Build ``core-image-sato``.
+
+#. Launch a Terminal and then start Python in the terminal.
+
+#. Enter the following in the terminal::
+
+ >>> from gi.repository import GLib
+ >>> GLib.get_host_name()
+
+#. For something a little more advanced, see:
+ https://python-gtk-3-tutorial.readthedocs.io/en/latest/introduction.html
+
+Known Issues
+============
+
+Here are the known issues with GObject Introspection support:
+
+- ``qemu-ppc64`` immediately crashes. Consequently, you cannot build
+ introspection data on that architecture.
+
+- x32 is not supported by QEMU. Consequently, introspection data is
+ disabled.
+
+- musl causes transient GLib binaries to crash on assertion failures.
+ Consequently, generating introspection data is disabled.
+
+- Because QEMU is not able to run the binaries correctly, introspection
+ is disabled for some specific packages under specific architectures
+ (e.g. ``gcr``, ``libsecret``, and ``webkit``).
+
+- QEMU usermode might not work properly when running 64-bit binaries
+ under 32-bit host machines. In particular, "qemumips64" is known to
+ not work under i686.
+
diff --git a/documentation/dev-manual/index.rst b/documentation/dev-manual/index.rst
index f16b135c4d..9ccf60f701 100644
--- a/documentation/dev-manual/index.rst
+++ b/documentation/dev-manual/index.rst
@@ -4,15 +4,49 @@
Yocto Project Development Tasks Manual
======================================
-|
-
.. toctree::
:caption: Table of Contents
:numbered:
intro
start
- common-tasks
+ layers
+ customizing-images
+ new-recipe
+ new-machine
+ upgrading-recipes
+ temporary-source-code
+ quilt.rst
+ development-shell
+ python-development-shell
+ building
+ speeding-up-build
+ libraries
+ prebuilt-libraries
+ x32-psabi
+ gobject-introspection
+ external-toolchain
+ wic
+ bmaptool
+ securing-images
+ custom-distribution
+ custom-template-configuration-directory
+ disk-space
+ packages
+ efficiently-fetching-sources
+ init-manager
+ device-manager
+ external-scm
+ read-only-rootfs
+ build-quality
+ runtime-testing
+ debugging
+ licenses
+ security-subjects
+ vulnerabilities
+ sbom
+ error-reporting-tool
+ wayland
qemu
.. include:: /boilerplate.rst
diff --git a/documentation/dev-manual/init-manager.rst b/documentation/dev-manual/init-manager.rst
new file mode 100644
index 0000000000..ddce82b81f
--- /dev/null
+++ b/documentation/dev-manual/init-manager.rst
@@ -0,0 +1,162 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+.. _init-manager:
+
+Selecting an Initialization Manager
+***********************************
+
+By default, the Yocto Project uses :wikipedia:`SysVinit <Init#SysV-style>` as
+the initialization manager. There is also support for BusyBox init, a simpler
+implementation, as well as support for :wikipedia:`systemd <Systemd>`, which
+is a full replacement for init with parallel starting of services, reduced
+shell overhead, increased security and resource limits for services, and other
+features that are used by many distributions.
+
+Within the system, SysVinit and BusyBox init treat system components as
+services. These services are maintained as shell scripts stored in the
+``/etc/init.d/`` directory.
+
+SysVinit is more elaborate than BusyBox init and organizes services in
+different run levels. This organization is maintained by putting links
+to the services in the ``/etc/rcN.d/`` directories, where `N` is one
+of the following options: "S", "0", "1", "2", "3", "4", "5", or "6".
+
+.. note::
+
+ Each runlevel has a dependency on the previous runlevel. This
+ dependency allows the services to work properly.
+
+Both SysVinit and BusyBox init are configured through the ``/etc/inittab``
+file, with a very similar syntax, though of course BusyBox init features
+are more limited.
+
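+For illustration only, a minimal BusyBox-style ``/etc/inittab`` (the console
+device, baud rate and script names are assumptions) might contain entries
+such as::
+
+   ::sysinit:/etc/init.d/rcS
+   ::respawn:/sbin/getty 115200 ttyS0
+   ::shutdown:/etc/init.d/rcK
+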
+In comparison, systemd treats components as units. Using units is a
+broader concept as compared to using a service. A unit includes several
+different types of entities. ``Service`` is one of the types of entities.
+The runlevel concept in SysVinit corresponds to the concept of a target
+in systemd, where target is also a type of supported unit.
+
+In systems with SysVinit or BusyBox init, services load sequentially (i.e. one
+by one) during init and parallelization is not supported. With systemd, services
+start in parallel. This method can have an impact on the startup performance
+of a given service, though systemd will also provide more services by default,
+therefore increasing the total system boot time. systemd also substantially
+increases system size because of its multiple components and the extra
+dependencies it pulls.
+
+By contrast, BusyBox init is the simplest and lightest solution and
+also comes with BusyBox mdev as device manager, a lighter replacement to
+:wikipedia:`udev <Udev>`, which SysVinit and systemd both use.
+
+The ":ref:`device-manager`" chapter has more details about device managers.
+
+Using SysVinit with udev
+=========================
+
+SysVinit with the udev device manager corresponds to the
+default setting in Poky. This corresponds to setting::
+
+ INIT_MANAGER = "sysvinit"
+
+Using BusyBox init with BusyBox mdev
+====================================
+
+BusyBox init with BusyBox mdev is the simplest and lightest solution
+for small root filesystems. All you need is BusyBox, which most systems
+have anyway::
+
+ INIT_MANAGER = "mdev-busybox"
+
+Using systemd
+=============
+
+The last option is to use systemd together with the udev device
+manager. This is the most powerful and versatile solution, especially
+for more complex systems::
+
+ INIT_MANAGER = "systemd"
+
+This will enable systemd and remove sysvinit components from the image.
+See :yocto_git:`meta/conf/distro/include/init-manager-systemd.inc
+</poky/tree/meta/conf/distro/include/init-manager-systemd.inc>` for exact
+details on what this does.
+
+Controlling systemd from the target command line
+------------------------------------------------
+
+Here is a quick reference for controlling systemd from the command line on
+the target (see the short example after this list). Instead of opening and
+sometimes modifying files, most interaction happens through the
+``systemctl`` and ``journalctl`` commands:
+
+- ``systemctl status``: show the status of all services
+- ``systemctl status <service>``: show the status of one service
+- ``systemctl [start|stop] <service>``: start or stop a service
+- ``systemctl [enable|disable] <service>``: enable or disable a service at boot time
+- ``systemctl list-units``: list all available units
+- ``journalctl -a``: show all logs for all services
+- ``journalctl -f``: show only the last log entries, and keep printing updates as they arrive
+- ``journalctl -u``: show only logs from a particular service
+
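+For example, assuming a hypothetical service named ``myapp.service`` (the
+name is only an illustration), you could inspect and restart it, then follow
+its log output, as follows::
+
+   systemctl status myapp.service
+   systemctl restart myapp.service
+   journalctl -u myapp.service -f
+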
+Using systemd-journald without a traditional syslog daemon
+----------------------------------------------------------
+
+Counter-intuitively, ``systemd-journald`` is not a syslog runtime or provider,
+and the proper way to use ``systemd-journald`` as your sole logging mechanism is to
+effectively disable syslog entirely by setting these variables in your distribution
+configuration file::
+
+ VIRTUAL-RUNTIME_syslog = ""
+ VIRTUAL-RUNTIME_base-utils-syslog = ""
+
+Doing so will prevent ``rsyslog`` / ``busybox-syslog`` from being pulled in by
+default, leaving only ``systemd-journald``.
+
+Summary
+-------
+
+The Yocto Project supports three different initialization managers, offering
+increasing levels of complexity and functionality:
+
+.. list-table::
+ :widths: 40 20 20 20
+ :header-rows: 1
+
+ * -
+ - BusyBox init
+ - SysVinit
+ - systemd
+ * - Size
+ - Small
+ - Small
+ - Big [#footnote-systemd-size]_
+ * - Complexity
+     - Low
+ - Medium
+ - High
+ * - Support for boot profiles
+ - No
+ - Yes ("runlevels")
+ - Yes ("targets")
+ * - Services defined as
+ - Shell scripts
+ - Shell scripts
+ - Description files
+ * - Starting services in parallel
+ - No
+ - No
+ - Yes
+ * - Setting service resource limits
+ - No
+ - No
+ - Yes
+ * - Support service isolation
+ - No
+ - No
+ - Yes
+ * - Integrated logging
+ - No
+ - No
+ - Yes
+
+.. [#footnote-systemd-size] Using systemd increases the ``core-image-minimal``
+ image size by 160\% for ``qemux86-64`` on Mickledore (4.2), compared to SysVinit.
diff --git a/documentation/dev-manual/layers.rst b/documentation/dev-manual/layers.rst
new file mode 100644
index 0000000000..e3f56199be
--- /dev/null
+++ b/documentation/dev-manual/layers.rst
@@ -0,0 +1,853 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Understanding and Creating Layers
+*********************************
+
+The OpenEmbedded build system supports organizing
+:term:`Metadata` into multiple layers.
+Layers allow you to isolate different types of customizations from each
+other. For introductory information on the Yocto Project Layer Model,
+see the
+":ref:`overview-manual/yp-intro:the yocto project layer model`"
+section in the Yocto Project Overview and Concepts Manual.
+
+Creating Your Own Layer
+=======================
+
+.. note::
+
+ It is very easy to create your own layers to use with the OpenEmbedded
+ build system, as the Yocto Project ships with tools that speed up creating
+ layers. This section describes the steps you perform by hand to create
+ layers so that you can better understand them. For information about the
+ layer-creation tools, see the
+ ":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`"
+ section in the Yocto Project Board Support Package (BSP) Developer's
+ Guide and the ":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
+ section further down in this manual.
+
+Follow these general steps to create your layer without using tools:
+
+#. *Check Existing Layers:* Before creating a new layer, you should be
+ sure someone has not already created a layer containing the Metadata
+ you need. You can see the :oe_layerindex:`OpenEmbedded Metadata Index <>`
+ for a list of layers from the OpenEmbedded community that can be used in
+ the Yocto Project. You could find a layer that is identical or close
+ to what you need.
+
+#. *Create a Directory:* Create the directory for your layer. When you
+ create the layer, be sure to create the directory in an area not
+ associated with the Yocto Project :term:`Source Directory`
+ (e.g. the cloned ``poky`` repository).
+
+ While not strictly required, prepend the name of the directory with
+ the string "meta-". For example::
+
+ meta-mylayer
+ meta-GUI_xyz
+ meta-mymachine
+
+ With rare exceptions, a layer's name follows this form::
+
+ meta-root_name
+
+ Following this layer naming convention can save
+ you trouble later when tools, components, or variables "assume" your
+ layer name begins with "meta-". A notable example is in configuration
+ files as shown in the following step where layer names without the
+ "meta-" string are appended to several variables used in the
+ configuration.
+
+#. *Create a Layer Configuration File:* Inside your new layer folder,
+ you need to create a ``conf/layer.conf`` file. It is easiest to take
+ an existing layer configuration file and copy that to your layer's
+ ``conf`` directory and then modify the file as needed.
+
+ The ``meta-yocto-bsp/conf/layer.conf`` file in the Yocto Project
+ :yocto_git:`Source Repositories </poky/tree/meta-yocto-bsp/conf>`
+ demonstrates the required syntax. For your layer, you need to replace
+ "yoctobsp" with a unique identifier for your layer (e.g. "machinexyz"
+ for a layer named "meta-machinexyz")::
+
+ # We have a conf and classes directory, add to BBPATH
+ BBPATH .= ":${LAYERDIR}"
+
+ # We have recipes-* directories, add to BBFILES
+ BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+ ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+ BBFILE_COLLECTIONS += "yoctobsp"
+ BBFILE_PATTERN_yoctobsp = "^${LAYERDIR}/"
+ BBFILE_PRIORITY_yoctobsp = "5"
+ LAYERVERSION_yoctobsp = "4"
+ LAYERSERIES_COMPAT_yoctobsp = "dunfell"
+
+ Here is an explanation of the layer configuration file:
+
+ - :term:`BBPATH`: Adds the layer's
+ root directory to BitBake's search path. Through the use of the
+ :term:`BBPATH` variable, BitBake locates class files (``.bbclass``),
+ configuration files, and files that are included with ``include``
+ and ``require`` statements. For these cases, BitBake uses the
+ first file that matches the name found in :term:`BBPATH`. This is
+ similar to the way the ``PATH`` variable is used for binaries. It
+ is recommended, therefore, that you use unique class and
+ configuration filenames in your custom layer.
+
+ - :term:`BBFILES`: Defines the
+ location for all recipes in the layer.
+
+ - :term:`BBFILE_COLLECTIONS`:
+ Establishes the current layer through a unique identifier that is
+ used throughout the OpenEmbedded build system to refer to the
+ layer. In this example, the identifier "yoctobsp" is the
+ representation for the container layer named "meta-yocto-bsp".
+
+ - :term:`BBFILE_PATTERN`:
+ Expands immediately during parsing to provide the directory of the
+ layer.
+
+ - :term:`BBFILE_PRIORITY`:
+ Establishes a priority to use for recipes in the layer when the
+ OpenEmbedded build finds recipes of the same name in different
+ layers.
+
+ - :term:`LAYERVERSION`:
+ Establishes a version number for the layer. You can use this
+ version number to specify this exact version of the layer as a
+ dependency when using the
+ :term:`LAYERDEPENDS`
+ variable.
+
+ - :term:`LAYERDEPENDS`:
+ Lists all layers on which this layer depends (if any).
+
+ - :term:`LAYERSERIES_COMPAT`:
+ Lists the :yocto_wiki:`Yocto Project </Releases>`
+ releases for which the current version is compatible. This
+ variable is a good way to indicate if your particular layer is
+ current.
+
+
+ .. note::
+
+ A layer does not have to contain only recipes ``.bb`` or append files
+ ``.bbappend``. Generally, developers create layers using
+ ``bitbake-layers create-layer``.
+ See ":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`",
+ explaining how the ``layer.conf`` file is created from a template located in
+ ``meta/lib/bblayers/templates/layer.conf``.
+ In fact, none of the variables set in ``layer.conf`` are mandatory,
+ except when :term:`BBFILE_COLLECTIONS` is present. In this case
+ :term:`LAYERSERIES_COMPAT` and :term:`BBFILE_PATTERN` have to be
+ defined too.
+
+#. *Add Content:* Depending on the type of layer, add the content. If
+ the layer adds support for a machine, add the machine configuration
+ in a ``conf/machine/`` file within the layer. If the layer adds
+ distro policy, add the distro configuration in a ``conf/distro/``
+ file within the layer. If the layer introduces new recipes, put the
+   recipes you need in ``recipes-*`` subdirectories within the layer (see
+   the example layout after this list).
+
+ .. note::
+
+ For an explanation of layer hierarchy that is compliant with the
+ Yocto Project, see the ":ref:`bsp-guide/bsp:example filesystem layout`"
+ section in the Yocto Project Board Support Package (BSP) Developer's Guide.
+
+#. *Optionally Test for Compatibility:* If you want permission to use
+ the Yocto Project Compatibility logo with your layer or application
+ that uses your layer, perform the steps to apply for compatibility.
+ See the
+ ":ref:`dev-manual/layers:making sure your layer is compatible with yocto project`"
+ section for more information.
+
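+To recap the *Add Content* step above, a small layer that adds one machine
+and one recipe might be laid out as follows (all names below are
+hypothetical)::
+
+   meta-mylayer/
+       conf/
+           layer.conf
+           machine/
+               mymachine.conf
+       recipes-example/
+           example/
+               example_0.1.bb
+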
+Following Best Practices When Creating Layers
+=============================================
+
+To create layers that are easier to maintain and that will not impact
+builds for other machines, you should consider the information in the
+following list:
+
+- *Avoid "Overlaying" Entire Recipes from Other Layers in Your
+ Configuration:* In other words, do not copy an entire recipe into
+ your layer and then modify it. Rather, use an append file
+ (``.bbappend``) to override only those parts of the original recipe
+ you need to modify.
+
+- *Avoid Duplicating Include Files:* Use append files (``.bbappend``)
+ for each recipe that uses an include file. Or, if you are introducing
+ a new recipe that requires the included file, use the path relative
+ to the original layer directory to refer to the file. For example,
+ use ``require recipes-core/``\ `package`\ ``/``\ `file`\ ``.inc`` instead
+ of ``require`` `file`\ ``.inc``. If you're finding you have to overlay
+ the include file, it could indicate a deficiency in the include file
+ in the layer to which it originally belongs. If this is the case, you
+ should try to address that deficiency instead of overlaying the
+ include file. For example, you could address this by getting the
+ maintainer of the include file to add a variable or variables to make
+ it easy to override the parts needing to be overridden.
+
+- *Structure Your Layers:* Proper use of overrides within append files
+ and placement of machine-specific files within your layer can ensure
+ that a build is not using the wrong Metadata and negatively impacting
+ a build for a different machine. Here are some examples:
+
+ - *Modify Variables to Support a Different Machine:* Suppose you
+ have a layer named ``meta-one`` that adds support for building
+ machine "one". To do so, you use an append file named
+ ``base-files.bbappend`` and create a dependency on "foo" by
+ altering the :term:`DEPENDS`
+ variable::
+
+ DEPENDS = "foo"
+
+ The dependency is created during any
+ build that includes the layer ``meta-one``. However, you might not
+ want this dependency for all machines. For example, suppose you
+ are building for machine "two" but your ``bblayers.conf`` file has
+ the ``meta-one`` layer included. During the build, the
+ ``base-files`` for machine "two" will also have the dependency on
+ ``foo``.
+
+ To make sure your changes apply only when building machine "one",
+ use a machine override with the :term:`DEPENDS` statement::
+
+ DEPENDS:one = "foo"
+
+ You should follow the same strategy when using ``:append``
+ and ``:prepend`` operations::
+
+ DEPENDS:append:one = " foo"
+ DEPENDS:prepend:one = "foo "
+
+ As an actual example, here's a
+ snippet from the generic kernel include file ``linux-yocto.inc``,
+ wherein the kernel compile and link options are adjusted in the
+ case of a subset of the supported architectures::
+
+ DEPENDS:append:aarch64 = " libgcc"
+ KERNEL_CC:append:aarch64 = " ${TOOLCHAIN_OPTIONS}"
+ KERNEL_LD:append:aarch64 = " ${TOOLCHAIN_OPTIONS}"
+
+ DEPENDS:append:nios2 = " libgcc"
+ KERNEL_CC:append:nios2 = " ${TOOLCHAIN_OPTIONS}"
+ KERNEL_LD:append:nios2 = " ${TOOLCHAIN_OPTIONS}"
+
+ DEPENDS:append:arc = " libgcc"
+ KERNEL_CC:append:arc = " ${TOOLCHAIN_OPTIONS}"
+ KERNEL_LD:append:arc = " ${TOOLCHAIN_OPTIONS}"
+
+ KERNEL_FEATURES:append:qemuall=" features/debug/printk.scc"
+
+ - *Place Machine-Specific Files in Machine-Specific Locations:* When
+ you have a base recipe, such as ``base-files.bb``, that contains a
+ :term:`SRC_URI` statement to a
+ file, you can use an append file to cause the build to use your
+ own version of the file. For example, an append file in your layer
+ at ``meta-one/recipes-core/base-files/base-files.bbappend`` could
+ extend :term:`FILESPATH` using :term:`FILESEXTRAPATHS` as follows::
+
+ FILESEXTRAPATHS:prepend := "${THISDIR}/${BPN}:"
+
+ The build for machine "one" will pick up your machine-specific file as
+ long as you have the file in
+ ``meta-one/recipes-core/base-files/base-files/``. However, if you
+ are building for a different machine and the ``bblayers.conf``
+ file includes the ``meta-one`` layer and the location of your
+ machine-specific file is the first location where that file is
+ found according to :term:`FILESPATH`, builds for all machines will
+ also use that machine-specific file.
+
+ You can make sure that a machine-specific file is used for a
+ particular machine by putting the file in a subdirectory specific
+ to the machine. For example, rather than placing the file in
+ ``meta-one/recipes-core/base-files/base-files/`` as shown above,
+ put it in ``meta-one/recipes-core/base-files/base-files/one/``.
+ Not only does this make sure the file is used only when building
+ for machine "one", but the build process locates the file more
+ quickly.
+
+ In summary, you need to place all files referenced from
+ :term:`SRC_URI` in a machine-specific subdirectory within the layer in
+ order to restrict those files to machine-specific builds.
+
+- *Perform Steps to Apply for Yocto Project Compatibility:* If you want
+ permission to use the Yocto Project Compatibility logo with your
+ layer or application that uses your layer, perform the steps to apply
+ for compatibility. See the
+ ":ref:`dev-manual/layers:making sure your layer is compatible with yocto project`"
+ section for more information.
+
+- *Follow the Layer Naming Convention:* Store custom layers in a Git
+  repository that uses the ``meta-layer_name`` format.
+
+- *Group Your Layers Locally:* Clone your repository alongside other
+ cloned ``meta`` directories from the :term:`Source Directory`.
+
+Making Sure Your Layer is Compatible With Yocto Project
+=======================================================
+
+When you create a layer used with the Yocto Project, it is advantageous
+to make sure that the layer interacts well with existing Yocto Project
+layers (i.e. the layer is compatible with the Yocto Project). Ensuring
+compatibility makes the layer easy for others in the Yocto Project
+community to consume and could allow you permission to use the Yocto
+Project Compatible Logo.
+
+.. note::
+
+ Only Yocto Project member organizations are permitted to use the
+ Yocto Project Compatible Logo. The logo is not available for general
+ use. For information on how to become a Yocto Project member
+ organization, see the :yocto_home:`Yocto Project Website <>`.
+
+The Yocto Project Compatibility Program consists of a layer application
+process that requests permission to use the Yocto Project Compatibility
+Logo for your layer and application. The process consists of two parts:
+
+#. Successfully passing a script (``yocto-check-layer``) that when run
+ against your layer, tests it against constraints based on experiences
+ of how layers have worked in the real world and where pitfalls have
+ been found. Getting a "PASS" result from the script is required for
+ successful compatibility registration.
+
+#. Completion of an application acceptance form, which you can find at
+ :yocto_home:`/compatible-registration/`.
+
+To be granted permission to use the logo, you need to satisfy the
+following:
+
+- Be able to check the box indicating that you got a "PASS" when
+ running the script against your layer.
+
+- Answer "Yes" to the questions on the form or have an acceptable
+ explanation for any questions answered "No".
+
+- Be a Yocto Project Member Organization.
+
+The remainder of this section presents information on the registration
+form and on the ``yocto-check-layer`` script.
+
+Yocto Project Compatible Program Application
+--------------------------------------------
+
+Use the form to apply for your layer's approval. Upon successful
+application, you can use the Yocto Project Compatibility Logo with your
+layer and the application that uses your layer.
+
+To access the form, use this link:
+:yocto_home:`/compatible-registration`.
+Follow the instructions on the form to complete your application.
+
+The application consists of the following sections:
+
+- *Contact Information:* Provide your contact information as the fields
+ require. Along with your information, provide the released versions
+ of the Yocto Project for which your layer is compatible.
+
+- *Acceptance Criteria:* Provide "Yes" or "No" answers for each of the
+ items in the checklist. There is space at the bottom of the form for
+ any explanations for items for which you answered "No".
+
+- *Recommendations:* Provide answers for the questions regarding Linux
+ kernel use and build success.
+
+``yocto-check-layer`` Script
+----------------------------
+
+The ``yocto-check-layer`` script provides you a way to assess how
+compatible your layer is with the Yocto Project. You should run this
+script prior to using the form to apply for compatibility as described
+in the previous section. You need to achieve a "PASS" result in order to
+have your application form successfully processed.
+
+The script divides tests into three areas: COMMON, BSP, and DISTRO. For
+example, given a distribution layer (DISTRO), the layer must pass both
+the COMMON and DISTRO related tests. Furthermore, if your layer is a BSP
+layer, the layer must pass the COMMON and BSP set of tests.
+
+To execute the script, enter the following commands from your build
+directory::
+
+ $ source oe-init-build-env
+ $ yocto-check-layer your_layer_directory
+
+Be sure to provide the actual directory for your
+layer as part of the command.
+
+Entering the command causes the script to determine the type of layer
+and then to execute a set of specific tests against the layer. The
+following list summarizes the tests:
+
+- ``common.test_readme``: Tests if a ``README`` file exists in the
+ layer and the file is not empty.
+
+- ``common.test_parse``: Tests to make sure that BitBake can parse the
+ files without error (i.e. ``bitbake -p``).
+
+- ``common.test_show_environment``: Tests that the global or per-recipe
+ environment is in order without errors (i.e. ``bitbake -e``).
+
+- ``common.test_world``: Verifies that ``bitbake world`` works.
+
+- ``common.test_signatures``: Tests to be sure that BSP and DISTRO
+ layers do not come with recipes that change signatures.
+
+- ``common.test_layerseries_compat``: Verifies layer compatibility is
+ set properly.
+
+- ``bsp.test_bsp_defines_machines``: Tests if a BSP layer has machine
+ configurations.
+
+- ``bsp.test_bsp_no_set_machine``: Tests to ensure a BSP layer does not
+ set the machine when the layer is added.
+
+- ``bsp.test_machine_world``: Verifies that ``bitbake world`` works
+ regardless of which machine is selected.
+
+- ``bsp.test_machine_signatures``: Verifies that building for a
+ particular machine affects only the signature of tasks specific to
+ that machine.
+
+- ``distro.test_distro_defines_distros``: Tests if a DISTRO layer has
+ distro configurations.
+
+- ``distro.test_distro_no_set_distros``: Tests to ensure a DISTRO layer
+ does not set the distribution when the layer is added.
+
+Enabling Your Layer
+===================
+
+Before the OpenEmbedded build system can use your new layer, you need to
+enable it. To enable your layer, simply add your layer's path to the
+:term:`BBLAYERS` variable in your ``conf/bblayers.conf`` file, which is
+found in the :term:`Build Directory`. The following example shows how to
+enable your new ``meta-mylayer`` layer (note how your new layer exists
+outside of the official ``poky`` repository which you would have checked
+out earlier)::
+
+ # POKY_BBLAYERS_CONF_VERSION is increased each time build/conf/bblayers.conf
+ # changes incompatibly
+ POKY_BBLAYERS_CONF_VERSION = "2"
+ BBPATH = "${TOPDIR}"
+ BBFILES ?= ""
+ BBLAYERS ?= " \
+ /home/user/poky/meta \
+ /home/user/poky/meta-poky \
+ /home/user/poky/meta-yocto-bsp \
+ /home/user/mystuff/meta-mylayer \
+ "
+
+BitBake parses each ``conf/layer.conf`` file from the top down as
+specified in the :term:`BBLAYERS` variable within the ``conf/bblayers.conf``
+file. During the processing of each ``conf/layer.conf`` file, BitBake
+adds the recipes, classes and configurations contained within the
+particular layer to the source directory.
+
+Appending Other Layers Metadata With Your Layer
+===============================================
+
+A file that appends Metadata to another recipe is called a BitBake
+append file. A BitBake append file uses the ``.bbappend`` file type
+suffix, while the corresponding recipe to which Metadata is being
+appended uses the ``.bb`` file type suffix.
+
+You can use a ``.bbappend`` file in your layer to make additions or
+changes to the content of another layer's recipe without having to copy
+the other layer's recipe into your layer. Your ``.bbappend`` file
+resides in your layer, while the main ``.bb`` recipe file to which you
+are appending Metadata resides in a different layer.
+
+Being able to append information to an existing recipe not only avoids
+duplication, but also automatically applies recipe changes from a
+different layer into your layer. If you were copying recipes, you would
+have to manually merge changes as they occur.
+
+When you create an append file, you must use the same root name as the
+corresponding recipe file. For example, the append file
+``someapp_3.1.bbappend`` must apply to ``someapp_3.1.bb``. This
+means the original recipe and append filenames are version
+number-specific. If the corresponding recipe is renamed to update to a
+newer version, you must also rename and possibly update the
+corresponding ``.bbappend``. During the build process, BitBake
+displays an error on starting if it detects a ``.bbappend`` file that
+does not have a corresponding recipe with a matching name. See the
+:term:`BB_DANGLINGAPPENDS_WARNONLY`
+variable for information on how to handle this error.
+
+Overlaying a File Using Your Layer
+----------------------------------
+
+As an example, consider the main formfactor recipe and a corresponding
+formfactor append file both from the :term:`Source Directory`.
+Here is the main
+formfactor recipe, which is named ``formfactor_0.0.bb`` and located in
+the "meta" layer at ``meta/recipes-bsp/formfactor``::
+
+ SUMMARY = "Device formfactor information"
+ DESCRIPTION = "A formfactor configuration file provides information about the \
+ target hardware for which the image is being built and information that the \
+ build system cannot obtain from other sources such as the kernel."
+ SECTION = "base"
+ LICENSE = "MIT"
+ LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+ PR = "r45"
+
+ SRC_URI = "file://config file://machconfig"
+ S = "${WORKDIR}"
+
+ PACKAGE_ARCH = "${MACHINE_ARCH}"
+ INHIBIT_DEFAULT_DEPS = "1"
+
+ do_install() {
+ # Install file only if it has contents
+ install -d ${D}${sysconfdir}/formfactor/
+ install -m 0644 ${S}/config ${D}${sysconfdir}/formfactor/
+ if [ -s "${S}/machconfig" ]; then
+ install -m 0644 ${S}/machconfig ${D}${sysconfdir}/formfactor/
+ fi
+ }
+
+In the main recipe, note the :term:`SRC_URI`
+variable, which tells the OpenEmbedded build system where to find files
+during the build.
+
+Here is the append file, which is named ``formfactor_0.0.bbappend``
+and is from the Raspberry Pi BSP Layer named ``meta-raspberrypi``. The
+file is in the layer at ``recipes-bsp/formfactor``::
+
+ FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
+
+By default, the build system uses the
+:term:`FILESPATH` variable to
+locate files. This append file extends the locations by setting the
+:term:`FILESEXTRAPATHS`
+variable. Setting this variable in the ``.bbappend`` file is the most
+reliable and recommended method for adding directories to the search
+path used by the build system to find files.
+
+The statement in this example extends the directories to include
+``${``\ :term:`THISDIR`\ ``}/${``\ :term:`PN`\ ``}``,
+which resolves to a directory named ``formfactor`` in the same directory
+in which the append file resides (i.e.
+``meta-raspberrypi/recipes-bsp/formfactor``). This implies that you must
+have the supporting directory structure set up that will contain any
+files or patches you will be including from the layer.
+
+Using the immediate expansion assignment operator ``:=`` is important
+because of the reference to :term:`THISDIR`. The trailing colon character is
+important as it ensures that items in the list remain colon-separated.
+
+.. note::
+
+ BitBake automatically defines the :term:`THISDIR` variable. You should
+ never set this variable yourself. Using ":prepend" as part of the
+ :term:`FILESEXTRAPATHS` ensures your path will be searched prior to other
+ paths in the final list.
+
+ Also, not all append files add extra files. Many append files simply
+   allow you to add build options (e.g. ``systemd``). For these cases, your
+ append file would not even use the :term:`FILESEXTRAPATHS` statement.
+
+The end result of this ``.bbappend`` file is that on a Raspberry Pi, where
+``rpi`` will exist in the list of :term:`OVERRIDES`, the file
+``meta-raspberrypi/recipes-bsp/formfactor/formfactor/rpi/machconfig`` will be
+used during :ref:`ref-tasks-fetch` and the test for a non-zero file size in
+:ref:`ref-tasks-install` will return true, and the file will be installed.
+
+Installing Additional Files Using Your Layer
+--------------------------------------------
+
+As another example, consider the main ``xserver-xf86-config`` recipe and a
+corresponding ``xserver-xf86-config`` append file both from the :term:`Source
+Directory`. Here is the main ``xserver-xf86-config`` recipe, which is named
+``xserver-xf86-config_0.1.bb`` and located in the "meta" layer at
+``meta/recipes-graphics/xorg-xserver``::
+
+ SUMMARY = "X.Org X server configuration file"
+ HOMEPAGE = "http://www.x.org"
+ SECTION = "x11/base"
+ LICENSE = "MIT"
+ LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+ PR = "r33"
+
+ SRC_URI = "file://xorg.conf"
+
+ S = "${WORKDIR}"
+
+ CONFFILES:${PN} = "${sysconfdir}/X11/xorg.conf"
+
+ PACKAGE_ARCH = "${MACHINE_ARCH}"
+ ALLOW_EMPTY:${PN} = "1"
+
+ do_install () {
+ if test -s ${WORKDIR}/xorg.conf; then
+ install -d ${D}/${sysconfdir}/X11
+ install -m 0644 ${WORKDIR}/xorg.conf ${D}/${sysconfdir}/X11/
+ fi
+ }
+
+Here is the append file, which is named ``xserver-xf86-config_%.bbappend``
+and is from the Raspberry Pi BSP Layer named ``meta-raspberrypi``. The
+file is in the layer at ``recipes-graphics/xorg-xserver``::
+
+ FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
+
+ SRC_URI:append:rpi = " \
+ file://xorg.conf.d/98-pitft.conf \
+ file://xorg.conf.d/99-calibration.conf \
+ "
+ do_install:append:rpi () {
+ PITFT="${@bb.utils.contains("MACHINE_FEATURES", "pitft", "1", "0", d)}"
+ if [ "${PITFT}" = "1" ]; then
+ install -d ${D}/${sysconfdir}/X11/xorg.conf.d/
+ install -m 0644 ${WORKDIR}/xorg.conf.d/98-pitft.conf ${D}/${sysconfdir}/X11/xorg.conf.d/
+ install -m 0644 ${WORKDIR}/xorg.conf.d/99-calibration.conf ${D}/${sysconfdir}/X11/xorg.conf.d/
+ fi
+ }
+
+ FILES:${PN}:append:rpi = " ${sysconfdir}/X11/xorg.conf.d/*"
+
+Building on the previous example, we once again set the
+:term:`FILESEXTRAPATHS` variable. In this case we are also using
+:term:`SRC_URI` to list additional source files to use when ``rpi`` is found in
+the list of :term:`OVERRIDES`. The :ref:`ref-tasks-install` task will then perform a
+check for an additional :term:`MACHINE_FEATURES` that if set will cause these
+additional files to be installed. These additional files are listed in
+:term:`FILES` so that they will be packaged.
+
+Prioritizing Your Layer
+=======================
+
+Each layer is assigned a priority value. Priority values control which
+layer takes precedence if there are recipe files with the same name in
+multiple layers. For these cases, the recipe file from the layer with a
+higher priority number takes precedence. Priority values also affect the
+order in which multiple ``.bbappend`` files for the same recipe are
+applied. You can either specify the priority manually, or allow the
+build system to calculate it based on the layer's dependencies.
+
+To specify the layer's priority manually, use the
+:term:`BBFILE_PRIORITY`
+variable and append the layer's root name::
+
+ BBFILE_PRIORITY_mylayer = "1"
+
+.. note::
+
+ It is possible for a recipe with a lower version number
+ :term:`PV` in a layer that has a higher
+ priority to take precedence.
+
+ Also, the layer priority does not currently affect the precedence
+ order of ``.conf`` or ``.bbclass`` files. Future versions of BitBake
+ might address this.
+
+Managing Layers
+===============
+
+You can use the BitBake layer management tool ``bitbake-layers`` to
+provide a view into the structure of recipes across a multi-layer
+project. Being able to generate output that reports on configured layers
+with their paths and priorities and on ``.bbappend`` files and their
+applicable recipes can help to reveal potential problems.
+
+For help on the BitBake layer management tool, use the following
+command::
+
+ $ bitbake-layers --help
+
+The following list describes the available commands:
+
+- ``help:`` Displays general help or help on a specified command.
+
+- ``show-layers:`` Shows the current configured layers.
+
+- ``show-overlayed:`` Lists overlayed recipes. A recipe is overlayed
+ when a recipe with the same name exists in another layer that has a
+ higher layer priority.
+
+- ``show-recipes:`` Lists available recipes and the layers that
+ provide them.
+
+- ``show-appends:`` Lists ``.bbappend`` files and the recipe files to
+ which they apply.
+
+- ``show-cross-depends:`` Lists dependency relationships between
+ recipes that cross layer boundaries.
+
+- ``add-layer:`` Adds a layer to ``bblayers.conf``.
+
+- ``remove-layer:`` Removes a layer from ``bblayers.conf``
+
+- ``flatten:`` Flattens the layer configuration into a separate
+ output directory. Flattening your layer configuration builds a
+ "flattened" directory that contains the contents of all layers, with
+ any overlayed recipes removed and any ``.bbappend`` files appended to
+ the corresponding recipes. You might have to perform some manual
+ cleanup of the flattened layer as follows:
+
+ - Non-recipe files (such as patches) are overwritten. The flatten
+ command shows a warning for these files.
+
+   - Anything beyond the normal layer setup that has been added to a
+     ``layer.conf`` file needs manual attention, because only the lowest
+     priority layer's ``layer.conf`` is used in the flattened output.
+
+ - Overridden and appended items from ``.bbappend`` files need to be
+ cleaned up. The contents of each ``.bbappend`` end up in the
+ flattened recipe. However, if there are appended or changed
+ variable values, you need to tidy these up yourself. Consider the
+ following example. Here, the ``bitbake-layers`` command adds the
+ line ``#### bbappended ...`` so that you know where the following
+ lines originate::
+
+ ...
+ DESCRIPTION = "A useful utility"
+ ...
+ EXTRA_OECONF = "--enable-something"
+ ...
+
+ #### bbappended from meta-anotherlayer ####
+
+ DESCRIPTION = "Customized utility"
+ EXTRA_OECONF += "--enable-somethingelse"
+
+
+     Ideally, you would tidy up the flattened recipe as follows::
+
+ ...
+ DESCRIPTION = "Customized utility"
+ ...
+ EXTRA_OECONF = "--enable-something --enable-somethingelse"
+ ...
+
+- ``layerindex-fetch``: Fetches a layer from a layer index, along
+ with its dependent layers, and adds the layers to the
+ ``conf/bblayers.conf`` file.
+
+- ``layerindex-show-depends``: Finds layer dependencies from the
+ layer index.
+
+- ``create-layer``: Creates a basic layer.
+
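+As an example of the ``flatten`` command described above, the following
+writes the flattened layer configuration into a directory named
+``flattened-output`` (the directory name is only an illustration)::
+
+   $ bitbake-layers flatten flattened-output
+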
+Creating a General Layer Using the ``bitbake-layers`` Script
+============================================================
+
+The ``bitbake-layers`` script with the ``create-layer`` subcommand
+simplifies creating a new general layer.
+
+.. note::
+
+ - For information on BSP layers, see the ":ref:`bsp-guide/bsp:bsp layers`"
+ section in the Yocto
+ Project Board Specific (BSP) Developer's Guide.
+
+ - In order to use a layer with the OpenEmbedded build system, you
+ need to add the layer to your ``bblayers.conf`` configuration
+ file. See the ":ref:`dev-manual/layers:adding a layer using the \`\`bitbake-layers\`\` script`"
+ section for more information.
+
+The default mode of the script's operation with this subcommand is to
+create a layer with the following:
+
+- A layer priority of 6.
+
+- A ``conf`` subdirectory that contains a ``layer.conf`` file.
+
+- A ``recipes-example`` subdirectory that contains a further
+ subdirectory named ``example``, which contains an ``example.bb``
+ recipe file.
+
+- A ``COPYING.MIT`` file, which is the license statement for the layer. The
+ script assumes you want to use the MIT license, which is typical for
+ most layers, for the contents of the layer itself.
+
+- A ``README`` file, which is a file describing the contents of your
+ new layer.
+
+In its simplest form, you can use the following command form to create a
+layer. The command creates a layer whose name corresponds to
+"your_layer_name" in the current directory::
+
+ $ bitbake-layers create-layer your_layer_name
+
+As an example, the following command creates a layer named ``meta-scottrif``
+in your home directory::
+
+ $ cd /usr/home
+ $ bitbake-layers create-layer meta-scottrif
+ NOTE: Starting bitbake server...
+ Add your new layer with 'bitbake-layers add-layer meta-scottrif'
+
+If you want to set the priority of the layer to other than the default
+value of "6", you can either use the ``--priority`` option or you
+can edit the
+:term:`BBFILE_PRIORITY` value
+in the ``conf/layer.conf`` after the script creates it. Furthermore, if
+you want to give the example recipe file some name other than the
+default, you can use the ``--example-recipe-name`` option.
+
+The easiest way to see how the ``bitbake-layers create-layer`` command
+works is to experiment with the script. You can also read the usage
+information by entering the following::
+
+ $ bitbake-layers create-layer --help
+ NOTE: Starting bitbake server...
+ usage: bitbake-layers create-layer [-h] [--priority PRIORITY]
+ [--example-recipe-name EXAMPLERECIPE]
+ layerdir
+
+ Create a basic layer
+
+ positional arguments:
+ layerdir Layer directory to create
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --priority PRIORITY, -p PRIORITY
+ Layer directory to create
+ --example-recipe-name EXAMPLERECIPE, -e EXAMPLERECIPE
+ Filename of the example recipe
+
+Adding a Layer Using the ``bitbake-layers`` Script
+==================================================
+
+Once you create your general layer, you must add it to your
+``bblayers.conf`` file. Adding the layer to this configuration file
+makes the OpenEmbedded build system aware of your layer so that it can
+search it for metadata.
+
+Add your layer by using the ``bitbake-layers add-layer`` command::
+
+ $ bitbake-layers add-layer your_layer_name
+
+Here is an example that adds a
+layer named ``meta-scottrif`` to the configuration file. Following the
+command that adds the layer is another ``bitbake-layers`` command that
+shows the layers that are in your ``bblayers.conf`` file::
+
+ $ bitbake-layers add-layer meta-scottrif
+ NOTE: Starting bitbake server...
+ Parsing recipes: 100% |##########################################################| Time: 0:00:49
+ Parsing of 1441 .bb files complete (0 cached, 1441 parsed). 2055 targets, 56 skipped, 0 masked, 0 errors.
+ $ bitbake-layers show-layers
+ NOTE: Starting bitbake server...
+ layer path priority
+ ==========================================================================
+ meta /home/scottrif/poky/meta 5
+ meta-poky /home/scottrif/poky/meta-poky 5
+ meta-yocto-bsp /home/scottrif/poky/meta-yocto-bsp 5
+ workspace /home/scottrif/poky/build/workspace 99
+ meta-scottrif /home/scottrif/poky/build/meta-scottrif 6
+
+
+Adding the layer to this file
+enables the build system to locate the layer during the build.
+
+.. note::
+
+ During a build, the OpenEmbedded build system looks in the layers
+ from the top of the list down to the bottom in that order.
+
diff --git a/documentation/dev-manual/libraries.rst b/documentation/dev-manual/libraries.rst
new file mode 100644
index 0000000000..521dbb9a7c
--- /dev/null
+++ b/documentation/dev-manual/libraries.rst
@@ -0,0 +1,267 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Working With Libraries
+**********************
+
+Libraries are an integral part of your system. This section describes
+some common practices you might find helpful when working with libraries
+to build your system:
+
+- :ref:`How to include static library files
+ <dev-manual/libraries:including static library files>`
+
+- :ref:`How to use the Multilib feature to combine multiple versions of
+ library files into a single image
+ <dev-manual/libraries:combining multiple versions of library files into one image>`
+
+- :ref:`How to install multiple versions of the same library in parallel on
+ the same system
+ <dev-manual/libraries:installing multiple versions of the same library>`
+
+Including Static Library Files
+==============================
+
+If you are building a library and the library offers static linking, you
+can control which static library files (``*.a`` files) get included in
+the built library.
+
+The :term:`PACKAGES` and
+:term:`FILES:* <FILES>` variables in the
+``meta/conf/bitbake.conf`` configuration file define how files installed
+by the :ref:`ref-tasks-install` task are packaged. By default, the :term:`PACKAGES`
+variable includes ``${PN}-staticdev``, which represents all static
+library files.
+
+.. note::
+
+ Some previously released versions of the Yocto Project defined the
+ static library files through ``${PN}-dev``.
+
+Here is the part of the BitBake configuration file, where you can see
+how the static library files are defined::
+
+ PACKAGE_BEFORE_PN ?= ""
+ PACKAGES = "${PN}-src ${PN}-dbg ${PN}-staticdev ${PN}-dev ${PN}-doc ${PN}-locale ${PACKAGE_BEFORE_PN} ${PN}"
+ PACKAGES_DYNAMIC = "^${PN}-locale-.*"
+ FILES = ""
+
+ FILES:${PN} = "${bindir}/* ${sbindir}/* ${libexecdir}/* ${libdir}/lib*${SOLIBS} \
+ ${sysconfdir} ${sharedstatedir} ${localstatedir} \
+ ${base_bindir}/* ${base_sbindir}/* \
+ ${base_libdir}/*${SOLIBS} \
+ ${base_prefix}/lib/udev ${prefix}/lib/udev \
+ ${base_libdir}/udev ${libdir}/udev \
+ ${datadir}/${BPN} ${libdir}/${BPN}/* \
+ ${datadir}/pixmaps ${datadir}/applications \
+ ${datadir}/idl ${datadir}/omf ${datadir}/sounds \
+ ${libdir}/bonobo/servers"
+
+ FILES:${PN}-bin = "${bindir}/* ${sbindir}/*"
+
+ FILES:${PN}-doc = "${docdir} ${mandir} ${infodir} ${datadir}/gtk-doc \
+ ${datadir}/gnome/help"
+ SECTION:${PN}-doc = "doc"
+
+ FILES_SOLIBSDEV ?= "${base_libdir}/lib*${SOLIBSDEV} ${libdir}/lib*${SOLIBSDEV}"
+ FILES:${PN}-dev = "${includedir} ${FILES_SOLIBSDEV} ${libdir}/*.la \
+ ${libdir}/*.o ${libdir}/pkgconfig ${datadir}/pkgconfig \
+ ${datadir}/aclocal ${base_libdir}/*.o \
+ ${libdir}/${BPN}/*.la ${base_libdir}/*.la \
+ ${libdir}/cmake ${datadir}/cmake"
+ SECTION:${PN}-dev = "devel"
+ ALLOW_EMPTY:${PN}-dev = "1"
+ RDEPENDS:${PN}-dev = "${PN} (= ${EXTENDPKGV})"
+
+ FILES:${PN}-staticdev = "${libdir}/*.a ${base_libdir}/*.a ${libdir}/${BPN}/*.a"
+ SECTION:${PN}-staticdev = "devel"
+ RDEPENDS:${PN}-staticdev = "${PN}-dev (= ${EXTENDPKGV})"
+
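+For example, to have the static archives of a given library installed into
+your image, you could append its ``-staticdev`` package to
+:term:`IMAGE_INSTALL` in ``local.conf`` (the recipe name below is only an
+illustration)::
+
+   IMAGE_INSTALL:append = " hypothetical-lib-staticdev"
+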
+Combining Multiple Versions of Library Files into One Image
+===========================================================
+
+The build system offers the ability to build libraries with different
+target optimizations or architecture formats and combine these together
+into one system image. You can link different binaries in the image
+against the different libraries as needed for specific use cases. This
+feature is called "Multilib".
+
+An example would be where you have most of a system compiled in 32-bit
+mode using 32-bit libraries, but you have something large, like a
+database engine, that needs to be a 64-bit application and uses 64-bit
+libraries. Multilib allows you to get the best of both 32-bit and 64-bit
+libraries.
+
+While the Multilib feature is most commonly used for 32 and 64-bit
+differences, the approach the build system uses facilitates different
+target optimizations. You could compile some binaries to use one set of
+libraries and other binaries to use a different set of libraries. The
+libraries could differ in architecture, compiler options, or other
+optimizations.
+
+There are several examples in the ``meta-skeleton`` layer found in the
+:term:`Source Directory`:
+
+- :oe_git:`conf/multilib-example.conf </openembedded-core/tree/meta-skeleton/conf/multilib-example.conf>`
+ configuration file.
+
+- :oe_git:`conf/multilib-example2.conf </openembedded-core/tree/meta-skeleton/conf/multilib-example2.conf>`
+ configuration file.
+
+- :oe_git:`recipes-multilib/images/core-image-multilib-example.bb </openembedded-core/tree/meta-skeleton/recipes-multilib/images/core-image-multilib-example.bb>`
+ recipe
+
+Preparing to Use Multilib
+-------------------------
+
+User-specific requirements drive the Multilib feature. Consequently,
+there is no one "out-of-the-box" configuration that would
+meet your needs.
+
+In order to enable Multilib, you first need to ensure your recipe is
+extended to support multiple libraries. Many standard recipes are
+already extended and support multiple libraries. You can check in the
+``meta/conf/multilib.conf`` configuration file in the
+:term:`Source Directory` to see how this is
+done using the
+:term:`BBCLASSEXTEND` variable.
+Eventually, all recipes will be covered and this list will not be
+needed.
+
+For the most part, the :ref:`Multilib <ref-classes-multilib*>`
+class extension works automatically to
+extend the package name from ``${PN}`` to ``${MLPREFIX}${PN}``, where
+:term:`MLPREFIX` is the particular multilib (e.g. "lib32-" or "lib64-").
+Standard variables such as
+:term:`DEPENDS`,
+:term:`RDEPENDS`,
+:term:`RPROVIDES`,
+:term:`RRECOMMENDS`,
+:term:`PACKAGES`, and
+:term:`PACKAGES_DYNAMIC` are
+automatically extended by the system. If you are extending any manual
+code in the recipe, you can use the ``${MLPREFIX}`` variable to ensure
+those names are extended correctly.
+
+Using Multilib
+--------------
+
+After you have set up the recipes, you need to define the actual
+combination of multiple libraries you want to build. You accomplish this
+through your ``local.conf`` configuration file in the
+:term:`Build Directory`. An example configuration would be as follows::
+
+ MACHINE = "qemux86-64"
+ require conf/multilib.conf
+ MULTILIBS = "multilib:lib32"
+ DEFAULTTUNE:virtclass-multilib-lib32 = "x86"
+ IMAGE_INSTALL:append = " lib32-glib-2.0"
+
+This example enables an additional library named
+``lib32`` alongside the normal target packages. When combining these
+"lib32" alternatives, the example uses "x86" for tuning. For information
+on this particular tuning, see
+``meta/conf/machine/include/ia32/arch-ia32.inc``.
+
+The example then includes ``lib32-glib-2.0`` in all the images, which
+illustrates one method of including a multiple library dependency. You
+can use a normal image build to include this dependency, for example::
+
+ $ bitbake core-image-sato
+
+You can also build Multilib packages
+specifically with a command like this::
+
+ $ bitbake lib32-glib-2.0
+
+Additional Implementation Details
+---------------------------------
+
+There are generic implementation details as well as details that are specific to
+package management systems. Here are implementation details
+that exist regardless of the package management system:
+
+- The typical convention used for the class extension code as used by
+ Multilib assumes that all package names specified in
+ :term:`PACKAGES` that contain
+ ``${PN}`` have ``${PN}`` at the start of the name. When that
+  convention is not followed and ``${PN}`` appears in the middle or at the
+  end of a name, problems occur.
+
+- The :term:`TARGET_VENDOR`
+ value under Multilib will be extended to "-vendormlmultilib" (e.g.
+ "-pokymllib32" for a "lib32" Multilib with Poky). The reason for this
+ slightly unwieldy contraction is that any "-" characters in the
+ vendor string presently break Autoconf's ``config.sub``, and other
+ separators are problematic for different reasons.
+
+Here are the implementation details for the RPM Package Management System:
+
+- A unique architecture is defined for the Multilib packages, along
+ with creating a unique deploy folder under ``tmp/deploy/rpm`` in the
+ :term:`Build Directory`. For example, consider ``lib32`` in a
+ ``qemux86-64`` image. The possible architectures in the system are "all",
+ "qemux86_64", "lib32:qemux86_64", and "lib32:x86".
+
+- The ``${MLPREFIX}`` variable is stripped from ``${PN}`` during RPM
+ packaging. The naming for a normal RPM package and a Multilib RPM
+ package in a ``qemux86-64`` system resolves to something similar to
+ ``bash-4.1-r2.x86_64.rpm`` and ``bash-4.1-r2.lib32_x86.rpm``,
+ respectively.
+
+- When installing a Multilib image, the RPM backend first installs the
+ base image and then installs the Multilib libraries.
+
+- The build system relies on RPM to resolve the identical files in the
+ two (or more) Multilib packages.
+
+Here are the implementation details for the IPK Package Management System:
+
+- The ``${MLPREFIX}`` is not stripped from ``${PN}`` during IPK
+ packaging. The naming for a normal IPK package and a Multilib IPK
+ package in a ``qemux86-64`` system resolves to something like
+ ``bash_4.1-r2.x86_64.ipk`` and ``lib32-bash_4.1-r2.lib32_x86.ipk``,
+ respectively.
+
+- The IPK deploy folder is not modified with ``${MLPREFIX}`` because
+ packages with and without the Multilib feature can exist in the same
+ folder due to the ``${PN}`` differences.
+
+- IPK defines a sanity check for Multilib installation using certain
+ rules for file comparison, overrides, and so forth.
+
+Installing Multiple Versions of the Same Library
+================================================
+
+There can be situations where you need to install and use multiple versions
+of the same library on the same system at the same time. This
+almost always happens when a library API changes and you have
+multiple pieces of software that depend on the separate versions of the
+library. To accommodate these situations, you can install multiple
+versions of the same library in parallel on the same system.
+
+The process is straightforward as long as the libraries use proper
+versioning. With properly versioned libraries, all you need to do to
+individually specify the libraries is create separate, appropriately
+named recipes where the :term:`PN` part of
+the name includes a portion that differentiates each library version
+(e.g. the major part of the version number). Thus, instead of having a
+single recipe that loads one version of a library (e.g. ``clutter``),
+you provide multiple recipes that result in different versions of the
+libraries you want. As an example, the following two recipes would allow
+the two separate versions of the ``clutter`` library to co-exist on the
+same system:
+
+.. code-block:: none
+
+ clutter-1.6_1.6.20.bb
+ clutter-1.8_1.8.4.bb
+
+Additionally, if
+you have other recipes that depend on a given library, you need to use
+the :term:`DEPENDS` variable to
+create the dependency. Continuing with the same example, if you want to
+have a recipe depend on the 1.8 version of the ``clutter`` library, use
+the following in your recipe::
+
+ DEPENDS = "clutter-1.8"
+
diff --git a/documentation/dev-manual/licenses.rst b/documentation/dev-manual/licenses.rst
new file mode 100644
index 0000000000..197712b68d
--- /dev/null
+++ b/documentation/dev-manual/licenses.rst
@@ -0,0 +1,539 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Working With Licenses
+*********************
+
+As mentioned in the ":ref:`overview-manual/development-environment:licensing`"
+section in the Yocto Project Overview and Concepts Manual, open source
+projects are open to the public and they consequently have different
+licensing structures in place. This section describes the mechanism by
+which the :term:`OpenEmbedded Build System`
+tracks changes to
+licensing text and covers how to maintain open source license compliance
+during your project's lifecycle. The section also describes how to
+enable commercially licensed recipes, which by default are disabled.
+
+Tracking License Changes
+========================
+
+The license of an upstream project might change in the future. In order
+to prevent these changes going unnoticed, the
+:term:`LIC_FILES_CHKSUM`
+variable tracks changes to the license text. The checksums are validated
+at the end of the configure step, and if the checksums do not match, the
+build will fail.
+
+Specifying the ``LIC_FILES_CHKSUM`` Variable
+--------------------------------------------
+
+The :term:`LIC_FILES_CHKSUM` variable contains checksums of the license text
+in the source code for the recipe. Here is an example of how to
+specify :term:`LIC_FILES_CHKSUM`::
+
+ LIC_FILES_CHKSUM = "file://COPYING;md5=xxxx \
+ file://licfile1.txt;beginline=5;endline=29;md5=yyyy \
+ file://licfile2.txt;endline=50;md5=zzzz \
+ ..."
+
+.. note::
+
+ - When using "beginline" and "endline", realize that line numbering
+ begins with one and not zero. Also, the included lines are
+ inclusive (i.e. lines five through and including 29 in the
+ previous example for ``licfile1.txt``).
+
+ - When a license check fails, the selected license text is included
+ as part of the QA message. Using this output, you can determine
+ the exact start and finish for the needed license text.
+
+The build system uses the :term:`S`
+variable as the default directory when searching files listed in
+:term:`LIC_FILES_CHKSUM`. The previous example employs the default
+directory.
+
+Consider this next example::
+
+ LIC_FILES_CHKSUM = "file://src/ls.c;beginline=5;endline=16;\
+ md5=bb14ed3c4cda583abc85401304b5cd4e"
+ LIC_FILES_CHKSUM = "file://${WORKDIR}/license.html;md5=5c94767cedb5d6987c902ac850ded2c6"
+
+The first line locates a file in ``${S}/src/ls.c`` and isolates lines
+five through 16 as license text. The second line refers to a file in
+:term:`WORKDIR`.
+
+Note that the :term:`LIC_FILES_CHKSUM` variable is mandatory for all recipes,
+unless the :term:`LICENSE` variable is set to "CLOSED".
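+
+For example, a recipe for proprietary software with no published license text
+could simply contain the following (a minimal sketch)::
+
+ LICENSE = "CLOSED"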
+
+Explanation of Syntax
+---------------------
+
+As mentioned in the previous section, the :term:`LIC_FILES_CHKSUM` variable
+lists all the important files that contain the license text for the
+source code. It is possible to specify a checksum for an entire file, or
+a specific section of a file (specified by beginning and ending line
+numbers with the "beginline" and "endline" parameters, respectively).
+The latter is useful for source files with a license notice header,
+README documents, and so forth. If you do not use the "beginline"
+parameter, then it is assumed that the text begins on the first line of
+the file. Similarly, if you do not use the "endline" parameter, it is
+assumed that the license text ends with the last line of the file.
+
+The "md5" parameter stores the md5 checksum of the license text. If the
+license text changes in any way as compared to this parameter then a
+mismatch occurs. This mismatch triggers a build failure and notifies the
+developer. Notification allows the developer to review and address the
+license text changes. Also note that if a mismatch occurs during the
+build, the correct md5 checksum is placed in the build log and can be
+easily copied to the recipe.
+
+There is no limit to how many files you can specify using the
+:term:`LIC_FILES_CHKSUM` variable. Generally, however, every project
+requires a few specifications for license tracking. Many projects have a
+"COPYING" file that stores the license information for all the source
+code files. This practice allows you to just track the "COPYING" file as
+long as it is kept up to date.
+
+.. note::
+
+ - If you specify an empty or invalid "md5" parameter,
+ :term:`BitBake` returns an md5
+ mis-match error and displays the correct "md5" parameter value
+ during the build. The correct parameter is also captured in the
+ build log.
+
+ - If the whole file contains only license text, you do not need to
+ use the "beginline" and "endline" parameters.
+
+Enabling Commercially Licensed Recipes
+======================================
+
+By default, the OpenEmbedded build system disables components that have
+commercial or other special licensing requirements. Such requirements
+are defined on a recipe-by-recipe basis through the
+:term:`LICENSE_FLAGS` variable
+definition in the affected recipe. For instance, the
+``poky/meta/recipes-multimedia/gstreamer/gst-plugins-ugly`` recipe
+contains the following statement::
+
+ LICENSE_FLAGS = "commercial"
+
+Here is a
+slightly more complicated example that contains both an explicit recipe
+name and version (after variable expansion)::
+
+ LICENSE_FLAGS = "license_${PN}_${PV}"
+
+In order for a component restricted by a
+:term:`LICENSE_FLAGS` definition to be enabled and included in an image, it
+needs to have a matching entry in the global
+:term:`LICENSE_FLAGS_ACCEPTED`
+variable, which is a variable typically defined in your ``local.conf``
+file. For example, to enable the
+``poky/meta/recipes-multimedia/gstreamer/gst-plugins-ugly`` package, you
+could add either the string "commercial_gst-plugins-ugly" or the more
+general string "commercial" to :term:`LICENSE_FLAGS_ACCEPTED`. See the
+":ref:`dev-manual/licenses:license flag matching`" section for a full
+explanation of how :term:`LICENSE_FLAGS` matching works. Here is the
+example::
+
+ LICENSE_FLAGS_ACCEPTED = "commercial_gst-plugins-ugly"
+
+Likewise, to additionally enable the package built from the recipe
+containing ``LICENSE_FLAGS = "license_${PN}_${PV}"``, and assuming that
+the actual recipe name was ``emgd_1.10.bb``, the following string would
+enable that package as well as the original ``gst-plugins-ugly``
+package::
+
+ LICENSE_FLAGS_ACCEPTED = "commercial_gst-plugins-ugly license_emgd_1.10"
+
+As a convenience, you do not need to specify the
+complete license string for every package. You can use
+an abbreviated form, which consists of just the first portion or
+portions of the license string before the initial underscore character
+or characters. A partial string will match any license that contains the
+given string as the first portion of its license. For example, the
+following value will also match both of the packages
+previously mentioned as well as any other packages that have licenses
+starting with "commercial" or "license"::
+
+ LICENSE_FLAGS_ACCEPTED = "commercial license"
+
+License Flag Matching
+---------------------
+
+License flag matching allows you to control what recipes the
+OpenEmbedded build system includes in the build. Fundamentally, the
+build system attempts to match :term:`LICENSE_FLAGS` strings found in
+recipes against strings found in :term:`LICENSE_FLAGS_ACCEPTED`.
+A match causes the build system to include a recipe in the
+build, while failure to find a match causes the build system to exclude
+a recipe.
+
+In general, license flag matching is simple. However, understanding some
+concepts will help you correctly and effectively use matching.
+
+Before a flag defined by a particular recipe is tested against the
+entries of :term:`LICENSE_FLAGS_ACCEPTED`, the expanded
+string ``_${PN}`` is appended to the flag. This expansion makes each
+:term:`LICENSE_FLAGS` value recipe-specific. After expansion, the
+string is then matched against the entries. Thus, specifying
+``LICENSE_FLAGS = "commercial"`` in recipe "foo", for example, results
+in the string ``"commercial_foo"``. And, to create a match, that string
+must appear among the entries of :term:`LICENSE_FLAGS_ACCEPTED`.
+
+Judicious use of the :term:`LICENSE_FLAGS` strings and the contents of the
+:term:`LICENSE_FLAGS_ACCEPTED` variable allows you a lot of flexibility for
+including or excluding recipes based on licensing. For example, you can
+broaden the matching capabilities by using license flags string subsets
+in :term:`LICENSE_FLAGS_ACCEPTED`.
+
+.. note::
+
+ When using a string subset, be sure to use the part of the expanded
+ string that precedes the appended underscore character (e.g.
+ ``usethispart_1.3``, ``usethispart_1.4``, and so forth).
+
+For example, simply specifying the string "commercial" in the
+:term:`LICENSE_FLAGS_ACCEPTED` variable matches any expanded
+:term:`LICENSE_FLAGS` definition that starts with the string
+"commercial" such as "commercial_foo" and "commercial_bar", which
+are the strings the build system automatically generates for
+hypothetical recipes named "foo" and "bar" assuming those recipes simply
+specify the following::
+
+ LICENSE_FLAGS = "commercial"
+
+Thus, you can choose to exhaustively enumerate each license flag in the
+list and allow only specific recipes into the image, or you can use a
+string subset that causes a broader range of matches to allow a range of
+recipes into the image.
+
+This scheme works even if the :term:`LICENSE_FLAGS` string already has
+``_${PN}`` appended. For example, the build system turns the license
+flag "commercial_1.2_foo" into "commercial_1.2_foo_foo" and would match
+both the general "commercial" and the specific "commercial_1.2_foo"
+strings found in the :term:`LICENSE_FLAGS_ACCEPTED` variable, as expected.
+
+Here are some other scenarios:
+
+- You can specify a versioned string in the recipe such as
+ "commercial_foo_1.2" in a "foo" recipe. The build system expands this
+ string to "commercial_foo_1.2_foo". Combine this license flag with a
+ :term:`LICENSE_FLAGS_ACCEPTED` variable that has the string
+ "commercial" and you match the flag along with any other flag that
+ starts with the string "commercial".
+
+- Under the same circumstances, you can add "commercial_foo" in the
+ :term:`LICENSE_FLAGS_ACCEPTED` variable and the build system not only
+ matches "commercial_foo_1.2" but also matches any license flag with
+ the string "commercial_foo", regardless of the version.
+
+- You can be very specific and use both the package and version parts
+ in the :term:`LICENSE_FLAGS_ACCEPTED` list (e.g.
+ "commercial_foo_1.2") to specifically match a versioned recipe.
+
+Other Variables Related to Commercial Licenses
+----------------------------------------------
+
+There are other helpful variables related to commercial license handling,
+defined in the
+``poky/meta/conf/distro/include/default-distrovars.inc`` file::
+
+ COMMERCIAL_AUDIO_PLUGINS ?= ""
+ COMMERCIAL_VIDEO_PLUGINS ?= ""
+
+If you want to enable these components, you can do so by making sure you have
+statements similar to the following in your ``local.conf`` configuration file::
+
+ COMMERCIAL_AUDIO_PLUGINS = "gst-plugins-ugly-mad \
+ gst-plugins-ugly-mpegaudioparse"
+ COMMERCIAL_VIDEO_PLUGINS = "gst-plugins-ugly-mpeg2dec \
+ gst-plugins-ugly-mpegstream gst-plugins-bad-mpegvideoparse"
+ LICENSE_FLAGS_ACCEPTED = "commercial_gst-plugins-ugly commercial_gst-plugins-bad commercial_qmmp"
+
+Of course, you could also create a matching list for those components using the
+more general "commercial" string in the :term:`LICENSE_FLAGS_ACCEPTED` variable,
+but that would also enable all the other packages with :term:`LICENSE_FLAGS`
+containing "commercial", which you may or may not want::
+
+ LICENSE_FLAGS_ACCEPTED = "commercial"
+
+Specifying audio and video plugins as part of the
+:term:`COMMERCIAL_AUDIO_PLUGINS` and :term:`COMMERCIAL_VIDEO_PLUGINS` statements
+(along with :term:`LICENSE_FLAGS_ACCEPTED`) includes the plugins or
+components into built images, thus adding support for media formats or
+components.
+
+.. note::
+
+ GStreamer "ugly" and "bad" plugins are actually available through
+ open source licenses. However, the "ugly" ones can be subject to software
+ patents in some countries, making it necessary to pay licensing fees
+ to distribute them. The "bad" ones are just deemed unreliable by the
+ GStreamer community and should therefore be used with care.
+
+Maintaining Open Source License Compliance During Your Product's Lifecycle
+==========================================================================
+
+One of the concerns for a development organization using open source
+software is how to maintain compliance with the various open source
+licenses involved during the lifecycle of the product. While this section does
+not provide legal advice or comprehensively cover all scenarios, it does
+present methods that you can use to assist you in meeting the compliance
+requirements during a software release.
+
+With hundreds of different open source licenses that the Yocto Project
+tracks, it is difficult to know the requirements of each and every
+license. However, the requirements of the major FLOSS licenses can begin
+to be covered by assuming that there are three main areas of concern:
+
+- Source code must be provided.
+
+- License text for the software must be provided.
+
+- Compilation scripts and modifications to the source code must be
+ provided.
+
+- :term:`SPDX` files can be provided.
+
+There are other requirements beyond the scope of these three and the
+methods described in this section (e.g. the mechanism through which
+source code is distributed).
+
+As different organizations have different ways of releasing software,
+there can be multiple ways of meeting license obligations. Here,
+we describe two methods for achieving compliance:
+
+- The first method is to use OpenEmbedded's ability to provide
+ the source code, provide a list of licenses, as well as
+ compilation scripts and source code modifications.
+
+ The remainder of this section describes supported methods to meet
+ the previously mentioned three requirements.
+
+- The second method is to generate a *Software Bill of Materials*
+ (:term:`SBoM`), as described in the ":doc:`/dev-manual/sbom`" section.
+ Not only do you generate :term:`SPDX` output which can be used to meet
+ license compliance requirements (except for sharing the build system
+ and layers sources for the time being), but this output also includes
+ component version and patch information which can be used
+ for vulnerability assessment.
+
+Whatever method you choose, prior to releasing images, sources,
+and the build system, you should audit all artifacts to ensure
+completeness.
+
+.. note::
+
+ The Yocto Project generates a license manifest during image creation
+ that is located in
+ ``${DEPLOY_DIR}/licenses/<image-name>-<machine>.rootfs-<datestamp>/``
+ to assist with any audits.
+
+Providing the Source Code
+-------------------------
+
+Compliance activities should begin before you generate the final image.
+The first thing you should look at is the requirement that tops the list
+for most compliance groups --- providing the source. The Yocto Project has
+a few ways of meeting this requirement.
+
+One of the easiest ways to meet this requirement is to provide the
+entire :term:`DL_DIR` used by the
+build. This method, however, has a few issues. The most obvious is the
+size of the directory since it includes all sources used in the build
+and not just the source used in the released image. It will include
+toolchain source, and other artifacts, which you would not generally
+release. However, the more serious issue for most companies is
+accidental release of proprietary software. The Yocto Project provides
+an :ref:`ref-classes-archiver` class to help avoid some of these concerns.
+
+Before you employ :term:`DL_DIR` or the :ref:`ref-classes-archiver` class, you
+need to decide how you choose to provide source. The source
+:ref:`ref-classes-archiver` class can generate tarballs and SRPMs and can
+create them with various levels of compliance in mind.
+
+One way of doing this (but certainly not the only way) is to release
+just the source as a tarball. You can do this by adding the following to
+the ``local.conf`` file found in the :term:`Build Directory`::
+
+ INHERIT += "archiver"
+ ARCHIVER_MODE[src] = "original"
+
+During the creation of your
+image, the source from all recipes that deploy packages to the image is
+placed within subdirectories of ``DEPLOY_DIR/sources`` based on the
+:term:`LICENSE` for each recipe.
+Releasing the entire directory enables you to comply with requirements
+concerning providing the unmodified source. It is important to note that
+the size of the directory can get large.
+
+A way to help mitigate the size issue is to only release tarballs for
+licenses that require the release of source. Let us assume you are only
+concerned with GPL code as identified by running the following script:
+
+.. code-block:: shell
+
+ #!/bin/bash
+ # Script to archive a subset of packages matching specific license(s)
+ # Source and license files are copied into sub-folders of the package folder
+ # Must be run from the build folder
+ src_release_dir="source-release"
+ mkdir -p $src_release_dir
+ for a in tmp/deploy/sources/*; do
+ for d in $a/*; do
+ # Get package name from path
+ p=`basename $d`
+ p=${p%-*}
+ p=${p%-*}
+ # Only archive GPL packages (update *GPL* regex for your license check)
+ numfiles=`ls tmp/deploy/licenses/$p/*GPL* 2> /dev/null | wc -l`
+ if [ $numfiles -ge 1 ]; then
+ echo Archiving $p
+ mkdir -p $src_release_dir/$p/source
+ cp $d/* $src_release_dir/$p/source 2> /dev/null
+ mkdir -p $src_release_dir/$p/license
+ cp tmp/deploy/licenses/$p/* $src_release_dir/$p/license 2> /dev/null
+ fi
+ done
+ done
+
+At this point, you
+could create a tarball from the ``source-release`` directory and
+provide that to the end user. This method would be a step toward
+achieving compliance with section 3a of GPLv2 and with section 6 of
+GPLv3.
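+
+For example, a minimal way to package that directory for delivery might be
+(a sketch only, run from the same build folder as the script above)::
+
+ $ tar -czf source-release.tar.gz source-release/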
+
+Providing License Text
+----------------------
+
+One requirement that is often overlooked is inclusion of license text.
+This requirement also needs to be dealt with prior to generating the
+final image. Some licenses require the license text to accompany the
+binary. You can achieve this by adding the following to your
+``local.conf`` file::
+
+ COPY_LIC_MANIFEST = "1"
+ COPY_LIC_DIRS = "1"
+ LICENSE_CREATE_PACKAGE = "1"
+
+Adding these statements to the
+configuration file ensures that the licenses collected during package
+generation are included on your image.
+
+.. note::
+
+ Setting all three variables to "1" results in the image having two
+ copies of the same license file. One copy resides in
+ ``/usr/share/common-licenses`` and the other resides in
+ ``/usr/share/license``.
+
+ The reason for this behavior is that
+ :term:`COPY_LIC_DIRS` and
+ :term:`COPY_LIC_MANIFEST`
+ add a copy of the license when the image is built but do not offer a
+ path for adding licenses for newly installed packages to an image.
+ :term:`LICENSE_CREATE_PACKAGE`
+ adds a separate package and an upgrade path for adding licenses to an
+ image.
+
+As the source :ref:`ref-classes-archiver` class has already archived the
+original unmodified source that contains the license files, you would have
+already met the requirements for inclusion of the license information
+with source as defined by the GPL and other open source licenses.
+
+Providing Compilation Scripts and Source Code Modifications
+-----------------------------------------------------------
+
+At this point, we have addressed all we need prior to generating the
+image. The next two requirements are addressed during the final
+packaging of the release.
+
+By releasing the version of the OpenEmbedded build system and the layers
+used during the build, you will be providing both compilation scripts
+and the source code modifications in one step.
+
+If the deployment team has a :ref:`overview-manual/concepts:bsp layer`
+and a distro layer, and those
+layers are used to patch, compile, package, or modify (in any way)
+any open source software included in your released images, you might be
+required to release those layers under section 3 of GPLv2 or section 1
+of GPLv3. One way of doing that is with a clean checkout of the version
+of the Yocto Project and layers used during your build. Here is an
+example:
+
+.. code-block:: shell
+
+ # We built using the dunfell branch of the poky repo
+ $ git clone -b dunfell git://git.yoctoproject.org/poky
+ $ cd poky
+ # We built using the release_branch for our layers
+ $ git clone -b release_branch git://git.mycompany.com/meta-my-bsp-layer
+ $ git clone -b release_branch git://git.mycompany.com/meta-my-software-layer
+ # clean up the .git repos
+ $ find . -name ".git" -type d -exec rm -rf {} \;
+
+One
+thing a development organization might want to consider for end-user
+convenience is to modify ``meta-poky/conf/bblayers.conf.sample`` to
+ensure that when the end user utilizes the released build system to
+build an image, the development organization's layers are included in
+the ``bblayers.conf`` file automatically::
+
+ # POKY_BBLAYERS_CONF_VERSION is increased each time build/conf/bblayers.conf
+ # changes incompatibly
+ POKY_BBLAYERS_CONF_VERSION = "2"
+
+ BBPATH = "${TOPDIR}"
+ BBFILES ?= ""
+
+ BBLAYERS ?= " \
+ ##OEROOT##/meta \
+ ##OEROOT##/meta-poky \
+ ##OEROOT##/meta-yocto-bsp \
+ ##OEROOT##/meta-mylayer \
+ "
+
+Creating and
+providing an archive of the :term:`Metadata`
+layers (recipes, configuration files, and so forth) enables you to meet
+your requirements to include the scripts to control compilation as well
+as any modifications to the original source.
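+
+One possible way to create such an archive, assuming the cleaned-up checkout
+from the previous example resides in a ``poky`` directory, would be::
+
+ $ tar -czf release-metadata.tar.gz poky/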
+
+Compliance Limitations with Executables Built from Static Libraries
+-------------------------------------------------------------------
+
+When package A is added to an image, either via the :term:`RDEPENDS` or
+:term:`RRECOMMENDS` mechanisms or by being explicitly included in the image
+recipe with :term:`IMAGE_INSTALL`, and depends on a statically linked library
+recipe B (``DEPENDS += "B"``), package B will neither appear in the generated
+license manifest nor in the generated source tarballs. This occurs because the
+:ref:`ref-classes-license` and :ref:`ref-classes-archiver` classes assume that
+only packages included via :term:`RDEPENDS` or :term:`RRECOMMENDS`
+end up in the image.
+
+As a result, potential obligations regarding license compliance for package B
+may not be met.
+
+The Yocto Project doesn't enable static libraries by default, in part because
+of this issue. Until a solution to this limitation is found, you need to
+keep in mind that if your root filesystem is built from static libraries,
+you will need to manually ensure that your deliveries are compliant
+with the licenses of these libraries.
+
+Copying Non Standard Licenses
+=============================
+
+Some packages, such as the linux-firmware package, have many licenses
+that are not in any way common. You can avoid adding a lot of these
+license files, which are only applicable to a specific
+package, by using the
+:term:`NO_GENERIC_LICENSE`
+variable. Using this variable also avoids QA errors when you use a
+non-common, non-CLOSED license in a recipe.
+
+Here is an example that uses the ``LICENSE.Abilis.txt`` file as
+the license from the fetched source::
+
+ NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENSE.Abilis.txt"
+
diff --git a/documentation/dev-manual/new-machine.rst b/documentation/dev-manual/new-machine.rst
new file mode 100644
index 0000000000..d007c1631f
--- /dev/null
+++ b/documentation/dev-manual/new-machine.rst
@@ -0,0 +1,118 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Adding a New Machine
+====================
+
+Adding a new machine to the Yocto Project is a straightforward process.
+This section describes how to add machines that are similar to those
+that the Yocto Project already supports.
+
+.. note::
+
+ Although well within the capabilities of the Yocto Project, adding a
+ totally new architecture might require changes to ``gcc``/``glibc``
+ and to the site information, which is beyond the scope of this
+ manual.
+
+For a complete example that shows how to add a new machine, see the
+":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`"
+section in the Yocto Project Board Support Package (BSP) Developer's
+Guide.
+
+Adding the Machine Configuration File
+=====================================
+
+To add a new machine, you need to add a new machine configuration file
+to the layer's ``conf/machine`` directory. This configuration file
+provides details about the device you are adding.
+
+The OpenEmbedded build system uses the root name of the machine
+configuration file to reference the new machine. For example, given a
+machine configuration file named ``crownbay.conf``, the build system
+recognizes the machine as "crownbay".
+
+The most important variables you must set in your machine configuration
+file or include from a lower-level configuration file are as follows:
+
+- :term:`TARGET_ARCH` (e.g. "arm")
+
+- ``PREFERRED_PROVIDER_virtual/kernel``
+
+- :term:`MACHINE_FEATURES` (e.g. "apm screen wifi")
+
+You might also need these variables:
+
+- :term:`SERIAL_CONSOLES` (e.g. "115200;ttyS0 115200;ttyS1")
+
+- :term:`KERNEL_IMAGETYPE` (e.g. "zImage")
+
+- :term:`IMAGE_FSTYPES` (e.g. "tar.gz jffs2")
+
+You can find full details on these variables in the reference section.
+You can leverage existing machine ``.conf`` files from
+``meta-yocto-bsp/conf/machine/``.
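+
+As a starting point only, a hypothetical ``conf/machine/mymachine.conf``
+combining the variables above might look like the following (all values
+are illustrative and must be adapted to your hardware)::
+
+ TARGET_ARCH = "arm"
+ PREFERRED_PROVIDER_virtual/kernel = "linux-yocto"
+ MACHINE_FEATURES = "apm screen wifi"
+
+ SERIAL_CONSOLES = "115200;ttyS0"
+ KERNEL_IMAGETYPE = "zImage"
+ IMAGE_FSTYPES = "tar.gz jffs2"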
+
+Adding a Kernel for the Machine
+===============================
+
+The OpenEmbedded build system needs to be able to build a kernel for the
+machine. You need to either create a new kernel recipe for this machine,
+or extend an existing kernel recipe. You can find several kernel recipe
+examples in the Source Directory at ``meta/recipes-kernel/linux`` that
+you can use as references.
+
+If you are creating a new kernel recipe, normal recipe-writing rules
+apply for setting up a :term:`SRC_URI`. Thus, you need to specify any
+necessary patches and set :term:`S` to point at the source code. You need to
+create a :ref:`ref-tasks-configure` task that configures the unpacked kernel with
+a ``defconfig`` file. You can do this by using a ``make defconfig``
+command or, more commonly, by copying in a suitable ``defconfig`` file
+and then running ``make oldconfig``. By making use of ``inherit kernel``
+and potentially some of the ``linux-*.inc`` files, most other
+functionality is centralized and the defaults of the class normally work
+well.
+
+If you are extending an existing kernel recipe, it is usually a matter
+of adding a suitable ``defconfig`` file. The file needs to be added into
+a location similar to ``defconfig`` files used for other machines in a
+given kernel recipe. A possible way to do this is by listing the file in
+the :term:`SRC_URI` and adding the machine to the expression in
+:term:`COMPATIBLE_MACHINE`::
+
+ COMPATIBLE_MACHINE = '(qemux86|qemumips)'
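+
+For example, a hypothetical ``linux-yocto_%.bbappend`` in your BSP layer that
+adds a ``defconfig`` for a new "mymachine" machine might contain the following
+(a sketch only; file and machine names are illustrative)::
+
+ FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
+ SRC_URI:append:mymachine = " file://defconfig"
+ COMPATIBLE_MACHINE:mymachine = "mymachine"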
+
+For more information on ``defconfig`` files, see the
+":ref:`kernel-dev/common:changing the configuration`"
+section in the Yocto Project Linux Kernel Development Manual.
+
+Adding a Formfactor Configuration File
+======================================
+
+A formfactor configuration file provides information about the target
+hardware for which the image is being built and information that the
+build system cannot obtain from other sources such as the kernel. Some
+examples of information contained in a formfactor configuration file
+include framebuffer orientation, whether or not the system has a
+keyboard, the positioning of the keyboard in relation to the screen, and
+the screen resolution.
+
+The build system uses reasonable defaults in most cases. However, if
+customization is necessary, you need to create a ``machconfig`` file in
+the ``meta/recipes-bsp/formfactor/files`` directory. This directory
+contains directories for specific machines such as ``qemuarm`` and
+``qemux86``. For information about the settings available and the
+defaults, see the ``meta/recipes-bsp/formfactor/files/config`` file
+found in the same area.
+
+Here is an example for the "qemuarm" machine::
+
+ HAVE_TOUCHSCREEN=1
+ HAVE_KEYBOARD=1
+ DISPLAY_CAN_ROTATE=0
+ DISPLAY_ORIENTATION=0
+ #DISPLAY_WIDTH_PIXELS=640
+ #DISPLAY_HEIGHT_PIXELS=480
+ #DISPLAY_BPP=16
+ DISPLAY_DPI=150
+ DISPLAY_SUBPIXEL_ORDER=vrgb
+
diff --git a/documentation/dev-manual/new-recipe.rst b/documentation/dev-manual/new-recipe.rst
new file mode 100644
index 0000000000..be52610621
--- /dev/null
+++ b/documentation/dev-manual/new-recipe.rst
@@ -0,0 +1,1640 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Writing a New Recipe
+********************
+
+Recipes (``.bb`` files) are fundamental components in the Yocto Project
+environment. Each software component built by the OpenEmbedded build
+system requires a recipe to define the component. This section describes
+how to create, write, and test a new recipe.
+
+.. note::
+
+ For information on variables that are useful for recipes and for
+ information about recipe naming issues, see the
+ ":ref:`ref-manual/varlocality:recipes`" section of the Yocto Project
+ Reference Manual.
+
+Overview
+========
+
+The following figure shows the basic process for creating a new recipe.
+The remainder of the section provides details for the steps.
+
+.. image:: figures/recipe-workflow.png
+ :align: center
+ :width: 50%
+
+Locate or Automatically Create a Base Recipe
+============================================
+
+You can always write a recipe from scratch. However, there are three choices
+that can help you quickly get started with a new recipe:
+
+- ``devtool add``: A command that assists in creating a recipe and an
+ environment conducive to development.
+
+- ``recipetool create``: A command provided by the Yocto Project that
+ automates creation of a base recipe based on the source files.
+
+- *Existing Recipes:* Location and modification of an existing recipe
+ that is similar in function to the recipe you need.
+
+.. note::
+
+ For information on recipe syntax, see the
+ ":ref:`dev-manual/new-recipe:recipe syntax`" section.
+
+Creating the Base Recipe Using ``devtool add``
+----------------------------------------------
+
+The ``devtool add`` command uses the same logic for auto-creating the
+recipe as ``recipetool create``, which is listed below. Additionally,
+however, ``devtool add`` sets up an environment that makes it easy for
+you to patch the source and to make changes to the recipe as is often
+necessary when adding a recipe to build a new piece of software to be
+included in a build.
+
+You can find a complete description of the ``devtool add`` command in
+the ":ref:`sdk-manual/extensible:a closer look at \`\`devtool add\`\``" section
+in the Yocto Project Application Development and the Extensible Software
+Development Kit (eSDK) manual.
+
+Creating the Base Recipe Using ``recipetool create``
+----------------------------------------------------
+
+``recipetool create`` automates creation of a base recipe given a set of
+source code files. As long as you can extract or point to the source
+files, the tool will construct a recipe and automatically configure all
+pre-build information into the recipe. For example, suppose you have an
+application that builds using Autotools. Creating the base recipe using
+``recipetool`` results in a recipe that has the pre-build dependencies,
+license requirements, and checksums configured.
+
+To run the tool, you just need to be in your :term:`Build Directory` and
+have sourced the build environment setup script (i.e.
+:ref:`structure-core-script`). To get help on the tool, use the following
+command::
+
+ $ recipetool -h
+ NOTE: Starting bitbake server...
+ usage: recipetool [-d] [-q] [--color COLOR] [-h] <subcommand> ...
+
+ OpenEmbedded recipe tool
+
+ options:
+ -d, --debug Enable debug output
+ -q, --quiet Print only errors
+ --color COLOR Colorize output (where COLOR is auto, always, never)
+ -h, --help show this help message and exit
+
+ subcommands:
+ create Create a new recipe
+ newappend Create a bbappend for the specified target in the specified
+ layer
+ setvar Set a variable within a recipe
+ appendfile Create/update a bbappend to replace a target file
+ appendsrcfiles Create/update a bbappend to add or replace source files
+ appendsrcfile Create/update a bbappend to add or replace a source file
+ Use recipetool <subcommand> --help to get help on a specific command
+
+Running ``recipetool create -o OUTFILE`` creates the base recipe and
+locates it properly in the layer that contains your source files.
+Here are some syntax examples:
+
+ - Use this syntax to generate a recipe based on source. Once generated,
+ the recipe resides in the existing source code layer::
+
+ recipetool create -o OUTFILE source
+
+ - Use this syntax to generate a recipe using code that
+ you extract from source. The extracted code is placed in its own layer
+ defined by :term:`EXTERNALSRC`::
+
+ recipetool create -o OUTFILE -x EXTERNALSRC source
+
+ - Use this syntax to generate a recipe based on source. The options
+ direct ``recipetool`` to generate debugging information. Once generated,
+ the recipe resides in the existing source code layer::
+
+ recipetool create -d -o OUTFILE source
+
+Locating and Using a Similar Recipe
+-----------------------------------
+
+Before writing a recipe from scratch, it is often useful to discover
+whether someone else has already written one that meets (or comes close
+to meeting) your needs. The Yocto Project and OpenEmbedded communities
+maintain many recipes that might be candidates for what you are doing.
+You can find a good central index of these recipes in the
+:oe_layerindex:`OpenEmbedded Layer Index <>`.
+
+Working from an existing recipe or a skeleton recipe is the best way to
+get started. Here are some points on both methods:
+
+- *Locate and modify a recipe that is close to what you want to do:*
+ This method works when you are familiar with the current recipe
+ space. The method does not work so well for those new to the Yocto
+ Project or writing recipes.
+
+ Some risks associated with this method are using a recipe that has
+ areas totally unrelated to what you are trying to accomplish with
+ your recipe, not recognizing areas of the recipe that you might have
+ to add from scratch, and so forth. All these risks stem from
+ unfamiliarity with the existing recipe space.
+
+- *Use and modify the following skeleton recipe:* If for some reason
+ you do not want to use ``recipetool`` and you cannot find an existing
+ recipe that is close to meeting your needs, you can use the following
+ structure to provide the fundamental areas of a new recipe::
+
+ DESCRIPTION = ""
+ HOMEPAGE = ""
+ LICENSE = ""
+ SECTION = ""
+ DEPENDS = ""
+ LIC_FILES_CHKSUM = ""
+
+ SRC_URI = ""
+
+Storing and Naming the Recipe
+=============================
+
+Once you have your base recipe, you should put it in your own layer and
+name it appropriately. Locating it correctly ensures that the
+OpenEmbedded build system can find it when you use BitBake to process
+the recipe.
+
+- *Storing Your Recipe:* The OpenEmbedded build system locates your
+ recipe through the layer's ``conf/layer.conf`` file and the
+ :term:`BBFILES` variable. This
+ variable sets up a path from which the build system can locate
+ recipes. Here is the typical use::
+
+ BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+ ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+ Consequently, you need to be sure you locate your new recipe inside
+ your layer such that it can be found.
+
+ You can find more information on how layers are structured in the
+ ":ref:`dev-manual/layers:understanding and creating layers`" section.
+
+- *Naming Your Recipe:* When you name your recipe, you need to follow
+ this naming convention::
+
+ basename_version.bb
+
+ Use lower-cased characters and do not include the reserved suffixes
+ ``-native``, ``-cross``, ``-initial``, or ``-dev`` casually (i.e. do not use
+ them as part of your recipe name unless the string applies). Here are some
+ examples:
+
+ .. code-block:: none
+
+ cups_1.7.0.bb
+ gawk_4.0.2.bb
+ irssi_0.8.16-rc1.bb
+
+Running a Build on the Recipe
+=============================
+
+Creating a new recipe is usually an iterative process that requires
+using BitBake to process the recipe multiple times in order to
+progressively discover and add information to the recipe file.
+
+Assuming you have sourced the build environment setup script (i.e.
+:ref:`structure-core-script`) and you are in the :term:`Build Directory`, use
+BitBake to process your recipe. All you need to provide is the
+``basename`` of the recipe as described in the previous section::
+
+ $ bitbake basename
+
+During the build, the OpenEmbedded build system creates a temporary work
+directory for each recipe
+(``${``\ :term:`WORKDIR`\ ``}``)
+where it keeps extracted source files, log files, intermediate
+compilation and packaging files, and so forth.
+
+The path to the per-recipe temporary work directory depends on the
+context in which it is being built. The quickest way to find this path
+is to have BitBake return it by running the following::
+
+ $ bitbake -e basename | grep ^WORKDIR=
+
+As an example, assume a Source Directory
+top-level folder named ``poky``, a default :term:`Build Directory` at
+``poky/build``, and a ``qemux86-poky-linux`` machine target system.
+Furthermore, suppose your recipe is named ``foo_1.3.0.bb``. In this
+case, the work directory the build system uses to build the package
+would be as follows::
+
+ poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0
+
+Inside this directory you can find sub-directories such as ``image``,
+``packages-split``, and ``temp``. After the build, you can examine these
+to determine how well the build went.
+
+.. note::
+
+ You can find log files for each task in the recipe's ``temp``
+ directory (e.g. ``poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0/temp``).
+ Log files are named ``log.taskname`` (e.g. ``log.do_configure``,
+ ``log.do_fetch``, and ``log.do_compile``).
+
+You can find more information about the build process in
+":doc:`/overview-manual/development-environment`"
+chapter of the Yocto Project Overview and Concepts Manual.
+
+Fetching Code
+=============
+
+The first thing your recipe must do is specify how to fetch the source
+files. Fetching is controlled mainly through the
+:term:`SRC_URI` variable. Your recipe
+must have a :term:`SRC_URI` variable that points to where the source is
+located. For a graphical representation of source locations, see the
+":ref:`overview-manual/concepts:sources`" section in
+the Yocto Project Overview and Concepts Manual.
+
+The :ref:`ref-tasks-fetch` task uses the prefix of each entry in the
+:term:`SRC_URI` variable value to determine which
+:ref:`fetcher <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
+to use to get your source files. It is the :term:`SRC_URI` variable that triggers
+the fetcher. The :ref:`ref-tasks-patch` task uses the variable after source is
+fetched to apply patches. The OpenEmbedded build system uses
+:term:`FILESOVERRIDES` for scanning directory locations for local files in
+:term:`SRC_URI`.
+
+The :term:`SRC_URI` variable in your recipe must define each unique location
+for your source files. It is good practice to not hard-code version
+numbers in a URL used in :term:`SRC_URI`. Rather than hard-code these
+values, use ``${``\ :term:`PV`\ ``}``,
+which causes the fetch process to use the version specified in the
+recipe filename. Specifying the version in this manner means that
+upgrading the recipe to a future version is as simple as renaming the
+recipe to match the new version.
+
+Here is a simple example from the
+``meta/recipes-devtools/strace/strace_5.5.bb`` recipe where the source
+comes from a single tarball. Notice the use of the
+:term:`PV` variable::
+
+ SRC_URI = "https://strace.io/files/${PV}/strace-${PV}.tar.xz \
+
+Files mentioned in :term:`SRC_URI` whose names end in a typical archive
+extension (e.g. ``.tar``, ``.tar.gz``, ``.tar.bz2``, ``.zip``, and so
+forth), are automatically extracted during the
+:ref:`ref-tasks-unpack` task. For
+another example that specifies these types of files, see the
+":ref:`dev-manual/new-recipe:building an autotooled package`" section.
+
+Another way of specifying source is from an SCM. For Git repositories,
+you must specify :term:`SRCREV` and you should specify :term:`PV` to include
+the revision with :term:`SRCPV`. Here is an example from the recipe
+``meta/recipes-core/musl/gcompat_git.bb``::
+
+ SRC_URI = "git://git.adelielinux.org/adelie/gcompat.git;protocol=https;branch=current"
+
+ PV = "1.0.0+1.1+git${SRCPV}"
+ SRCREV = "af5a49e489fdc04b9cf02547650d7aeaccd43793"
+
+If your :term:`SRC_URI` statement includes URLs pointing to individual files
+fetched from a remote server other than a version control system,
+BitBake attempts to verify the files against checksums defined in your
+recipe to ensure they have not been tampered with or otherwise modified
+since the recipe was written. Multiple checksums are supported:
+``SRC_URI[md5sum]``, ``SRC_URI[sha1sum]``, ``SRC_URI[sha256sum]``,
+``SRC_URI[sha384sum]``, and ``SRC_URI[sha512sum]``, but only
+``SRC_URI[sha256sum]`` is commonly used.
+
+.. note::
+
+ ``SRC_URI[md5sum]`` used to also be commonly used, but it is deprecated
+ and should be replaced by ``SRC_URI[sha256sum]`` when updating existing
+ recipes.
+
+If your :term:`SRC_URI` variable points to more than a single URL (excluding
+SCM URLs), you need to provide the ``sha256`` checksum for each URL. For these
+cases, you provide a name for each URL as part of the :term:`SRC_URI` and then
+reference that name in the subsequent checksum statements. Here is an example
+combining lines from the files ``git.inc`` and ``git_2.24.1.bb``::
+
+ SRC_URI = "${KERNELORG_MIRROR}/software/scm/git/git-${PV}.tar.gz;name=tarball \
+ ${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages"
+
+ SRC_URI[tarball.sha256sum] = "ad5334956301c86841eb1e5b1bb20884a6bad89a10a6762c958220c7cf64da02"
+ SRC_URI[manpages.sha256sum] = "9a7ae3a093bea39770eb96ca3e5b40bff7af0b9f6123f089d7821d0e5b8e1230"
+
+The proper value for the ``sha256`` checksum might be available together
+with other signatures on the download page for the upstream source (e.g.
+``md5``, ``sha1``, ``sha256``, ``GPG``, and so forth). Because the
+OpenEmbedded build system typically only deals with ``sha256sum``,
+you should verify all the signatures you find by hand.
+
+If no :term:`SRC_URI` checksums are specified when you attempt to build the
+recipe, or you provide an incorrect checksum, the build will produce an
+error for each missing or incorrect checksum. As part of the error
+message, the build system provides the checksum string corresponding to
+the fetched file. Once you have the correct checksums, you can copy and
+paste them into your recipe and then run the build again to continue.
+
+.. note::
+
+ As mentioned, if the upstream source provides signatures for
+ verifying the downloaded source code, you should verify those
+ manually before setting the checksum values in the recipe and
+ continuing with the build.
+
+This final example is a bit more complicated and is from the
+``meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.20.bb`` recipe. The
+example's :term:`SRC_URI` statement identifies multiple files as the source
+files for the recipe: a tarball, a patch file, a desktop file, and an icon::
+
+ SRC_URI = "http://dist.schmorp.de/rxvt-unicode/Attic/rxvt-unicode-${PV}.tar.bz2 \
+ file://xwc.patch \
+ file://rxvt.desktop \
+ file://rxvt.png"
+
+When you specify local files using the ``file://`` URI protocol, the
+build system fetches files from the local machine. The path is relative
+to the :term:`FILESPATH` variable
+and searches specific directories in a certain order:
+``${``\ :term:`BP`\ ``}``,
+``${``\ :term:`BPN`\ ``}``, and
+``files``. The directories are assumed to be subdirectories of the
+directory in which the recipe or append file resides. For another
+example that specifies these types of files, see the
+"`building a single .c file package`_" section.
+
+The previous example also specifies a patch file. Patch files are files
+whose names usually end in ``.patch`` or ``.diff`` but can end with
+compressed suffixes such as ``diff.gz`` and ``patch.bz2``, for example.
+The build system automatically applies patches as described in the
+":ref:`dev-manual/new-recipe:patching code`" section.
+
+Fetching Code Through Firewalls
+-------------------------------
+
+Some users are behind firewalls and need to fetch code through a proxy.
+See the ":doc:`/ref-manual/faq`" chapter for advice.
+
+Limiting the Number of Parallel Connections
+-------------------------------------------
+
+Some users are behind firewalls or use servers where the number of parallel
+connections is limited. In such cases, you can limit the number of fetch
+tasks being run in parallel by adding the following to your ``local.conf``
+file::
+
+ do_fetch[number_threads] = "4"
+
+Unpacking Code
+==============
+
+During the build, the
+:ref:`ref-tasks-unpack` task unpacks
+the source with ``${``\ :term:`S`\ ``}``
+pointing to where it is unpacked.
+
+If you are fetching your source files from an upstream source archived
+tarball and the tarball's internal structure matches the common
+convention of a top-level subdirectory named
+``${``\ :term:`BPN`\ ``}-${``\ :term:`PV`\ ``}``,
+then you do not need to set :term:`S`. However, if :term:`SRC_URI` specifies to
+fetch source from an archive that does not use this convention, or from
+an SCM like Git or Subversion, your recipe needs to define :term:`S`.
+
+If processing your recipe using BitBake successfully unpacks the source
+files, you need to be sure that the directory pointed to by ``${S}``
+matches the structure of the source.
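+
+For example, recipes that fetch their source from Git conventionally set
+:term:`S` as follows (a common sketch)::
+
+ S = "${WORKDIR}/git"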
+
+Patching Code
+=============
+
+Sometimes it is necessary to patch code after it has been fetched. Any
+files mentioned in :term:`SRC_URI` whose names end in ``.patch`` or
+``.diff`` or compressed versions of these suffixes (e.g. ``diff.gz``,
+``patch.bz2``, etc.) are treated as patches. The
+:ref:`ref-tasks-patch` task
+automatically applies these patches.
+
+The build system should be able to apply patches with the "-p1" option
+(i.e. one directory level in the path will be stripped off). If your
+patch needs to have more directory levels stripped off, specify the
+number of levels using the "striplevel" option in the :term:`SRC_URI` entry
+for the patch. Alternatively, if your patch needs to be applied in a
+specific subdirectory that is not specified in the patch file, use the
+"patchdir" option in the entry.
+
+As with all local files referenced in
+:term:`SRC_URI` using ``file://``,
+you should place patch files in a directory next to the recipe either
+named the same as the base name of the recipe
+(:term:`BP` and
+:term:`BPN`) or "files".
+
+Licensing
+=========
+
+Your recipe needs to define variables related to the license
+under which the software is distributed. See the
+:ref:`contributor-guide/recipe-style-guide:recipe license fields`
+section in the Contributor Guide for details.
+
+Dependencies
+============
+
+Most software packages have a short list of other packages that they
+require, which are called dependencies. These dependencies fall into two
+main categories: build-time dependencies, which are required when the
+software is built; and runtime dependencies, which are required to be
+installed on the target in order for the software to run.
+
+Within a recipe, you specify build-time dependencies using the
+:term:`DEPENDS` variable. Although there are nuances,
+items specified in :term:`DEPENDS` should be names of other
+recipes. It is important that you specify all build-time dependencies
+explicitly.
+
+Another consideration is that configure scripts might automatically
+check for optional dependencies and enable corresponding functionality
+if those dependencies are found. If you wish to make a recipe that is
+more generally useful (e.g. publish the recipe in a layer for others to
+use), instead of hard-disabling the functionality, you can use the
+:term:`PACKAGECONFIG` variable to allow functionality and the
+corresponding dependencies to be enabled and disabled easily by other
+users of the recipe.
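+
+For example, a hypothetical recipe could expose optional SSL support like
+this (a sketch; the option name, configure arguments, and dependency are
+illustrative)::
+
+ PACKAGECONFIG ??= ""
+ PACKAGECONFIG[ssl] = "--with-ssl,--without-ssl,openssl"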
+
+Similar to build-time dependencies, you specify runtime dependencies
+through a variable -
+:term:`RDEPENDS`, which is
+package-specific. All variables that are package-specific need to have
+the name of the package added to the end as an override. Since the main
+package for a recipe has the same name as the recipe, and the recipe's
+name can be found through the
+``${``\ :term:`PN`\ ``}`` variable, then
+you specify the dependencies for the main package by setting
+``RDEPENDS:${PN}``. If the package were named ``${PN}-tools``, then you
+would set ``RDEPENDS:${PN}-tools``, and so forth.
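+
+For example, a minimal sketch with purely illustrative dependencies would be::
+
+ RDEPENDS:${PN} = "python3-core"
+ RDEPENDS:${PN}-tools = "bash"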
+
+Some runtime dependencies will be set automatically at packaging time.
+These dependencies include any shared library dependencies (i.e. if a
+package "example" contains "libexample" and another package "mypackage"
+contains a binary that links to "libexample" then the OpenEmbedded build
+system will automatically add a runtime dependency to "mypackage" on
+"example"). See the
+":ref:`overview-manual/concepts:automatically added runtime dependencies`"
+section in the Yocto Project Overview and Concepts Manual for further
+details.
+
+Configuring the Recipe
+======================
+
+Most software provides some means of setting build-time configuration
+options before compilation. Typically, setting these options is
+accomplished by running a configure script with options, or by modifying
+a build configuration file.
+
+.. note::
+
+ As of Yocto Project Release 1.7, some of the core recipes that
+ package binary configuration scripts now disable the scripts due to
+ the scripts previously requiring error-prone path substitution. The
+ OpenEmbedded build system uses ``pkg-config`` now, which is much more
+ robust. You can find a list of the ``*-config`` scripts that are disabled
+ in the ":ref:`migration-1.7-binary-configuration-scripts-disabled`" section
+ in the Yocto Project Reference Manual.
+
+A major part of build-time configuration is about checking for
+build-time dependencies and possibly enabling optional functionality as
+a result. You need to specify any build-time dependencies for the
+software you are building in your recipe's
+:term:`DEPENDS` value, in terms of
+other recipes that satisfy those dependencies. You can often find
+build-time or runtime dependencies described in the software's
+documentation.
+
+The following list provides configuration items of note based on how
+your software is built:
+
+- *Autotools:* If your source files have a ``configure.ac`` file, then
+ your software is built using Autotools. If this is the case, you just
+ need to modify the configuration.
+
+ When using Autotools, your recipe needs to inherit the
+ :ref:`ref-classes-autotools` class and it does not have to
+ contain a :ref:`ref-tasks-configure` task. However, you might still want to
+ make some adjustments. For example, you can set :term:`EXTRA_OECONF` or
+ :term:`PACKAGECONFIG_CONFARGS` to pass any needed configure options that
+ are specific to the recipe.
+
+- *CMake:* If your source files have a ``CMakeLists.txt`` file, then
+ your software is built using CMake. If this is the case, you just
+ need to modify the configuration.
+
+ When you use CMake, your recipe needs to inherit the
+ :ref:`ref-classes-cmake` class and it does not have to contain a
+ :ref:`ref-tasks-configure` task. You can make some adjustments by setting
+ :term:`EXTRA_OECMAKE` to pass any needed configure options that are
+ specific to the recipe.
+
+ .. note::
+
+ If you need to install one or more custom CMake toolchain files
+ that are supplied by the application you are building, install the
+ files to ``${D}${datadir}/cmake/Modules`` during :ref:`ref-tasks-install`.
+
+- *Other:* If your source files do not have a ``configure.ac`` or
+ ``CMakeLists.txt`` file, then your software is built using some
+ method other than Autotools or CMake. If this is the case, you
+ normally need to provide a
+ :ref:`ref-tasks-configure` task
+ in your recipe unless, of course, there is nothing to configure.
+
+ Even if your software is not being built by Autotools or CMake, you
+ still might not need to deal with any configuration issues. You need
+ to determine if configuration is even a required step. You might need
+ to modify a Makefile or some configuration file used for the build to
+ specify necessary build options. Or, perhaps you might need to run a
+ provided, custom configure script with the appropriate options.
+
+ For the case involving a custom configure script, you would run
+ ``./configure --help`` and look for the options you need to set.
+
+Once configuration succeeds, it is always good practice to look at the
+``log.do_configure`` file to ensure that the appropriate options have
+been enabled and no additional build-time dependencies need to be added
+to :term:`DEPENDS`. For example, if the configure script reports that it
+found something not mentioned in :term:`DEPENDS`, or that it did not find
+something that it needed for some desired optional functionality, then
+you would need to add those to :term:`DEPENDS`. Looking at the log might
+also reveal items being checked for, enabled, or both that you do not
+want, or items not being found that are in :term:`DEPENDS`, in which case
+you would need to look at passing extra options to the configure script
+as needed. For reference information on configure options specific to
+the software you are building, you can consult the output of the
+``./configure --help`` command within ``${S}`` or consult the software's
+upstream documentation.
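+
+For example, if the log reveals an optional feature being configured
+differently than you want, you can pass recipe-specific options to the
+configure script through :term:`EXTRA_OECONF`. The options below are
+hypothetical and depend entirely on the software's own configure script::
+
+ EXTRA_OECONF += "--disable-static --without-libfoo"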
+
+Using Headers to Interface with Devices
+=======================================
+
+If your recipe builds an application that needs to communicate with some
+device or needs an API into a custom kernel, you will need to provide
+appropriate header files. Under no circumstances should you ever modify
+the existing
+``meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc`` file.
+These headers are used to build ``libc`` and must not be compromised
+with custom or machine-specific header information. If you customize
+``libc`` through modified headers, all other applications that use
+``libc`` are affected as well.
+
+.. note::
+
+ Never copy and customize the ``libc`` header file (i.e.
+ ``meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc``).
+
+The correct way to interface to a device or custom kernel is to use a
+separate package that provides the additional headers for the driver or
+other unique interfaces. When doing so, your application also becomes
+responsible for creating a dependency on that specific provider.
+
+Consider the following:
+
+- Never modify ``linux-libc-headers.inc``. Consider that file to be
+ part of the ``libc`` system, and not something you use to access the
+ kernel directly. You should access ``libc`` through specific ``libc``
+ calls.
+
+- Applications that must talk directly to devices should either provide
+ necessary headers themselves, or establish a dependency on a special
+ headers package that is specific to that driver.
+
+For example, suppose you want to modify an existing header to add I/O
+control or network support. If the modifications are used by a small
+number of programs, providing a unique version of a header is easy and has
+little impact. When doing so, bear in mind the guidelines in the
+previous list.
+
+.. note::
+
+ If for some reason your changes need to modify the behavior of ``libc``,
+ and subsequently all other applications on the system, use a ``.bbappend``
+ to modify the ``linux-libc-headers.inc`` file. However, take care to not
+ make the changes machine specific.
+
+Consider a case where your kernel is older and you need an older
+``libc`` ABI. The headers installed by your recipe should still be from a
+standard mainline kernel, not from your own custom one.
+
+When you use custom kernel headers you need to get them from
+:term:`STAGING_KERNEL_DIR`,
+which is the directory with kernel headers that are required to build
+out-of-tree modules. Your recipe will also need the following::
+
+ do_configure[depends] += "virtual/kernel:do_shared_workdir"
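+
+In addition, you typically point the compiler at those headers. The following
+line is only a sketch; the exact subdirectory under :term:`STAGING_KERNEL_DIR`
+depends on how your kernel provides its headers::
+
+ CFLAGS:append = " -I${STAGING_KERNEL_DIR}/include"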
+
+Compilation
+===========
+
+During a build, the :ref:`ref-tasks-compile` task happens after source is fetched,
+unpacked, and configured. If the recipe passes through :ref:`ref-tasks-compile`
+successfully, nothing needs to be done.
+
+However, if the compile step fails, you need to diagnose the failure.
+Here are some common issues that cause failures.
+
+.. note::
+
+ For cases where improper paths are detected for configuration files
+ or for when libraries/headers cannot be found, be sure you are using
+ the more robust ``pkg-config``. See the note in section
+ ":ref:`dev-manual/new-recipe:Configuring the Recipe`" for additional information.
+
+- *Parallel build failures:* These failures manifest themselves as
+ intermittent errors, or errors reporting that a file or directory
+ that should be created by some other part of the build process could
+ not be found. This type of failure can occur even if, upon
+ inspection, the file or directory does exist after the build has
+ failed, because that part of the build process happened in the wrong
+ order.
+
+ To fix the problem, you need to either satisfy the missing dependency
+ in the Makefile or whatever script produced the Makefile, or (as a
+ workaround) set :term:`PARALLEL_MAKE` to an empty string::
+
+ PARALLEL_MAKE = ""
+
+ For information on parallel Makefile issues, see the
+ ":ref:`dev-manual/debugging:debugging parallel make races`" section.
+
+- *Improper host path usage:* This failure applies to recipes building
+ for the target or ":ref:`ref-classes-nativesdk`" only. The
+ failure occurs when the compilation process uses improper headers,
+ libraries, or other files from the host system when cross-compiling for
+ the target.
+
+ To fix the problem, examine the ``log.do_compile`` file to identify
+ the host paths being used (e.g. ``/usr/include``, ``/usr/lib``, and
+ so forth) and then either add configure options, apply a patch, or do
+ both.
+
+- *Failure to find required libraries/headers:* If a build-time
+ dependency is missing because it has not been declared in
+ :term:`DEPENDS`, or because the
+ dependency exists but the path used by the build process to find the
+ file is incorrect and the configure step did not detect it, the
+ compilation process could fail. For either of these failures, the
+ compilation process notes that files could not be found. In these
+ cases, you need to go back and add additional options to the
+ configure script as well as possibly add additional build-time
+ dependencies to :term:`DEPENDS`.
+
+ Occasionally, it is necessary to apply a patch to the source to
+ ensure the correct paths are used. If you need to specify paths to
+ find files staged into the sysroot from other recipes, use the
+ variables that the OpenEmbedded build system provides (e.g.
+ :term:`STAGING_BINDIR`, :term:`STAGING_INCDIR`, :term:`STAGING_DATADIR`, and so
+ forth).
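+
+As an illustration of this last case, the following sketch adds a missing
+build-time dependency and points the configure script at the staged headers.
+Here ``zlib`` and the ``--with-zlib-includes`` option are assumptions made
+for the example::
+
+ DEPENDS += "zlib"
+ EXTRA_OECONF += "--with-zlib-includes=${STAGING_INCDIR}"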
+
+Installing
+==========
+
+During :ref:`ref-tasks-install`, the task copies the built files along with their
+hierarchy to locations that would mirror their locations on the target
+device. The installation process copies files from the
+``${``\ :term:`S`\ ``}``,
+``${``\ :term:`B`\ ``}``, and
+``${``\ :term:`WORKDIR`\ ``}``
+directories to the ``${``\ :term:`D`\ ``}``
+directory to create the structure as it should appear on the target
+system.
+
+How your software is built affects what you must do to be sure your
+software is installed correctly. The following list describes what you
+must do for installation depending on the type of build system used by
+the software being built:
+
+- *Autotools and CMake:* If the software your recipe is building uses
+ Autotools or CMake, the OpenEmbedded build system understands how to
+ install the software. Consequently, you do not have to have a
+ :ref:`ref-tasks-install` task as part of your recipe. You just need to make
+ sure the install portion of the build completes with no issues.
+ However, if you wish to install additional files not already being
+ installed by ``make install``, you should do this using a
+ ``do_install:append`` function using the install command as described
+ in the "Manual" bulleted item later in this list.
+
+- *Other (using* ``make install``\ *)*: You need to define a :ref:`ref-tasks-install`
+ function in your recipe. The function should call
+ ``oe_runmake install`` and will likely need to pass in the
+ destination directory as well. How you pass that path is dependent on
+ how the ``Makefile`` being run is written (e.g. ``DESTDIR=${D}``,
+ ``PREFIX=${D}``, ``INSTALLROOT=${D}``, and so forth).
+
+ For an example recipe using ``make install``, see the
+ ":ref:`dev-manual/new-recipe:building a makefile-based package`" section. A
+ short sketch of such a :ref:`ref-tasks-install` function also follows this list.
+
+- *Manual:* You need to define a :ref:`ref-tasks-install` function in your
+ recipe. The function must first use ``install -d`` to create the
+ directories under
+ ``${``\ :term:`D`\ ``}``. Once the
+ directories exist, your function can use ``install`` to manually
+ install the built software into the directories.
+
+ You can find more information on ``install`` at
+ https://www.gnu.org/software/coreutils/manual/html_node/install-invocation.html.
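+
+For instance, for the ``make install`` case above, a minimal sketch of such a
+:ref:`ref-tasks-install` function, assuming the ``Makefile`` honors
+``DESTDIR``, could be::
+
+ do_install() {
+ oe_runmake install DESTDIR=${D}
+ }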
+
+For the scenarios that do not use Autotools or CMake, you need to track
+the installation and diagnose and fix any issues until everything
+installs correctly. You need to look in the default location of
+``${D}``, which is ``${WORKDIR}/image``, to be sure your files have been
+installed correctly.
+
+.. note::
+
+ - During the installation process, you might need to modify some of
+ the installed files to suit the target layout. For example, you
+ might need to replace hard-coded paths in an initscript with
+ values of variables provided by the build system, such as
+ replacing ``/usr/bin/`` with ``${bindir}``. If you do perform such
+ modifications during :ref:`ref-tasks-install`, be sure to modify the
+ destination file after copying rather than before copying.
+ Modifying after copying ensures that the build system can
+ re-execute :ref:`ref-tasks-install` if needed.
+
+ - ``oe_runmake install``, which can be run directly or can be run
+ indirectly by the :ref:`ref-classes-autotools` and
+ :ref:`ref-classes-cmake` classes, runs ``make install`` in parallel.
+ Sometimes, a Makefile can have missing dependencies between targets that
+ can result in race conditions. If you experience intermittent failures
+ during :ref:`ref-tasks-install`, you might be able to work around them by
+ disabling parallel Makefile installs by adding the following to the
+ recipe::
+
+ PARALLEL_MAKEINST = ""
+
+ See :term:`PARALLEL_MAKEINST` for additional information.
+
+ - If you need to install one or more custom CMake toolchain files
+ that are supplied by the application you are building, install the
+ files to ``${D}${datadir}/cmake/Modules`` during
+ :ref:`ref-tasks-install`.
+
+Enabling System Services
+========================
+
+If you want to install a service, which is a process that usually starts
+on boot and runs in the background, then you must include some
+additional definitions in your recipe.
+
+If you are adding services and the service initialization script or the
+service file itself is not installed, you must provide for that
+installation in your recipe using a ``do_install:append`` function. If
+your recipe already has a :ref:`ref-tasks-install` function, update the function
+near its end rather than adding an additional ``do_install:append``
+function.
+
+When you create the installation for your services, you need to
+accomplish what is normally done by ``make install``. In other words,
+make sure your installation arranges the output similar to how it is
+arranged on the target system.
+
+The OpenEmbedded build system provides support for starting services two
+different ways:
+
+- *SysVinit:* SysVinit is the traditional init system used to control
+ the very basic functions of your system. The init program is the
+ first program started by the Linux kernel when the system boots. Init
+ then controls the startup, running, and shutdown of all other
+ programs.
+
+ To enable a service using SysVinit, your recipe needs to inherit the
+ :ref:`ref-classes-update-rc.d` class. The class helps
+ facilitate safely installing the package on the target.
+
+ You will need to set the
+ :term:`INITSCRIPT_PACKAGES`,
+ :term:`INITSCRIPT_NAME`,
+ and
+ :term:`INITSCRIPT_PARAMS`
+ variables within your recipe.
+
+- *systemd:* System Management Daemon (systemd) was designed to replace
+ SysVinit and to provide enhanced management of services. For more
+ information on systemd, see the systemd homepage at
+ https://freedesktop.org/wiki/Software/systemd/.
+
+ To enable a service using systemd, your recipe needs to inherit the
+ :ref:`ref-classes-systemd` class. See the ``systemd.bbclass`` file
+ located in your :term:`Source Directory` for more information. A short
+ sketch of both approaches follows this list.
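+
+The sketch below covers a hypothetical service named ``myservice``; the
+script name, unit name, and the :term:`INITSCRIPT_PARAMS` value are
+assumptions for the example::
+
+ # SysVinit: install an init script and register it with update-rc.d
+ inherit update-rc.d
+ INITSCRIPT_PACKAGES = "${PN}"
+ INITSCRIPT_NAME = "myservice"
+ INITSCRIPT_PARAMS = "defaults 90"
+
+ # systemd: install a unit file and point SYSTEMD_SERVICE at it
+ inherit systemd
+ SYSTEMD_SERVICE:${PN} = "myservice.service"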
+
+Packaging
+=========
+
+Successful packaging is a combination of automated processes performed
+by the OpenEmbedded build system and some specific steps you need to
+take. The following list describes the process:
+
+- *Splitting Files*: The :ref:`ref-tasks-package` task splits the files produced
+ by the recipe into logical components. Even software that produces a
+ single binary might still have debug symbols, documentation, and
+ other logical components that should be split out. The :ref:`ref-tasks-package`
+ task ensures that files are split up and packaged correctly.
+
+- *Running QA Checks*: The :ref:`ref-classes-insane` class adds a
+ step to the package generation process so that output quality
+ assurance checks are generated by the OpenEmbedded build system. This
+ step performs a range of checks to be sure the build's output is free
+ of common problems that show up during runtime. For information on
+ these checks, see the :ref:`ref-classes-insane` class and
+ the ":ref:`ref-manual/qa-checks:qa error and warning messages`"
+ chapter in the Yocto Project Reference Manual.
+
+- *Hand-Checking Your Packages*: After you build your software, you
+ need to be sure your packages are correct. Examine the
+ ``${``\ :term:`WORKDIR`\ ``}/packages-split``
+ directory and make sure files are where you expect them to be. If you
+ discover problems, you can set
+ :term:`PACKAGES`,
+ :term:`FILES`,
+ ``do_install(:append)``, and so forth as needed.
+
+- *Splitting an Application into Multiple Packages*: If you need to
+ split an application into several packages, see the
+ ":ref:`dev-manual/new-recipe:splitting an application into multiple packages`"
+ section for an example.
+
+- *Installing a Post-Installation Script*: For an example showing how
+ to install a post-installation script, see the
+ ":ref:`dev-manual/new-recipe:post-installation scripts`" section.
+
+- *Marking Package Architecture*: Depending on what your recipe is
+ building and how it is configured, it might be important to mark the
+ packages produced as being specific to a particular machine, or to
+ mark them as not being specific to a particular machine or
+ architecture at all.
+
+ By default, packages apply to any machine with the same architecture
+ as the target machine. When a recipe produces packages that are
+ machine-specific (e.g. the
+ :term:`MACHINE` value is passed
+ into the configure script or a patch is applied only for a particular
+ machine), you should mark them as such by adding the following to the
+ recipe::
+
+ PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+ On the other hand, if the recipe produces packages that do not
+ contain anything specific to the target machine or architecture at
+ all (e.g. recipes that simply package script files or configuration
+ files), you should use the :ref:`ref-classes-allarch` class to
+ do this for you by adding this to your recipe::
+
+ inherit allarch
+
+ Ensuring that the package architecture is correct is not critical
+ while you are doing the first few builds of your recipe. However, it
+ is important in order to ensure that your recipe rebuilds (or does
+ not rebuild) appropriately in response to changes in configuration,
+ and to ensure that you get the appropriate packages installed on the
+ target machine, particularly if you run separate builds for more than
+ one target machine.
+
+Sharing Files Between Recipes
+=============================
+
+Recipes often need to use files provided by other recipes on the build
+host. For example, an application linking to a common library needs
+access to the library itself and its associated headers. The way this
+access is accomplished is by populating a sysroot with files. Each
+recipe has two sysroots in its work directory, one for target files
+(``recipe-sysroot``) and one for files that are native to the build host
+(``recipe-sysroot-native``).
+
+.. note::
+
+ You might find the term "staging" used within the Yocto Project
+ regarding files populating sysroots (e.g. the :term:`STAGING_DIR`
+ variable).
+
+Recipes should never populate the sysroot directly (i.e. write files
+into sysroot). Instead, files should be installed into standard
+locations during the
+:ref:`ref-tasks-install` task within
+the ``${``\ :term:`D`\ ``}`` directory. The
+reason for this limitation is that almost all files that populate the
+sysroot are cataloged in manifests in order to ensure the files can be
+removed later when a recipe is either modified or removed. Thus, the
+sysroot is able to remain free from stale files.
+
+A subset of the files installed by the :ref:`ref-tasks-install` task are
+used by the :ref:`ref-tasks-populate_sysroot` task as defined by the
+:term:`SYSROOT_DIRS` variable to automatically populate the sysroot. It
+is possible to modify the list of directories that populate the sysroot.
+The following example shows how you could add the ``/opt`` directory to
+the list of directories within a recipe::
+
+ SYSROOT_DIRS += "/opt"
+
+.. note::
+
+ The ``/sysroot-only`` directory is to be used by recipes that generate
+ artifacts that are not included in the target filesystem, allowing them to
+ share these artifacts without needing to use the :term:`DEPLOY_DIR`.
+
+For a more complete description of the :ref:`ref-tasks-populate_sysroot`
+task and its associated functions, see the
+:ref:`staging <ref-classes-staging>` class.
+
+Using Virtual Providers
+=======================
+
+Prior to a build, if you know that several different recipes provide the
+same functionality, you can use a virtual provider (i.e. ``virtual/*``)
+as a placeholder for the actual provider. The actual provider is
+determined at build-time.
+
+A common scenario where a virtual provider is used would be for the kernel
+recipe. Suppose you have three kernel recipes whose :term:`PN` values map to
+``kernel-big``, ``kernel-mid``, and ``kernel-small``. Furthermore, each of
+these recipes in some way uses a :term:`PROVIDES` statement that essentially
+identifies itself as being able to provide ``virtual/kernel``. Here is one way
+this is done, through the :ref:`ref-classes-kernel` class::
+
+ PROVIDES += "virtual/kernel"
+
+Any recipe that inherits the :ref:`ref-classes-kernel` class is
+going to utilize a :term:`PROVIDES` statement that identifies that recipe as
+being able to provide the ``virtual/kernel`` item.
+
+Now comes the time to actually build an image and you need a kernel
+recipe, but which one? You can configure your build to call out the
+kernel recipe you want by using the :term:`PREFERRED_PROVIDER` variable. As
+an example, consider the :yocto_git:`x86-base.inc
+</poky/tree/meta/conf/machine/include/x86/x86-base.inc>` include file, which is a
+machine (i.e. :term:`MACHINE`) configuration file. This include file is the
+reason all x86-based machines use the ``linux-yocto`` kernel. Here are the
+relevant lines from the include file::
+
+ PREFERRED_PROVIDER_virtual/kernel ??= "linux-yocto"
+ PREFERRED_VERSION_linux-yocto ??= "4.15%"
+
+When you use a virtual provider, you do not have to "hard code" a recipe
+name as a build dependency. You can use the
+:term:`DEPENDS` variable to state the
+build is dependent on ``virtual/kernel`` for example::
+
+ DEPENDS = "virtual/kernel"
+
+During the build, the OpenEmbedded build system picks
+the correct recipe needed for the ``virtual/kernel`` dependency based on
+the :term:`PREFERRED_PROVIDER` variable. If you want to use the small kernel
+mentioned at the beginning of this section, configure your build as
+follows::
+
+ PREFERRED_PROVIDER_virtual/kernel ??= "kernel-small"
+
+.. note::
+
+ Any recipe that :term:`PROVIDES` a ``virtual/*`` item that is ultimately not
+ selected through :term:`PREFERRED_PROVIDER` does not get built. Preventing these
+ recipes from building is usually the desired behavior since this mechanism's
+ purpose is to select between mutually exclusive alternative providers.
+
+The following lists specific examples of virtual providers:
+
+- ``virtual/kernel``: Provides the name of the kernel recipe to use
+ when building a kernel image.
+
+- ``virtual/bootloader``: Provides the name of the bootloader to use
+ when building an image.
+
+- ``virtual/libgbm``: Provides ``gbm.pc``.
+
+- ``virtual/egl``: Provides ``egl.pc`` and possibly ``wayland-egl.pc``.
+
+- ``virtual/libgl``: Provides ``gl.pc`` (i.e. libGL).
+
+- ``virtual/libgles1``: Provides ``glesv1_cm.pc`` (i.e. libGLESv1_CM).
+
+- ``virtual/libgles2``: Provides ``glesv2.pc`` (i.e. libGLESv2).
+
+.. note::
+
+ Virtual providers only apply to build time dependencies specified with
+ :term:`PROVIDES` and :term:`DEPENDS`. They do not apply to runtime
+ dependencies specified with :term:`RPROVIDES` and :term:`RDEPENDS`.
+
+Properly Versioning Pre-Release Recipes
+=======================================
+
+Sometimes the name of a recipe can lead to versioning problems when the
+recipe is upgraded to a final release. For example, consider the
+``irssi_0.8.16-rc1.bb`` recipe file in the list of example recipes in
+the ":ref:`dev-manual/new-recipe:storing and naming the recipe`" section.
+This recipe is at a release candidate stage (i.e. "rc1"). When the recipe is
+released, the recipe filename becomes ``irssi_0.8.16.bb``. The version
+change from ``0.8.16-rc1`` to ``0.8.16`` is seen as a decrease by the
+build system and package managers, so the resulting packages will not
+correctly trigger an upgrade.
+
+In order to ensure the versions compare properly, the recommended
+convention is to use a tilde (``~``) character as follows::
+
+ PV = "0.8.16~rc1"
+
+This way ``0.8.16~rc1`` sorts before ``0.8.16``. See the
+":ref:`contributor-guide/recipe-style-guide:version policy`" section in the
+Yocto Project and OpenEmbedded Contributor Guide for more details about
+versioning code corresponding to a pre-release or to a specific Git commit.
+
+Post-Installation Scripts
+=========================
+
+Post-installation scripts run immediately after installing a package on
+the target or during image creation when a package is included in an
+image. To add a post-installation script to a package, add a
+``pkg_postinst:``\ `PACKAGENAME`\ ``()`` function to the recipe file
+(``.bb``) and replace `PACKAGENAME` with the name of the package you want
+to attach to the ``postinst`` script. To apply the post-installation
+script to the main package for the recipe, which is usually what is
+required, specify
+``${``\ :term:`PN`\ ``}`` in place of
+PACKAGENAME.
+
+A post-installation function has the following structure::
+
+ pkg_postinst:PACKAGENAME() {
+ # Commands to carry out
+ }
+
+The script defined in the post-installation function is called when the
+root filesystem is created. If the script succeeds, the package is
+marked as installed.
+
+.. note::
+
+ Any RPM post-installation script that runs on the target should
+ return a 0 exit code. RPM does not allow non-zero exit codes for
+ these scripts, and if a script returns one, the RPM package manager
+ marks the package installation as failed on the target.
+
+Sometimes it is necessary for the execution of a post-installation
+script to be delayed until the first boot. For example, the script might
+need to be executed on the device itself. To delay script execution
+until boot time, you must explicitly mark post installs to defer to the
+target. You can use ``pkg_postinst_ontarget()`` or call
+``postinst_intercept delay_to_first_boot`` from ``pkg_postinst()``. Any
+failure of a ``pkg_postinst()`` script (including exit 1) triggers an
+error during the
+:ref:`ref-tasks-rootfs` task.
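+
+As a sketch, a deferred script might look like the following, where
+``myapp --update-cache`` stands in for whatever command must run on the
+device itself::
+
+ pkg_postinst_ontarget:${PN}() {
+ # Runs on the device at first boot, not during image creation
+ myapp --update-cache
+ }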
+
+If you have recipes that use the ``pkg_postinst`` function and they require
+the use of non-standard native tools that have dependencies during
+root filesystem construction, you need to use the
+:term:`PACKAGE_WRITE_DEPS`
+variable in your recipe to list these tools. If you do not use this
+variable, the tools might be missing and execution of the
+post-installation script is deferred until first boot. Deferring the
+script to the first boot is undesirable and impossible for read-only
+root filesystems.
+
+.. note::
+
+ There is equivalent support for pre-install, pre-uninstall, and post-uninstall
+ scripts by way of ``pkg_preinst``, ``pkg_prerm``, and ``pkg_postrm``,
+ respectively. These scripts work in exactly the same way as
+ ``pkg_postinst``, with the exception that they run at different times. Also,
+ because of when they run, they cannot be run at image creation time the way
+ ``pkg_postinst`` can.
+
+Testing
+=======
+
+The final step for completing your recipe is to be sure that the
+software you built runs correctly. To accomplish runtime testing, add
+the build's output packages to your image and test them on the target.
+
+For information on how to customize your image by adding specific
+packages, see the ":ref:`dev-manual/customizing-images:customizing images`" section.
+
+Examples
+========
+
+To help summarize how to write a recipe, this section provides some
+recipe examples given various scenarios:
+
+- `Building a single .c file package`_
+
+- `Building a Makefile-based package`_
+
+- `Building an Autotooled package`_
+
+- `Building a Meson package`_
+
+- `Splitting an application into multiple packages`_
+
+- `Packaging externally produced binaries`_
+
+Building a Single .c File Package
+---------------------------------
+
+Building an application from a single file that is stored locally (e.g. under
+``files``) requires a recipe that has the file listed in the :term:`SRC_URI`
+variable. Additionally, you need to manually write the :ref:`ref-tasks-compile`
+and :ref:`ref-tasks-install` tasks. The :term:`S` variable defines the
+directory containing the source code, which is set to :term:`WORKDIR` in this
+case --- the directory BitBake uses for the build::
+
+ SUMMARY = "Simple helloworld application"
+ SECTION = "examples"
+ LICENSE = "MIT"
+ LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
+
+ SRC_URI = "file://helloworld.c"
+
+ S = "${WORKDIR}"
+
+ do_compile() {
+ ${CC} ${LDFLAGS} helloworld.c -o helloworld
+ }
+
+ do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 helloworld ${D}${bindir}
+ }
+
+By default, the ``helloworld``, ``helloworld-dbg``, and ``helloworld-dev`` packages
+are built. For information on how to customize the packaging process, see the
+":ref:`dev-manual/new-recipe:splitting an application into multiple packages`"
+section.
+
+Building a Makefile-Based Package
+---------------------------------
+
+Applications built with GNU ``make`` require a recipe that has the source archive
+listed in :term:`SRC_URI`. You do not need to add a :ref:`ref-tasks-compile`
+step since by default BitBake starts the ``make`` command to compile the
+application. If you need additional ``make`` options, you should store them in
+the :term:`EXTRA_OEMAKE` or :term:`PACKAGECONFIG_CONFARGS` variables. BitBake
+passes these options into the GNU ``make`` invocation. Note that a
+:ref:`ref-tasks-install` task is still required. Otherwise, BitBake runs an
+empty :ref:`ref-tasks-install` task by default.
+
+Some applications might require extra parameters to be passed to the
+compiler. For example, the application might need an additional header
+path. You can accomplish this by adding to the :term:`CFLAGS` variable. The
+following example shows this::
+
+ CFLAGS:prepend = "-I ${S}/include "
+
+In the following example, ``lz4`` is a makefile-based package::
+
+ SUMMARY = "Extremely Fast Compression algorithm"
+ DESCRIPTION = "LZ4 is a very fast lossless compression algorithm, providing compression speed at 400 MB/s per core, scalable with multi-cores CPU. It also features an extremely fast decoder, with speed in multiple GB/s per core, typically reaching RAM speed limits on multi-core systems."
+ HOMEPAGE = "https://github.com/lz4/lz4"
+
+ LICENSE = "BSD-2-Clause | GPL-2.0-only"
+ LIC_FILES_CHKSUM = "file://lib/LICENSE;md5=ebc2ea4814a64de7708f1571904b32cc \
+ file://programs/COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
+ file://LICENSE;md5=d57c0d21cb917fb4e0af2454aa48b956 \
+ "
+
+ PE = "1"
+
+ SRCREV = "d44371841a2f1728a3f36839fd4b7e872d0927d3"
+
+ SRC_URI = "git://github.com/lz4/lz4.git;branch=release;protocol=https \
+ file://CVE-2021-3520.patch \
+ "
+ UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>.*)"
+
+ S = "${WORKDIR}/git"
+
+ # Fixed in r118, which is larger than the current version.
+ CVE_CHECK_IGNORE += "CVE-2014-4715"
+
+ EXTRA_OEMAKE = "PREFIX=${prefix} CC='${CC}' CFLAGS='${CFLAGS}' DESTDIR=${D} LIBDIR=${libdir} INCLUDEDIR=${includedir} BUILD_STATIC=no"
+
+ do_install() {
+ oe_runmake install
+ }
+
+ BBCLASSEXTEND = "native nativesdk"
+
+Building an Autotooled Package
+------------------------------
+
+Applications built with the Autotools, such as ``autoconf`` and ``automake``,
+require a recipe that has a source archive listed in :term:`SRC_URI` and that
+also inherits the :ref:`ref-classes-autotools` class, which contains the
+definitions of all the steps needed to build an Autotools-based application.
+The result of the build is automatically packaged. And, if the application uses
+NLS for localization, packages with locale information are generated (one
+package per language). Here is one example (``hello_2.3.bb``)::
+
+ SUMMARY = "GNU Helloworld application"
+ SECTION = "examples"
+ LICENSE = "GPL-2.0-or-later"
+ LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe"
+
+ SRC_URI = "${GNU_MIRROR}/hello/hello-${PV}.tar.gz"
+
+ inherit autotools gettext
+
+The variable :term:`LIC_FILES_CHKSUM` is used to track source license changes
+as described in the ":ref:`dev-manual/licenses:tracking license changes`"
+section. You can quickly
+create Autotools-based recipes in a manner similar to the previous example.
+
+.. _ref-building-meson-package:
+
+Building a Meson Package
+------------------------
+
+Applications built with the `Meson build system <https://mesonbuild.com/>`__
+just need a recipe that has sources described in :term:`SRC_URI` and inherits
+the :ref:`ref-classes-meson` class.
+
+The :oe_git:`ipcalc recipe </meta-openembedded/tree/meta-networking/recipes-support/ipcalc>`
+is a simple example of an application without dependencies::
+
+ SUMMARY = "Tool to assist in network address calculations for IPv4 and IPv6."
+ HOMEPAGE = "https://gitlab.com/ipcalc/ipcalc"
+
+ SECTION = "net"
+
+ LICENSE = "GPL-2.0-only"
+ LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
+
+ SRC_URI = "git://gitlab.com/ipcalc/ipcalc.git;protocol=https;branch=master"
+ SRCREV = "4c4261a47f355946ee74013d4f5d0494487cc2d6"
+
+ S = "${WORKDIR}/git"
+
+ inherit meson
+
+Applications with dependencies are likely to inherit the
+:ref:`ref-classes-pkgconfig` class, as ``pkg-config`` is the default method
+used by Meson to find dependencies and compile applications against them.
+
+Splitting an Application into Multiple Packages
+-----------------------------------------------
+
+You can use the variables :term:`PACKAGES` and :term:`FILES` to split an
+application into multiple packages.
+
+Here is an example that uses the ``libxpm`` recipe. By default,
+this recipe generates a single package that contains the library along
+with a few binaries. You can modify the recipe to split the binaries
+into separate packages::
+
+ require xorg-lib-common.inc
+
+ SUMMARY = "Xpm: X Pixmap extension library"
+ LICENSE = "MIT"
+ LIC_FILES_CHKSUM = "file://COPYING;md5=51f4270b012ecd4ab1a164f5f4ed6cf7"
+ DEPENDS += "libxext libsm libxt"
+ PE = "1"
+
+ XORG_PN = "libXpm"
+
+ PACKAGES =+ "sxpm cxpm"
+ FILES:cxpm = "${bindir}/cxpm"
+ FILES:sxpm = "${bindir}/sxpm"
+
+In the previous example, we want to ship the ``sxpm`` and ``cxpm``
+binaries in separate packages. Since ``bindir`` would be packaged into
+the main :term:`PN` package by default, we prepend the :term:`PACKAGES` variable
+so the additional package names are added to the start of the list. The
+extra ``FILES:*`` variables then contain the information that
+defines which files and directories go into which packages. Files
+included by earlier packages are skipped by later packages. Thus, the
+main :term:`PN` package does not include the files listed above.
+
+Packaging Externally Produced Binaries
+--------------------------------------
+
+Sometimes, you need to add pre-compiled binaries to an image. For
+example, suppose that there are binaries for proprietary code,
+created by a particular division of a company. Your part of the company
+needs to use those binaries as part of an image that you are building
+using the OpenEmbedded build system. Since you only have the binaries
+and not the source code, you cannot use a typical recipe that expects to
+fetch the source specified in
+:term:`SRC_URI` and then compile it.
+
+One method is to package the binaries and then install them as part of
+the image. Generally, it is not a good idea to package binaries since,
+among other things, it can hinder the ability to reproduce builds and
+could lead to compatibility problems with ABI in the future. However,
+sometimes you have no choice.
+
+The easiest solution is to create a recipe that uses the
+:ref:`ref-classes-bin-package` class and to be sure that you are using default
+locations for build artifacts. In most cases, the
+:ref:`ref-classes-bin-package` class handles "skipping" the configure and
+compile steps as well as sets things up to grab packages from the appropriate
+area. In particular, this class sets ``noexec`` on both the
+:ref:`ref-tasks-configure` and :ref:`ref-tasks-compile` tasks, sets
+``FILES:${PN}`` to "/" so that it picks up all files, and sets up a
+:ref:`ref-tasks-install` task, which effectively copies all files from ``${S}``
+to ``${D}``. The :ref:`ref-classes-bin-package` class works well when the files
+extracted into ``${S}`` are already laid out in the way they should be laid out
+on the target. For more information on these variables, see the :term:`FILES`,
+:term:`PN`, :term:`S`, and :term:`D` variables in the Yocto Project Reference
+Manual's variable glossary.
+
+.. note::
+
+ - Using :term:`DEPENDS` is a good
+ idea even for components distributed in binary form, and is often
+ necessary for shared libraries. For a shared library, listing the
+ library dependencies in :term:`DEPENDS` makes sure that the libraries
+ are available in the staging sysroot when other recipes link
+ against the library, which might be necessary for successful
+ linking.
+
+ - Using :term:`DEPENDS` also allows runtime dependencies between
+ packages to be added automatically. See the
+ ":ref:`overview-manual/concepts:automatically added runtime dependencies`"
+ section in the Yocto Project Overview and Concepts Manual for more
+ information.
+
+If you cannot use the :ref:`ref-classes-bin-package` class, you need to be sure you are
+doing the following:
+
+- Create a recipe where the
+ :ref:`ref-tasks-configure` and
+ :ref:`ref-tasks-compile` tasks do
+ nothing: It is usually sufficient to just not define these tasks in
+ the recipe, because the default implementations do nothing unless a
+ Makefile is found in
+ ``${``\ :term:`S`\ ``}``.
+
+ If ``${S}`` might contain a Makefile, or if you inherit some class
+ that replaces :ref:`ref-tasks-configure` and :ref:`ref-tasks-compile` with custom
+ versions, then you can use the
+ ``[``\ :ref:`noexec <bitbake-user-manual/bitbake-user-manual-metadata:variable flags>`\ ``]``
+ flag to turn the tasks into no-ops, as follows::
+
+ do_configure[noexec] = "1"
+ do_compile[noexec] = "1"
+
+ Unlike :ref:`bitbake-user-manual/bitbake-user-manual-metadata:deleting a task`,
+ using the flag preserves the dependency chain from the :ref:`ref-tasks-fetch`,
+ :ref:`ref-tasks-unpack`, and :ref:`ref-tasks-patch` tasks to the
+ :ref:`ref-tasks-install` task.
+
+- Make sure your :ref:`ref-tasks-install` task installs the binaries
+ appropriately.
+
+- Ensure that you set up :term:`FILES`
+ (usually
+ ``FILES:${``\ :term:`PN`\ ``}``) to
+ point to the files you have installed, which of course depends on
+ where you have installed them and whether those files are in
+ different locations than the defaults.
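+
+Putting these points together, here is a minimal sketch for a recipe that
+ships a single prebuilt program. The ``prebuilt-tool`` name is hypothetical,
+and the binary is assumed to have been unpacked into ``${S}``::
+
+ do_configure[noexec] = "1"
+ do_compile[noexec] = "1"
+
+ do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 ${S}/prebuilt-tool ${D}${bindir}
+ }
+
+Since ``${bindir}`` is packaged into ``${PN}`` by default, the :term:`FILES`
+adjustment described above is only needed when you install into non-default
+locations.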
+
+Following Recipe Style Guidelines
+=================================
+
+When writing recipes, it is good to conform to existing style guidelines.
+See the ":doc:`../contributor-guide/recipe-style-guide`" in the Yocto Project
+and OpenEmbedded Contributor Guide for reference.
+
+It is common for existing recipes to deviate a bit from this style.
+However, aiming for at least a consistent style is a good idea. Some
+practices, such as omitting spaces around ``=`` operators in assignments
+or ordering recipe components in an erratic way, are widely seen as poor
+style.
+
+Recipe Syntax
+=============
+
+Understanding recipe file syntax is important for writing recipes. The
+following list overviews the basic items that make up a BitBake recipe
+file. For more complete BitBake syntax descriptions, see the
+":doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata`"
+chapter of the BitBake User Manual.
+
+- *Variable Assignments and Manipulations:* Variable assignments allow
+ a value to be assigned to a variable. The assignment can be static
+ text or might include the contents of other variables. In addition to
+ the assignment, appending and prepending operations are also
+ supported.
+
+ The following example shows some of the ways you can use variables in
+ recipes::
+
+ S = "${WORKDIR}/postfix-${PV}"
+ CFLAGS += "-DNO_ASM"
+ CFLAGS:append = " --enable-important-feature"
+
+- *Functions:* Functions provide a series of actions to be performed.
+ You usually use functions to override the default implementation of a
+ task function or to complement a default function (i.e. append or
+ prepend to an existing function). Standard functions use ``sh`` shell
+ syntax, although access to OpenEmbedded variables and internal
+ methods is also available.
+
+ Here is an example function from the ``sed`` recipe::
+
+ do_install () {
+ autotools_do_install
+ install -d ${D}${base_bindir}
+ mv ${D}${bindir}/sed ${D}${base_bindir}/sed
+ rmdir ${D}${bindir}/
+ }
+
+ It is
+ also possible to implement new functions that are called between
+ existing tasks, as long as the new functions are not replacing or
+ complementing the default functions. You can also implement functions in
+ Python instead of shell. Neither of these options is seen in the
+ majority of recipes.
+
+- *Keywords:* BitBake recipes use only a few keywords. You use keywords
+ to include common functions (``inherit``), load parts of a recipe
+ from other files (``include`` and ``require``) and export variables
+ to the environment (``export``).
+
+ The following example shows the use of some of these keywords::
+
+ export POSTCONF = "${STAGING_BINDIR}/postconf"
+ inherit autotools
+ require otherfile.inc
+
+- *Comments (#):* Any lines that begin with the hash character (``#``)
+ are treated as comment lines and are ignored::
+
+ # This is a comment
+
+This next list summarizes the most important and most commonly used
+parts of the recipe syntax. For more information on these parts of the
+syntax, you can reference the
+":doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata`" chapter
+in the BitBake User Manual.
+
+- *Line Continuation (\\):* Use the backward slash (``\``) character to
+ split a statement over multiple lines. Place the slash character at
+ the end of the line that is to be continued on the next line::
+
+ VAR = "A really long \
+ line"
+
+ .. note::
+
+ You cannot have any characters, including spaces or tabs, after the
+ slash character.
+
+- *Using Variables (${VARNAME}):* Use the ``${VARNAME}`` syntax to
+ access the contents of a variable::
+
+ SRC_URI = "${SOURCEFORGE_MIRROR}/libpng/zlib-${PV}.tar.gz"
+
+ .. note::
+
+ It is important to understand that the value of a variable
+ expressed in this form does not get substituted automatically. The
+ expansion of these expressions happens on-demand later (e.g.
+ usually when a function that makes reference to the variable
+ executes). This behavior ensures that the values are most
+ appropriate for the context in which they are finally used. On the
+ rare occasion that you do need the variable expression to be
+ expanded immediately, you can use the ``:=`` operator instead of
+ ``=`` when you make the assignment, but this is not generally needed.
+
+- *Quote All Assignments ("value"):* Use double quotes around values in
+ all variable assignments (e.g. ``"value"``). Here is an example::
+
+ VAR1 = "${OTHERVAR}"
+ VAR2 = "The version is ${PV}"
+
+- *Conditional Assignment (?=):* Conditional assignment is used to
+ assign a value to a variable, but only when the variable is currently
+ unset. Use the question mark followed by the equal sign (``?=``) to
+ make a "soft" assignment used for conditional assignment. Typically,
+ "soft" assignments are used in the ``local.conf`` file for variables
+ that are allowed to come through from the external environment.
+
+ Here is an example where ``VAR1`` is set to "New value" if it is
+ currently unset. However, if ``VAR1`` has already been set, it
+ remains unchanged::
+
+ VAR1 ?= "New value"
+
+ In this next example, ``VAR1`` is left with the value "Original value"::
+
+ VAR1 = "Original value"
+ VAR1 ?= "New value"
+
+- *Appending (+=):* Use the plus character followed by the equals sign
+ (``+=``) to append values to existing variables.
+
+ .. note::
+
+ This operator adds a space between the existing content of the
+ variable and the new content.
+
+ Here is an example::
+
+ SRC_URI += "file://fix-makefile.patch"
+
+- *Prepending (=+):* Use the equals sign followed by the plus character
+ (``=+``) to prepend values to existing variables.
+
+ .. note::
+
+ This operator adds a space between the new content and the
+ existing content of the variable.
+
+ Here is an example::
+
+ VAR =+ "Starts"
+
+- *Appending (:append):* Use the ``:append`` operator to append values
+ to existing variables. This operator does not add any additional
+ space. Also, the operator is applied after all the ``+=``, and ``=+``
+ operators have been applied and after all ``=`` assignments have
+ occurred. This means that if ``:append`` is used in a recipe, it can
+ only be overridden by another layer using the special ``:remove``
+ operator, which in turn will prevent further layers from adding it back.
+
+ The following example shows the space being explicitly added to the
+ start to ensure the appended value is not merged with the existing
+ value::
+
+ CFLAGS:append = " --enable-important-feature"
+
+ You can also use
+ the ``:append`` operator with overrides, which results in the actions
+ only being performed for the specified target or machine::
+
+ CFLAGS:append:sh4 = " --enable-important-sh4-specific-feature"
+
+- *Prepending (:prepend):* Use the ``:prepend`` operator to prepend
+ values to existing variables. This operator does not add any
+ additional space. Also, the operator is applied after all the ``+=``,
+ and ``=+`` operators have been applied and after all ``=``
+ assignments have occurred.
+
+ The following example shows the space being explicitly added to the
+ end to ensure the prepended value is not merged with the existing
+ value::
+
+ CFLAGS:prepend = "-I${S}/myincludes "
+
+ You can also use the
+ ``:prepend`` operator with overrides, which results in the actions
+ only being performed for the specified target or machine::
+
+ CFLAGS:prepend:sh4 = "-I${S}/myincludes "
+
+- *Overrides:* You can use overrides to set a value conditionally,
+ typically based on how the recipe is being built. For example, to set
+ the :term:`KBRANCH` variable's
+ value to "standard/base" for any target
+ :term:`MACHINE`, except for
+ qemuarm where it should be set to "standard/arm-versatile-926ejs",
+ you would do the following::
+
+ KBRANCH = "standard/base"
+ KBRANCH:qemuarm = "standard/arm-versatile-926ejs"
+
+ Overrides are also used to separate
+ alternate values of a variable in other situations. For example, when
+ setting variables such as
+ :term:`FILES` and
+ :term:`RDEPENDS` that are
+ specific to individual packages produced by a recipe, you should
+ always use an override that specifies the name of the package.
+
+- *Indentation:* Use spaces for indentation rather than tabs. For
+ shell functions, both currently work. However, it is a policy
+ decision of the Yocto Project to use tabs in shell functions. Realize
+ that some layers have a policy to use spaces for all indentation.
+
+- *Using Python for Complex Operations:* For more advanced processing,
+ it is possible to use Python code during variable assignments (e.g.
+ search and replacement on a variable).
+
+ You indicate Python code using the ``${@python_code}`` syntax for the
+ variable assignment::
+
+ SRC_URI = "ftp://ftp.info-zip.org/pub/infozip/src/zip${@d.getVar('PV',1).replace('.', '')}.tgz"
+
+- *Shell Function Syntax:* Write shell functions as if you were writing
+ a shell script when you describe a list of actions to take. You
+ should ensure that your script works with a generic ``sh`` and that
+ it does not require any ``bash`` or other shell-specific
+ functionality. The same considerations apply to various system
+ utilities (e.g. ``sed``, ``grep``, ``awk``, and so forth) that you
+ might wish to use. If in doubt, you should check with multiple
+ implementations --- including those from BusyBox.
+
diff --git a/documentation/dev-manual/packages.rst b/documentation/dev-manual/packages.rst
new file mode 100644
index 0000000000..e5028fffdc
--- /dev/null
+++ b/documentation/dev-manual/packages.rst
@@ -0,0 +1,1250 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Working with Packages
+*********************
+
+This section describes a few tasks that involve packages:
+
+- :ref:`dev-manual/packages:excluding packages from an image`
+
+- :ref:`dev-manual/packages:incrementing a package version`
+
+- :ref:`dev-manual/packages:handling optional module packaging`
+
+- :ref:`dev-manual/packages:using runtime package management`
+
+- :ref:`dev-manual/packages:generating and using signed packages`
+
+- :ref:`Setting up and running package test
+ (ptest) <dev-manual/packages:testing packages with ptest>`
+
+- :ref:`dev-manual/packages:creating node package manager (npm) packages`
+
+- :ref:`dev-manual/packages:adding custom metadata to packages`
+
+Excluding Packages from an Image
+================================
+
+You might find it necessary to prevent specific packages from being
+installed into an image. If so, you can use several variables to direct
+the build system to essentially ignore installing recommended packages
+or to not install a package at all.
+
+The following list introduces variables you can use to prevent packages
+from being installed into your image. Each of these variables only works
+with IPK and RPM package types, not for Debian packages.
+Also, you can use these variables from your ``local.conf`` file
+or attach them to a specific image recipe by using a recipe name
+override. For more detail on the variables, see the descriptions in the
+Yocto Project Reference Manual's glossary chapter.
+
+- :term:`BAD_RECOMMENDATIONS`:
+ Use this variable to specify "recommended-only" packages that you do
+ not want installed.
+
+- :term:`NO_RECOMMENDATIONS`:
+ Use this variable to prevent all "recommended-only" packages from
+ being installed.
+
+- :term:`PACKAGE_EXCLUDE`:
+ Use this variable to prevent specific packages from being installed
+ regardless of whether they are "recommended-only" or not. You need to
+ realize that the build process could fail with an error when you
+ prevent the installation of a package whose presence is required by
+ an installed package.
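+
+As a sketch of how these variables might be used in ``local.conf``, with
+placeholder package names::
+
+ # Skip specific recommended-only packages
+ BAD_RECOMMENDATIONS = "kernel-module-examplemod"
+
+ # Or skip all recommended-only packages
+ #NO_RECOMMENDATIONS = "1"
+
+ # Prevent a package from being installed at all; note that the build can
+ # fail if another installed package requires it
+ PACKAGE_EXCLUDE = "example-pkg"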
+
+Incrementing a Package Version
+==============================
+
+This section provides some background on how binary package versioning
+is accomplished and presents some of the services, variables, and
+terminology involved.
+
+In order to understand binary package versioning, you need to consider
+the following:
+
+- Binary Package: The binary package that is eventually built and
+ installed into an image.
+
+- Binary Package Version: The binary package version is composed of two
+ components --- a version and a revision.
+
+ .. note::
+
+ Technically, a third component, the "epoch" (i.e. :term:`PE`) is involved
+ but this discussion for the most part ignores :term:`PE`.
+
+ The version and revision are taken from the
+ :term:`PV` and
+ :term:`PR` variables, respectively.
+
+- :term:`PV`: The recipe version. :term:`PV` represents the version of the
+ software being packaged. Do not confuse :term:`PV` with the binary
+ package version.
+
+- :term:`PR`: The recipe revision.
+
+- :term:`SRCPV`: The OpenEmbedded
+ build system uses this string to help define the value of :term:`PV` when
+ the source code revision needs to be included in it.
+
+- :yocto_wiki:`PR Service </PR_Service>`: A
+ network-based service that helps automate keeping package feeds
+ compatible with existing package manager applications such as RPM,
+ APT, and OPKG.
+
+Whenever the binary package content changes, the binary package version
+must change. Changing the binary package version is accomplished by
+changing or "bumping" the :term:`PR` and/or :term:`PV` values. Increasing these
+values occurs one of two ways:
+
+- Automatically using a Package Revision Service (PR Service).
+
+- Manually incrementing the :term:`PR` and/or :term:`PV` variables.
+
+Since a primary challenge of any build system and its users is to
+maintain a package feed that is compatible with existing package manager
+applications such as RPM, APT, and OPKG, using an automated system is
+much preferred over a manual system. In either system, the main
+requirement is that binary package version numbering increases in a
+linear fashion and that there are a number of version components to
+support that linear progression. For information on how to ensure
+package revisioning remains linear, see the
+":ref:`dev-manual/packages:automatically incrementing a package version number`"
+section.
+
+The following three sections provide related information on the PR
+Service, the manual method for "bumping" :term:`PR` and/or :term:`PV`, and on
+how to ensure binary package revisioning remains linear.
+
+Working With a PR Service
+-------------------------
+
+As mentioned, attempting to maintain revision numbers in the
+:term:`Metadata` is error prone, inaccurate,
+and causes problems for people submitting recipes. Conversely, the PR
+Service automatically generates increasing numbers, particularly the
+revision field, which removes the human element.
+
+.. note::
+
+ For additional information on using a PR Service, you can see the
+ :yocto_wiki:`PR Service </PR_Service>` wiki page.
+
+The Yocto Project uses variables in order of decreasing priority to
+facilitate revision numbering (i.e.
+:term:`PE`,
+:term:`PV`, and
+:term:`PR` for epoch, version, and
+revision, respectively). The values are highly dependent on the policies
+and procedures of a given distribution and package feed.
+
+Because the OpenEmbedded build system uses
+":ref:`signatures <overview-manual/concepts:checksums (signatures)>`", which are
+unique to a given build, the build system knows when to rebuild
+packages. All the inputs into a given task are represented by a
+signature, which can trigger a rebuild when different. Thus, the build
+system itself does not rely on the :term:`PR`, :term:`PV`, and :term:`PE` numbers to
+trigger a rebuild. The signatures, however, can be used to generate
+these values.
+
+The PR Service works with both ``OEBasic`` and ``OEBasicHash``
+generators. The value of :term:`PR` bumps when the checksum changes and the
+different generator mechanisms change signatures under different
+circumstances.
+
+As implemented, the build system includes values from the PR Service
+into the :term:`PR` field as an addition using the form "``.x``" so ``r0``
+becomes ``r0.1``, ``r0.2`` and so forth. This scheme allows existing
+:term:`PR` values to be used for whatever reasons, which include manual
+:term:`PR` bumps, should it be necessary.
+
+By default, the PR Service is not enabled or running. Thus, the packages
+generated are just "self consistent". The build system adds and removes
+packages and there are no guarantees about upgrade paths but images will
+be consistent and correct with the latest changes.
+
+The simplest form for a PR Service is for a single host development system
+that builds the package feed (building system). For this scenario, you can
+enable a local PR Service by setting :term:`PRSERV_HOST` in your
+``local.conf`` file in the :term:`Build Directory`::
+
+ PRSERV_HOST = "localhost:0"
+
+Once the service is started, packages will automatically
+get increasing :term:`PR` values and BitBake takes care of starting and
+stopping the server.
+
+If you have a more complex setup where multiple host development systems
+work against a common, shared package feed, you have a single PR Service
+running and it is connected to each building system. For this scenario,
+you need to start the PR Service using the ``bitbake-prserv`` command::
+
+ bitbake-prserv --host ip --port port --start
+
+In addition to
+hand-starting the service, you need to update the ``local.conf`` file of
+each building system as described earlier so each system points to the
+server and port.
+
+It is also recommended you use build history, which adds some sanity
+checks to binary package versions, in conjunction with the server that
+is running the PR Service. To enable build history, add the following to
+each building system's ``local.conf`` file::
+
+ # It is recommended to activate "buildhistory" for testing the PR service
+ INHERIT += "buildhistory"
+ BUILDHISTORY_COMMIT = "1"
+
+For information on build
+history, see the
+":ref:`dev-manual/build-quality:maintaining build output quality`" section.
+
+.. note::
+
+ The OpenEmbedded build system does not maintain :term:`PR` information as
+ part of the shared state (sstate) packages. If you maintain an sstate
+ feed, it's expected that either all your building systems that
+ contribute to the sstate feed use a shared PR service, or you do not
+ run a PR service on any of your building systems.
+
+ That's because if you had multiple machines sharing a PR service but
+ not their sstate feed, you could end up with "diverging" hashes for
+ the same output artefacts. When presented to the shared PR service,
+ each would be considered new and would increase the revision
+ number, causing many unnecessary package upgrades.
+
+ For more information on shared state, see the
+ ":ref:`overview-manual/concepts:shared state cache`"
+ section in the Yocto Project Overview and Concepts Manual.
+
+Manually Bumping PR
+-------------------
+
+The alternative to setting up a PR Service is to manually "bump" the
+:term:`PR` variable.
+
+If a committed change results in changing the package output, then the
+value of the :term:`PR` variable needs to be increased (or "bumped") as part of
+that commit. For new recipes you should add the :term:`PR` variable and set
+its initial value equal to "r0", which is the default. Even though the
+default value is "r0", the practice of adding it to a new recipe makes
+it harder to forget to bump the variable when you make changes to the
+recipe in future.
+
+Usually, version increases occur only to binary packages. However, if
+for some reason :term:`PV` changes but does not increase, you can increase
+the :term:`PE` variable (Package Epoch). The :term:`PE` variable defaults to
+"0".
+
+Binary package version numbering strives to follow the `Debian Version
+Field Policy
+Guidelines <https://www.debian.org/doc/debian-policy/ch-controlfields.html>`__.
+These guidelines define how versions are compared and what "increasing"
+a version means.
+
+Automatically Incrementing a Package Version Number
+---------------------------------------------------
+
+When fetching a repository, BitBake uses the
+:term:`SRCREV` variable to determine
+the specific source code revision from which to build. You set the
+:term:`SRCREV` variable to
+:term:`AUTOREV` to cause the
+OpenEmbedded build system to automatically use the latest revision of
+the software::
+
+ SRCREV = "${AUTOREV}"
+
+Furthermore, you need to reference :term:`SRCPV` in :term:`PV` in order to
+automatically update the version whenever the revision of the source
+code changes. Here is an example::
+
+ PV = "1.0+git${SRCPV}"
+
+The OpenEmbedded build system substitutes :term:`SRCPV` with the following:
+
+.. code-block:: none
+
+ AUTOINC+source_code_revision
+
+The build system replaces the ``AUTOINC``
+with a number. The number used depends on the state of the PR Service:
+
+- If PR Service is enabled, the build system increments the number,
+ which is similar to the behavior of
+ :term:`PR`. This behavior results in
+ linearly increasing package versions, which is desirable. Here is an
+ example:
+
+ .. code-block:: none
+
+ hello-world-git_0.0+git0+b6558dd387-r0.0_armv7a-neon.ipk
+ hello-world-git_0.0+git1+dd2f5c3565-r0.0_armv7a-neon.ipk
+
+- If PR Service is not enabled, the build system replaces the
+ ``AUTOINC`` placeholder with zero (i.e. "0"). This results in
+ changing the package version since the source revision is included.
+ However, package versions are not increased linearly. Here is an
+ example:
+
+ .. code-block:: none
+
+ hello-world-git_0.0+git0+b6558dd387-r0.0_armv7a-neon.ipk
+ hello-world-git_0.0+git0+dd2f5c3565-r0.0_armv7a-neon.ipk
+
+In summary, the OpenEmbedded build system does not track the history of
+binary package versions for this purpose. ``AUTOINC``, in this case, is
+comparable to :term:`PR`. If the PR Service is not enabled, ``AUTOINC`` in the
+package version is simply replaced by "0". If the PR Service is enabled, the
+build system keeps track of the package versions and bumps the number
+when the package revision changes.
+
+Handling Optional Module Packaging
+==================================
+
+Many pieces of software split functionality into optional modules (or
+plugins) and the plugins that are built might depend on configuration
+options. To avoid having to duplicate the logic that determines what
+modules are available in your recipe or to avoid having to package each
+module by hand, the OpenEmbedded build system provides functionality to
+handle module packaging dynamically.
+
+To handle optional module packaging, you need to do two things:
+
+- Ensure the module packaging is actually done.
+
+- Ensure that any dependencies on optional modules from other recipes
+ are satisfied by your recipe.
+
+Making Sure the Packaging is Done
+---------------------------------
+
+To ensure the module packaging actually gets done, you use the
+``do_split_packages`` function within the ``populate_packages`` Python
+function in your recipe. The ``do_split_packages`` function searches for
+a pattern of files or directories under a specified path and creates a
+package for each one it finds by appending to the
+:term:`PACKAGES` variable and
+setting the appropriate values for ``FILES:packagename``,
+``RDEPENDS:packagename``, ``DESCRIPTION:packagename``, and so forth.
+Here is an example from the ``lighttpd`` recipe::
+
+ python populate_packages:prepend () {
+ lighttpd_libdir = d.expand('${libdir}')
+ do_split_packages(d, lighttpd_libdir, '^mod_(.*).so$',
+ 'lighttpd-module-%s', 'Lighttpd module for %s',
+ extra_depends='')
+ }
+
+The previous example specifies a number of things in the call to
+``do_split_packages``.
+
+- A directory within the files installed by your recipe through
+ :ref:`ref-tasks-install` in which to search.
+
+- A regular expression used to match module files in that directory. In
+ the example, note the parentheses () that mark the part of the
+ expression from which the module name should be derived.
+
+- A pattern to use for the package names.
+
+- A description for each package.
+
+- An empty string for ``extra_depends``, which disables the default
+ dependency on the main ``lighttpd`` package. Thus, if a file in
+ ``${libdir}`` called ``mod_alias.so`` is found, a package called
+ ``lighttpd-module-alias`` is created for it and the
+ :term:`DESCRIPTION` is set to
+ "Lighttpd module for alias".
+
+Often, packaging modules is as simple as the previous example. However,
+there are more advanced options that you can use within
+``do_split_packages`` to modify its behavior. And, if you need to, you
+can add more logic by specifying a hook function that is called for each
+package. It is also perfectly acceptable to call ``do_split_packages``
+multiple times if you have more than one set of modules to package.
+
+For more examples that show how to use ``do_split_packages``, see the
+``connman.inc`` file in the ``meta/recipes-connectivity/connman/``
+directory of the ``poky`` :ref:`source repository <overview-manual/development-environment:yocto project source repositories>`. You can
+also find examples in ``meta/classes-recipe/kernel.bbclass``.
+
+Here is a reference that shows ``do_split_packages`` mandatory and
+optional arguments::
+
+ Mandatory arguments
+
+ root
+ The path in which to search
+ file_regex
+ Regular expression to match searched files.
+ Use parentheses () to mark the part of this
+ expression that should be used to derive the
+ module name (to be substituted where %s is
+ used in other function arguments as noted below)
+ output_pattern
+ Pattern to use for the package names. Must
+ include %s.
+ description
+ Description to set for each package. Must
+ include %s.
+
+ Optional arguments
+
+ postinst
+ Postinstall script to use for all packages
+ (as a string)
+ recursive
+ True to perform a recursive search --- default
+ False
+ hook
+ A hook function to be called for every match.
+ The function will be called with the following
+ arguments (in the order listed):
+
+ f
+ Full path to the file/directory match
+ pkg
+ The package name
+ file_regex
+ As above
+ output_pattern
+ As above
+ modulename
+ The module name derived using file_regex
+ extra_depends
+ Extra runtime dependencies (RDEPENDS) to be
+ set for all packages. The default value of None
+ causes a dependency on the main package
+ (${PN}) --- if you do not want this, pass empty
+ string '' for this parameter.
+ aux_files_pattern
+ Extra item(s) to be added to FILES for each
+ package. Can be a single string item or a list
+ of strings for multiple items. Must include %s.
+ postrm
+ postrm script to use for all packages (as a
+ string)
+ allow_dirs
+ True to allow directories to be matched -
+ default False
+ prepend
+ If True, prepend created packages to PACKAGES
+ instead of the default False which appends them
+ match_path
+ match file_regex on the whole relative path to
+ the root rather than just the filename
+ aux_files_pattern_verbatim
+ Extra item(s) to be added to FILES for each
+ package, using the actual derived module name
+ rather than converting it to something legal
+ for a package name. Can be a single string item
+ or a list of strings for multiple items. Must
+ include %s.
+ allow_links
+ True to allow symlinks to be matched --- default
+ False
+ summary
+ Summary to set for each package. Must include %s;
+ defaults to description if not set.
+
+
+
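+As an illustration of some of these optional arguments, here is a
+hypothetical sketch that uses a hook function; the plugin directory,
+file pattern, and package names (``myapp``, ``libplugin_*.so``) are
+made up for the example::
+
+   python populate_packages:prepend () {
+       plugin_dir = d.expand('${libdir}/myapp/plugins')
+
+       def plugin_hook(f, pkg, file_regex, output_pattern, modulename):
+           # Recommend the hypothetical "extra" plugin from the main package
+           if modulename == 'extra':
+               d.appendVar('RRECOMMENDS:' + d.getVar('PN'), ' ' + pkg)
+
+       do_split_packages(d, plugin_dir, r'^libplugin_(.*)\.so$',
+                         'myapp-plugin-%s', 'MyApp plugin for %s',
+                         extra_depends='', hook=plugin_hook)
+   }
+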
+Satisfying Dependencies
+-----------------------
+
+The second part for handling optional module packaging is to ensure that
+any dependencies on optional modules from other recipes are satisfied by
+your recipe. You can be sure these dependencies are satisfied by using
+the :term:`PACKAGES_DYNAMIC`
+variable. Here is an example that continues with the ``lighttpd`` recipe
+shown earlier::
+
+ PACKAGES_DYNAMIC = "lighttpd-module-.*"
+
+The name
+specified in the regular expression can of course be anything. In this
+example, it is ``lighttpd-module-`` and is specified as the prefix to
+ensure that any :term:`RDEPENDS` and
+:term:`RRECOMMENDS` on a package
+name starting with the prefix are satisfied during build time. If you
+are using ``do_split_packages`` as described in the previous section,
+the value you put in :term:`PACKAGES_DYNAMIC` should correspond to the name
+pattern specified in the call to ``do_split_packages``.
+
+Using Runtime Package Management
+================================
+
+During a build, BitBake always transforms a recipe into one or more
+packages. For example, BitBake takes the ``bash`` recipe and produces a
+number of packages (e.g. ``bash``, ``bash-bashbug``,
+``bash-completion``, ``bash-completion-dbg``, ``bash-completion-dev``,
+``bash-completion-extra``, ``bash-dbg``, and so forth). Not all
+generated packages are included in an image.
+
+In several situations, you might need to update, add, remove, or query
+the packages on a target device at runtime (i.e. without having to
+generate a new image). Examples of such situations include:
+
+- You want to provide in-the-field updates to deployed devices (e.g.
+ security updates).
+
+- You want to have a fast turn-around development cycle for one or more
+ applications that run on your device.
+
+- You want to temporarily install the "debug" packages of various
+ applications on your device so that debugging can be greatly improved
+ by allowing access to symbols and source debugging.
+
+- You want to deploy a more minimal package selection on your device
+ but allow in-the-field updates to add a larger selection for
+ customization.
+
+In all these situations, you have something similar to a more
+traditional Linux distribution in that in-field devices are able to
+receive pre-compiled packages from a server for installation or update.
+Being able to install these packages on a running, in-field device is
+what is termed "runtime package management".
+
+In order to use runtime package management, you need a host or server
+machine that serves up the pre-compiled packages plus the required
+metadata. You also need package manipulation tools on the target. The
+build machine is a likely candidate to act as the server. However, that
+machine does not necessarily have to be the package server. The build
+machine could push its artifacts to another machine that acts as the
+server (e.g. Internet-facing). In fact, doing so is advantageous for a
+production environment as getting the packages away from the development
+system's :term:`Build Directory` prevents accidental overwrites.
+
+A simple build that targets just one device produces more than one
+package database. In other words, the packages produced by a build are
+separated out into several different package groupings based on
+criteria such as the target's CPU architecture, the target board, or the
+C library used on the target. For example, a build targeting the
+``qemux86`` device produces the following three package databases:
+``noarch``, ``i586``, and ``qemux86``. If you wanted your ``qemux86``
+device to be aware of all the packages that were available to it, you
+would need to point it to each of these databases individually. In a
+similar way, a traditional Linux distribution usually is configured to
+be aware of a number of software repositories from which it retrieves
+packages.
+
+Using runtime package management is completely optional and not required
+for a successful build or deployment in any way. But if you want to make
+use of runtime package management, you need to do a couple of things above
+and beyond the basics. The remainder of this section describes what you
+need to do.
+
+Build Considerations
+--------------------
+
+This section describes build considerations of which you need to be
+aware in order to provide support for runtime package management.
+
+When BitBake generates packages, it needs to know what format or formats
+to use. In your configuration, you use the
+:term:`PACKAGE_CLASSES`
+variable to specify the format:
+
+#. Open the ``local.conf`` file inside your :term:`Build Directory` (e.g.
+ ``poky/build/conf/local.conf``).
+
+#. Select the desired package format as follows::
+
+ PACKAGE_CLASSES ?= "package_packageformat"
+
+   where ``packageformat`` can be "ipk", "rpm",
+   "deb", or "tar", which are the supported package formats (see the
+   example following this list).
+
+ .. note::
+
+ Because the Yocto Project supports four different package formats,
+ you can set the variable with more than one argument. However, the
+ OpenEmbedded build system only uses the first argument when
+ creating an image or Software Development Kit (SDK).
+
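+For example, a hypothetical configuration that enables both RPM and IPK
+packaging might look like the following; RPM, being listed first, is the
+format used when creating images or SDKs::
+
+   PACKAGE_CLASSES ?= "package_rpm package_ipk"
+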
+If you would like your image to start off with a basic package database
+containing the packages in your current build as well as to have the
+relevant tools available on the target for runtime package management,
+you can include "package-management" in the
+:term:`IMAGE_FEATURES`
+variable. Including "package-management" in this configuration variable
+ensures that when the image is assembled for your target, the image
+includes the currently-known package databases as well as the
+target-specific tools required for runtime package management to be
+performed on the target. However, this is not strictly necessary. You
+could start your image off without any databases but only include the
+required on-target package tool(s). As an example, you could include
+"opkg" in your
+:term:`IMAGE_INSTALL` variable
+if you are using the IPK package format. You can then initialize your
+target's package database(s) later once your image is up and running.
+
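+For example, in a hypothetical image recipe or ``local.conf`` fragment,
+either of the following lines (or both) could be used; the first pulls in
+the package databases and management tools, while the second only
+installs the ``opkg`` tool itself for an IPK-based setup::
+
+   IMAGE_FEATURES += "package-management"
+   IMAGE_INSTALL:append = " opkg"
+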
+Whenever you perform any sort of build step that can potentially
+generate a package or modify an existing one, it is always a good idea
+to re-generate the package index after the build by using the following
+command::
+
+ $ bitbake package-index
+
+It might be tempting to build the
+package and the package index at the same time with a command such as
+the following::
+
+ $ bitbake some-package package-index
+
+Do not do this, as
+BitBake does not schedule the package index task to run after the
+package you are building has completed. Consequently, you cannot be sure
+that the package index includes information for the package you just
+built. Thus, be sure to run the package index step separately after
+building any packages.
+
+You can use the
+:term:`PACKAGE_FEED_ARCHS`,
+:term:`PACKAGE_FEED_BASE_PATHS`,
+and
+:term:`PACKAGE_FEED_URIS`
+variables to pre-configure target images to use a package feed. If you
+do not define these variables, then manual steps as described in the
+subsequent sections are necessary to configure the target. You should
+set these variables before building the image in order to produce a
+correctly configured image.
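+
+As a hypothetical example, assuming an RPM-based build and a feed served
+from a host named ``my.server``, the following ``local.conf`` settings
+would pre-configure images to use that feed (adjust the paths and
+architectures to match your build)::
+
+   PACKAGE_FEED_URIS = "http://my.server/repo"
+   PACKAGE_FEED_BASE_PATHS = "rpm"
+   PACKAGE_FEED_ARCHS = "all core2-64 qemux86_64"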
+
+.. note::
+
+ Your image will need enough free storage space to run package upgrades,
+ especially if many of them need to be downloaded at the same time.
+ You should make sure images are created with enough free space
+ by setting the :term:`IMAGE_ROOTFS_EXTRA_SPACE` variable.
+
+When your build is complete, your packages reside in the
+``${TMPDIR}/deploy/packageformat`` directory. For example, if
+``${``\ :term:`TMPDIR`\ ``}`` is
+``tmp`` and your selected package type is RPM, then your RPM packages
+are available in ``tmp/deploy/rpm``.
+
+Host or Server Machine Setup
+----------------------------
+
+Although other protocols are possible, a server using HTTP typically
+serves packages. If you want to use HTTP, then set up and configure a
+web server such as Apache 2, lighttpd, or Python web server on the
+machine serving the packages.
+
+To keep things simple, this section describes how to set up a
+Python web server to share package feeds from the developer's
+machine. Although this server might not be the best for a production
+environment, the setup is simple and straightforward. Should you want
+to use a different server more suited for production (e.g. Apache 2,
+Lighttpd, or Nginx), take the appropriate steps to do so.
+
+From within the :term:`Build Directory` where you have built an image based on
+your packaging choice (i.e. the :term:`PACKAGE_CLASSES` setting), simply start
+the server. The following example assumes a :term:`Build Directory` of ``poky/build``
+and a :term:`PACKAGE_CLASSES` setting of ":ref:`ref-classes-package_rpm`"::
+
+ $ cd poky/build/tmp/deploy/rpm
+ $ python3 -m http.server
+
+Target Setup
+------------
+
+Setting up the target differs depending on the package management
+system. This section provides information for RPM, IPK, and DEB.
+
+Using RPM
+~~~~~~~~~
+
+The :wikipedia:`Dandified Packaging <DNF_(software)>` (DNF) tool performs
+runtime package management of RPM packages. In order to use DNF for
+runtime package management, you must perform an initial setup on the
+target machine for cases where the ``PACKAGE_FEED_*`` variables were not
+set as part of the image that is running on the target. This means if
+you built your image and did not use these variables as part of the
+build and your image is now running on the target, you need to perform
+the steps in this section if you want to use runtime package management.
+
+.. note::
+
+ For information on the ``PACKAGE_FEED_*`` variables, see
+ :term:`PACKAGE_FEED_ARCHS`, :term:`PACKAGE_FEED_BASE_PATHS`, and
+ :term:`PACKAGE_FEED_URIS` in the Yocto Project Reference Manual variables
+ glossary.
+
+On the target, you must inform DNF that package databases are available.
+You do this by creating a file named
+``/etc/yum.repos.d/oe-packages.repo`` and defining the ``oe-packages``
+repository in it.
+
+As an example, assume the target is able to use the following package
+databases: ``all``, ``i586``, and ``qemux86`` from a server named
+``my.server``. The specifics for setting up the web server are up to
+you. The critical requirement is that the URIs in the target repository
+configuration point to the correct remote location for the feeds.
+
+.. note::
+
+ For development purposes, you can point the web server to the build
+ system's ``deploy`` directory. However, for production use, it is better to
+ copy the package directories to a location outside of the build area and use
+ that location. Doing so avoids situations where the build system
+ overwrites or changes the ``deploy`` directory.
+
+When telling DNF where to look for the package databases, you must
+declare individual locations per architecture or a single location used
+for all architectures. You cannot do both:
+
+- *Create an Explicit List of Architectures:* Define individual base
+ URLs to identify where each package database is located:
+
+ .. code-block:: none
+
+ [oe-packages]
+ baseurl=http://my.server/rpm/i586 http://my.server/rpm/qemux86 http://my.server/rpm/all
+
+ This example
+ informs DNF about individual package databases for all three
+ architectures.
+
+- *Create a Single (Full) Package Index:* Define a single base URL that
+ identifies where a full package database is located::
+
+ [oe-packages]
+ baseurl=http://my.server/rpm
+
+ This example informs DNF about a single
+ package database that contains all the package index information for
+ all supported architectures.
+
+Once you have informed DNF where to find the package databases, you need
+to fetch them:
+
+.. code-block:: none
+
+ # dnf makecache
+
+DNF is now able to find, install, and
+upgrade packages from the specified repository or repositories.
+
+.. note::
+
+ See the `DNF documentation <https://dnf.readthedocs.io/en/latest/>`__ for
+ additional information.
+
+Using IPK
+~~~~~~~~~
+
+The ``opkg`` application performs runtime package management of IPK
+packages. You must perform an initial setup for ``opkg`` on the target
+machine if the
+:term:`PACKAGE_FEED_ARCHS`,
+:term:`PACKAGE_FEED_BASE_PATHS`,
+and
+:term:`PACKAGE_FEED_URIS`
+variables have not been set or the target image was built before the
+variables were set.
+
+The ``opkg`` application uses configuration files to find available
+package databases. Thus, you need to create a configuration file inside
+the ``/etc/opkg/`` directory, which informs ``opkg`` of any repository
+you want to use.
+
+As an example, suppose you are serving packages from an ``ipk/``
+directory containing the ``i586``, ``all``, and ``qemux86`` databases
+through an HTTP server named ``my.server``. On the target, create a
+configuration file (e.g. ``my_repo.conf``) inside the ``/etc/opkg/``
+directory containing the following:
+
+.. code-block:: none
+
+ src/gz all http://my.server/ipk/all
+ src/gz i586 http://my.server/ipk/i586
+ src/gz qemux86 http://my.server/ipk/qemux86
+
+Next, instruct ``opkg`` to fetch the
+repository information:
+
+.. code-block:: none
+
+ # opkg update
+
+The ``opkg`` application is now able to find, install, and upgrade packages
+from the specified repository.
+
+Using DEB
+~~~~~~~~~
+
+The ``apt`` application performs runtime package management of DEB
+packages. This application uses a source list file to find available
+package databases. You must perform an initial setup for ``apt`` on the
+target machine if the
+:term:`PACKAGE_FEED_ARCHS`,
+:term:`PACKAGE_FEED_BASE_PATHS`,
+and
+:term:`PACKAGE_FEED_URIS`
+variables have not been set or the target image was built before the
+variables were set.
+
+To inform ``apt`` of the repository you want to use, you might create a
+list file (e.g. ``my_repo.list``) inside the
+``/etc/apt/sources.list.d/`` directory. As an example, suppose you are
+serving packages from a ``deb/`` directory containing the ``i586``,
+``all``, and ``qemux86`` databases through an HTTP server named
+``my.server``. The list file should contain:
+
+.. code-block:: none
+
+ deb http://my.server/deb/all ./
+ deb http://my.server/deb/i586 ./
+ deb http://my.server/deb/qemux86 ./
+
+Next, instruct the ``apt`` application
+to fetch the repository information:
+
+.. code-block:: none
+
+ $ sudo apt update
+
+After this step,
+``apt`` is able to find, install, and upgrade packages from the
+specified repository.
+
+Generating and Using Signed Packages
+====================================
+
+In order to add security to RPM packages used during a build, you can
+take steps to securely sign them. Once a signature is verified, the
+OpenEmbedded build system can use the package in the build. If security
+fails for a signed package, the build system stops the build.
+
+This section describes how to sign RPM packages during a build and how
+to use signed package feeds (repositories) when doing a build.
+
+Signing RPM Packages
+--------------------
+
+To enable signing RPM packages, you must set up the following
+configurations in either your ``local.conf`` file or your distribution
+configuration file::
+
+ # Inherit sign_rpm.bbclass to enable signing functionality
+   INHERIT += "sign_rpm"
+ # Define the GPG key that will be used for signing.
+ RPM_GPG_NAME = "key_name"
+ # Provide passphrase for the key
+ RPM_GPG_PASSPHRASE = "passphrase"
+
+.. note::
+
+ Be sure to supply appropriate values for both `key_name` and
+ `passphrase`.
+
+Aside from the ``RPM_GPG_NAME`` and ``RPM_GPG_PASSPHRASE`` variables in
+the previous example, two optional variables related to signing are available:
+
+- *GPG_BIN:* Specifies a ``gpg`` binary/wrapper that is executed
+ when the package is signed.
+
+- *GPG_PATH:* Specifies the ``gpg`` home directory used when the
+ package is signed.
+
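+For example, a hypothetical configuration that points signing at a
+dedicated GnuPG wrapper and keyring location (both paths are made up for
+this sketch) could look like the following::
+
+   GPG_BIN = "/usr/local/bin/gpg-wrapper"
+   GPG_PATH = "/home/builder/.gnupg-rpm"
+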
+Processing Package Feeds
+------------------------
+
+In addition to being able to sign RPM packages, you can also enable
+signed package feeds for IPK and RPM packages.
+
+The steps you need to take to enable signed package feed use are similar
+to the steps used to sign RPM packages. You must define the following in
+your ``local.conf`` file or your distribution configuration file::
+
+ INHERIT += "sign_package_feed"
+ PACKAGE_FEED_GPG_NAME = "key_name"
+ PACKAGE_FEED_GPG_PASSPHRASE_FILE = "path_to_file_containing_passphrase"
+
+For signed package feeds, the passphrase must be specified in a separate file,
+which is pointed to by the ``PACKAGE_FEED_GPG_PASSPHRASE_FILE``
+variable. This keeps the plain-text passphrase out of your configuration
+files, which is more secure.
+
+Aside from the ``PACKAGE_FEED_GPG_NAME`` and
+``PACKAGE_FEED_GPG_PASSPHRASE_FILE`` variables, three optional variables
+related to signed package feeds are available:
+
+- *GPG_BIN:* Specifies a ``gpg`` binary/wrapper that is executed
+ when the package is signed.
+
+- *GPG_PATH:* Specifies the ``gpg`` home directory used when the
+ package is signed.
+
+- *PACKAGE_FEED_GPG_SIGNATURE_TYPE:* Specifies the type of ``gpg``
+ signature. This variable applies only to RPM and IPK package feeds.
+ Allowable values for the ``PACKAGE_FEED_GPG_SIGNATURE_TYPE`` are
+ "ASC", which is the default and specifies ascii armored, and "BIN",
+ which specifies binary.
+
+Testing Packages With ptest
+===========================
+
+A Package Test (ptest) runs tests against packages built by the
+OpenEmbedded build system on the target machine. A ptest contains at
+least two items: the actual test, and a shell script (``run-ptest``)
+that starts the test. The shell script that starts the test must not
+contain the actual test --- the script only starts the test. On the other
+hand, the test can be anything from a simple shell script that runs a
+binary and checks the output to an elaborate system of test binaries and
+data files.
+
+The test generates output in the format used by Automake::
+
+ result: testname
+
+where the result can be ``PASS``, ``FAIL``, or ``SKIP``, and
+the testname can be any identifying string.
+
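+For example, a ``run-ptest`` script might emit lines such as the
+following (the test names here are only illustrative):
+
+.. code-block:: none
+
+   PASS: test-parser
+   FAIL: test-network-timeout
+   SKIP: test-optional-backend
+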
+For a list of Yocto Project recipes that are already enabled with ptest,
+see the :yocto_wiki:`Ptest </Ptest>` wiki page.
+
+.. note::
+
+ A recipe is "ptest-enabled" if it inherits the :ref:`ref-classes-ptest`
+ class.
+
+Adding ptest to Your Build
+--------------------------
+
+To add package testing to your build, add the :term:`DISTRO_FEATURES` and
+:term:`EXTRA_IMAGE_FEATURES` variables to your ``local.conf`` file, which
+is found in the :term:`Build Directory`::
+
+ DISTRO_FEATURES:append = " ptest"
+ EXTRA_IMAGE_FEATURES += "ptest-pkgs"
+
+Once your build is complete, the ptest files are installed into the
+``/usr/lib/package/ptest`` directory within the image, where ``package``
+is the name of the package.
+
+Running ptest
+-------------
+
+The ``ptest-runner`` package installs a shell script that loops through
+all installed ptest test suites and runs them in sequence. Consequently,
+you might want to add this package to your image.
+
+Getting Your Package Ready
+--------------------------
+
+In order to enable a recipe to run installed ptests on target hardware,
+you need to prepare the recipes that build the packages you want to
+test. Here is what you have to do for each recipe:
+
+- *Be sure the recipe inherits the* :ref:`ref-classes-ptest` *class:*
+ Include the following line in each recipe::
+
+ inherit ptest
+
+- *Create run-ptest:* This script starts your test. Locate the
+ script where you will refer to it using
+ :term:`SRC_URI`. Here is an
+ example that starts a test for ``dbus``::
+
+ #!/bin/sh
+ cd test
+ make -k runtest-TESTS
+
+- *Ensure dependencies are met:* If the test adds build or runtime
+ dependencies that normally do not exist for the package (such as
+ requiring "make" to run the test suite), use the
+ :term:`DEPENDS` and
+ :term:`RDEPENDS` variables in
+ your recipe in order for the package to meet the dependencies. Here
+ is an example where the package has a runtime dependency on "make"::
+
+ RDEPENDS:${PN}-ptest += "make"
+
+- *Add a function to build the test suite:* Not many packages support
+ cross-compilation of their test suites. Consequently, you usually
+ need to add a cross-compilation function to the package.
+
+ Many packages based on Automake compile and run the test suite by
+ using a single command such as ``make check``. However, the host
+ ``make check`` builds and runs on the same computer, while
+ cross-compiling requires that the package is built on the host but
+ executed for the target architecture (though often, as in the case
+ for ptest, the execution occurs on the host). The built version of
+ Automake that ships with the Yocto Project includes a patch that
+  separates building and execution. Consequently, packages that use the
+  unaltered ``make check`` with this patched version of Automake
+  automatically cross-compile.
+
+ Regardless, you still must add a ``do_compile_ptest`` function to
+ build the test suite. Add a function similar to the following to your
+ recipe::
+
+ do_compile_ptest() {
+ oe_runmake buildtest-TESTS
+ }
+
+- *Ensure special configurations are set:* If the package requires
+ special configurations prior to compiling the test code, you must
+ insert a ``do_configure_ptest`` function into the recipe.
+
+- *Install the test suite:* The :ref:`ref-classes-ptest` class
+  automatically copies the file ``run-ptest`` to the target and then runs
+  ``make install-ptest`` to install the tests. If this is not enough, you
+  need to create a ``do_install_ptest`` function and make sure it gets
+  called after the "make install-ptest" completes, as in the sketch
+  following this list.
+
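+Here is a hypothetical ``do_install_ptest`` sketch that copies extra
+test data which ``make install-ptest`` would not install on its own (the
+``tests/data`` directory is made up for this example)::
+
+   do_install_ptest() {
+       install -d ${D}${PTEST_PATH}/data
+       install -m 0644 ${S}/tests/data/* ${D}${PTEST_PATH}/data/
+   }
+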
+Creating Node Package Manager (NPM) Packages
+============================================
+
+:wikipedia:`NPM <Npm_(software)>` is a package manager for the JavaScript
+programming language. The Yocto Project supports the NPM
+:ref:`fetcher <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`.
+You can use this fetcher in combination with
+:doc:`devtool </ref-manual/devtool-reference>` to create recipes that produce
+NPM packages.
+
+There are two workflows that allow you to create NPM packages using
+``devtool``: the NPM registry modules method and the NPM project code
+method.
+
+.. note::
+
+ While it is possible to create NPM recipes manually, using
+ ``devtool`` is far simpler.
+
+Additionally, some requirements and caveats exist.
+
+Requirements and Caveats
+------------------------
+
+You need to be aware of the following before using ``devtool`` to create
+NPM packages:
+
+- Of the two methods that you can use ``devtool`` to create NPM
+ packages, the registry approach is slightly simpler. However, you
+ might consider the project approach because you do not have to
+ publish your module in the `NPM registry <https://docs.npmjs.com/misc/registry>`__,
+ which is NPM's public registry.
+
+- Be familiar with
+ :doc:`devtool </ref-manual/devtool-reference>`.
+
+- The NPM host tools need the native ``nodejs-npm`` package, which is
+ part of the OpenEmbedded environment. You need to get the package by
+ cloning the :oe_git:`meta-openembedded </meta-openembedded>`
+ repository. Be sure to add the path to your local copy
+ to your ``bblayers.conf`` file.
+
+- ``devtool`` cannot detect native libraries in module dependencies.
+ Consequently, you must manually add packages to your recipe.
+
+- While deploying NPM packages, ``devtool`` cannot determine which
+ dependent packages are missing on the target (e.g. the node runtime
+ ``nodejs``). Consequently, you need to find out what files are
+ missing and be sure they are on the target.
+
+- Although you might not need NPM to run your node package, it is
+ useful to have NPM on your target. The NPM package name is
+ ``nodejs-npm``.
+
+Using the Registry Modules Method
+---------------------------------
+
+This section presents an example that uses the ``cute-files`` module,
+which is a file browser web application.
+
+.. note::
+
+ You must know the ``cute-files`` module version.
+
+The first thing you need to do is use ``devtool`` and the NPM fetcher to
+create the recipe::
+
+ $ devtool add "npm://registry.npmjs.org;package=cute-files;version=1.0.2"
+
+The
+``devtool add`` command runs ``recipetool create`` and uses the same
+fetch URI to download each dependency and capture license details where
+possible. The result is a generated recipe.
+
+After running for quite a long time, in particular building the
+``nodejs-native`` package, the command should end as follows::
+
+ INFO: Recipe /home/.../build/workspace/recipes/cute-files/cute-files_1.0.2.bb has been automatically created; further editing may be required to make it fully functional
+
+The recipe file is fairly simple and contains every license that
+``recipetool`` finds and includes the licenses in the recipe's
+:term:`LIC_FILES_CHKSUM`
+variables. You need to examine the variables and look for those with
+"unknown" in the :term:`LICENSE`
+field. You need to track down the license information for "unknown"
+modules and manually add the information to the recipe.
+
+``recipetool`` creates a "shrinkwrap" file for your recipe. Shrinkwrap
+files capture the versions of all dependent modules. Many packages do not
+provide shrinkwrap files but ``recipetool`` will create a shrinkwrap file as it
+runs.
+
+.. note::
+
+ A package is created for each sub-module. This policy is the only
+ practical way to have the licenses for all of the dependencies
+ represented in the license manifest of the image.
+
+The ``devtool edit-recipe`` command lets you take a look at the recipe::
+
+ $ devtool edit-recipe cute-files
+ # Recipe created by recipetool
+ # This is the basis of a recipe and may need further editing in order to be fully functional.
+ # (Feel free to remove these comments when editing.)
+
+ SUMMARY = "Turn any folder on your computer into a cute file browser, available on the local network."
+ # WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is
+ # your responsibility to verify that the values are complete and correct.
+ #
+ # NOTE: multiple licenses have been detected; they have been separated with &
+ # in the LICENSE value for now since it is a reasonable assumption that all
+ # of the licenses apply. If instead there is a choice between the multiple
+ # licenses then you should change the value to separate the licenses with |
+ # instead of &. If there is any doubt, check the accompanying documentation
+ # to determine which situation is applicable.
+
+ SUMMARY = "Turn any folder on your computer into a cute file browser, available on the local network."
+ LICENSE = "BSD-3-Clause & ISC & MIT"
+ LIC_FILES_CHKSUM = "file://LICENSE;md5=71d98c0a1db42956787b1909c74a86ca \
+ file://node_modules/accepts/LICENSE;md5=bf1f9ad1e2e1d507aef4883fff7103de \
+ file://node_modules/array-flatten/LICENSE;md5=44088ba57cb871a58add36ce51b8de08 \
+ ...
+ file://node_modules/cookie-signature/Readme.md;md5=57ae8b42de3dd0c1f22d5f4cf191e15a"
+
+ SRC_URI = " \
+ npm://registry.npmjs.org/;package=cute-files;version=${PV} \
+ npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
+ "
+
+ S = "${WORKDIR}/npm"
+
+ inherit npm
+
+ LICENSE:${PN} = "MIT"
+ LICENSE:${PN}-accepts = "MIT"
+ LICENSE:${PN}-array-flatten = "MIT"
+ ...
+ LICENSE:${PN}-vary = "MIT"
+
+Three key points in the previous example are:
+
+- :term:`SRC_URI` uses the NPM
+ scheme so that the NPM fetcher is used.
+
+- ``recipetool`` collects all the license information. If a
+ sub-module's license is unavailable, the sub-module's name appears in
+ the comments.
+
+- The ``inherit npm`` statement causes the :ref:`ref-classes-npm` class to
+ package up all the modules.
+
+You can run the following command to build the ``cute-files`` package::
+
+ $ devtool build cute-files
+
+Remember that ``nodejs`` must be installed on
+the target before your package.
+
+Assuming 192.168.7.2 for the target's IP address, use the following
+command to deploy your package::
+
+ $ devtool deploy-target -s cute-files root@192.168.7.2
+
+Once the package is installed on the target, you can
+test the application to show the contents of any directory::
+
+ $ cd /usr/lib/node_modules/cute-files
+ $ cute-files
+
+On a browser,
+go to ``http://192.168.7.2:3000`` and you see the following:
+
+.. image:: figures/cute-files-npm-example.png
+ :width: 100%
+
+You can find the recipe in ``workspace/recipes/cute-files``. You can use
+the recipe in any layer you choose.
+
+Using the NPM Projects Code Method
+----------------------------------
+
+Although it is useful to package modules already in the NPM registry,
+adding ``node.js`` projects under development is a more common developer
+use case.
+
+This section covers the NPM projects code method, which is very similar
+to the "registry" approach described in the previous section. In the NPM
+projects method, you provide ``devtool`` with a URL that points to the
+source files.
+
+Replicating the same example, (i.e. ``cute-files``) use the following
+command::
+
+ $ devtool add https://github.com/martinaglv/cute-files.git
+
+The recipe this command generates is very similar to the recipe created in
+the previous section. However, the :term:`SRC_URI` looks like the following::
+
+ SRC_URI = " \
+ git://github.com/martinaglv/cute-files.git;protocol=https;branch=master \
+ npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
+ "
+
+In this example,
+the main module is taken from the Git repository and dependencies are
+taken from the NPM registry. Other than those differences, the recipe is
+basically the same between the two methods. You can build and deploy the
+package exactly as described in the previous section that uses the
+registry modules method.
+
+Adding custom metadata to packages
+==================================
+
+The variable
+:term:`PACKAGE_ADD_METADATA`
+can be used to add additional metadata to packages. This is reflected in
+the package control/spec file. Taking the IPK format as an example, the
+CONTROL file stored inside the package would contain the additional
+metadata as additional lines.
+
+The variable can be used in multiple ways, including using suffixes to
+set it for a specific package type and/or package. Note that the order
+of precedence is the same as this list:
+
+- ``PACKAGE_ADD_METADATA_<PKGTYPE>:<PN>``
+
+- ``PACKAGE_ADD_METADATA_<PKGTYPE>``
+
+- ``PACKAGE_ADD_METADATA:<PN>``
+
+- :term:`PACKAGE_ADD_METADATA`
+
+`<PKGTYPE>` is a parameter and is expected to be the name of a specific
+package type:
+
+- IPK for .ipk packages
+
+- DEB for .deb packages
+
+- RPM for .rpm packages
+
+`<PN>` is a parameter and is expected to be a package name.
+
+The variable can contain multiple one-line metadata fields separated
+by the literal sequence '\\n'. The separator can be redefined using the
+variable flag ``separator``.
+
+Here is an example that adds two custom fields for ipk
+packages::
+
+ PACKAGE_ADD_METADATA_IPK = "Vendor: CustomIpk\nGroup:Applications/Spreadsheets"
+
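+Here is a further hypothetical example that adds a field only to the
+.deb form of one specific package (the package name and field value are
+made up for the example)::
+
+   PACKAGE_ADD_METADATA_DEB:my-app = "Maintainer: dev@example.com"
+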
diff --git a/documentation/dev-manual/prebuilt-libraries.rst b/documentation/dev-manual/prebuilt-libraries.rst
new file mode 100644
index 0000000000..a05f39ca1e
--- /dev/null
+++ b/documentation/dev-manual/prebuilt-libraries.rst
@@ -0,0 +1,209 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Working with Pre-Built Libraries
+********************************
+
+Introduction
+============
+
+Some library vendors do not release source code for their software but do
+release pre-built binaries. When shared libraries are built, they should
+be versioned (see `this article
+<https://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html>`__
+for some background), but sometimes this is not done.
+
+To summarize, a versioned library must meet two conditions:
+
+#. The filename must have the version appended, for example: ``libfoo.so.1.2.3``.
+#. The library must have the ELF tag ``SONAME`` set to the major version
+ of the library, for example: ``libfoo.so.1``. You can check this by
+ running ``readelf -d filename | grep SONAME``.
+
+This section shows how to deal with both versioned and unversioned
+pre-built libraries.
+
+Versioned Libraries
+===================
+
+In this example we work with pre-built libraries for the FT4222H USB I/O chip.
+Libraries are built for several target architecture variants and packaged in
+an archive as follows::
+
+ ├── build-arm-hisiv300
+ │   └── libft4222.so.1.4.4.44
+ ├── build-arm-v5-sf
+ │   └── libft4222.so.1.4.4.44
+ ├── build-arm-v6-hf
+ │   └── libft4222.so.1.4.4.44
+ ├── build-arm-v7-hf
+ │   └── libft4222.so.1.4.4.44
+ ├── build-arm-v8
+ │   └── libft4222.so.1.4.4.44
+ ├── build-i386
+ │   └── libft4222.so.1.4.4.44
+ ├── build-i486
+ │   └── libft4222.so.1.4.4.44
+ ├── build-mips-eglibc-hf
+ │   └── libft4222.so.1.4.4.44
+ ├── build-pentium
+ │   └── libft4222.so.1.4.4.44
+ ├── build-x86_64
+ │   └── libft4222.so.1.4.4.44
+ ├── examples
+ │   ├── get-version.c
+ │   ├── i2cm.c
+ │   ├── spim.c
+ │   └── spis.c
+ ├── ftd2xx.h
+ ├── install4222.sh
+ ├── libft4222.h
+ ├── ReadMe.txt
+ └── WinTypes.h
+
+To write a recipe to use such a library in your system:
+
+- The vendor will probably have a proprietary license, so set
+ :term:`LICENSE_FLAGS` in your recipe.
+- The vendor provides a tarball containing libraries so set :term:`SRC_URI`
+ appropriately.
+- Set :term:`COMPATIBLE_HOST` so that the recipe cannot be used with an
+ unsupported architecture. In the following example, we only support the 32
+ and 64 bit variants of the ``x86`` architecture.
+- As the vendor provides versioned libraries, we can use ``oe_soinstall``
+ from :ref:`ref-classes-utils` to install the shared library and create
+ symbolic links. If the vendor does not do this, we need to follow the
+ non-versioned library guidelines in the next section.
+- As the vendor likely used :term:`LDFLAGS` different from those in your Yocto
+ Project build, disable the corresponding checks by adding ``ldflags``
+ to :term:`INSANE_SKIP`.
+- The vendor will typically ship release builds without debugging symbols.
+ Avoid errors by preventing the packaging task from stripping out the symbols
+ and adding them to a separate debug package. This is done by setting the
+ ``INHIBIT_`` flags shown below.
+
+The complete recipe would look like this::
+
+ SUMMARY = "FTDI FT4222H Library"
+ SECTION = "libs"
+ LICENSE_FLAGS = "ftdi"
+ LICENSE = "CLOSED"
+
+ COMPATIBLE_HOST = "(i.86|x86_64).*-linux"
+
+ # Sources available in a .tgz file in .zip archive
+ # at https://ftdichip.com/wp-content/uploads/2021/01/libft4222-linux-1.4.4.44.zip
+ # Found on https://ftdichip.com/software-examples/ft4222h-software-examples/
+ # Since dealing with this particular type of archive is out of topic here,
+ # we use a local link.
+ SRC_URI = "file://libft4222-linux-${PV}.tgz"
+
+ S = "${WORKDIR}"
+
+ ARCH_DIR:x86-64 = "build-x86_64"
+ ARCH_DIR:i586 = "build-i386"
+ ARCH_DIR:i686 = "build-i386"
+
+ INSANE_SKIP:${PN} = "ldflags"
+ INHIBIT_PACKAGE_STRIP = "1"
+ INHIBIT_SYSROOT_STRIP = "1"
+ INHIBIT_PACKAGE_DEBUG_SPLIT = "1"
+
+ do_install () {
+ install -m 0755 -d ${D}${libdir}
+ oe_soinstall ${S}/${ARCH_DIR}/libft4222.so.${PV} ${D}${libdir}
+ install -d ${D}${includedir}
+ install -m 0755 ${S}/*.h ${D}${includedir}
+ }
+
+If the precompiled binaries are not statically linked and have dependencies on
+other libraries, then by adding those libraries to :term:`DEPENDS`, the linking
+can be examined and the appropriate :term:`RDEPENDS` automatically added.
+
+Non-Versioned Libraries
+=======================
+
+Some Background
+---------------
+
+Libraries in Linux systems are generally versioned so that it is possible
+to have multiple versions of the same library installed, which eases upgrades
+and support for older software. For example, suppose that in a versioned
+library, an actual library is called ``libfoo.so.1.2``, a symbolic link named
+``libfoo.so.1`` points to ``libfoo.so.1.2``, and a symbolic link named
+``libfoo.so`` points to ``libfoo.so.1.2``. Given these conditions, when you
+link a binary against a library, you typically provide the unversioned file
+name (i.e. ``-lfoo`` to the linker). However, the linker follows the symbolic
+link and actually links against the versioned filename. The unversioned symbolic
+link is only used at development time. Consequently, the library is packaged
+along with the headers in the development package ``${PN}-dev`` along with the
+actual library and versioned symbolic links in ``${PN}``. Because versioned
+libraries are far more common than unversioned libraries, the default packaging
+rules assume versioned libraries.
+
+Yocto Library Packaging Overview
+--------------------------------
+
+It follows that packaging an unversioned library requires a bit of work in the
+recipe. By default, ``libfoo.so`` gets packaged into ``${PN}-dev``, which
+triggers a QA warning that a non-symlink library is in a ``-dev`` package,
+and binaries in the same recipe link to the library in ``${PN}-dev``,
+which triggers more QA warnings. To solve this problem, you need to package the
+unversioned library into ``${PN}`` where it belongs. The abridged
+default :term:`FILES` variables in ``bitbake.conf`` are::
+
+ SOLIBS = ".so.*"
+ SOLIBSDEV = ".so"
+ FILES:${PN} = "... ${libdir}/lib*${SOLIBS} ..."
+ FILES_SOLIBSDEV ?= "... ${libdir}/lib*${SOLIBSDEV} ..."
+ FILES:${PN}-dev = "... ${FILES_SOLIBSDEV} ..."
+
+:term:`SOLIBS` defines a pattern that matches real shared object libraries.
+:term:`SOLIBSDEV` matches the development form (unversioned symlink). These two
+variables are then used in ``FILES:${PN}`` and ``FILES:${PN}-dev``, which puts
+the real libraries into ``${PN}`` and the unversioned symbolic link into ``${PN}-dev``.
+To package unversioned libraries, you need to modify the variables in the recipe
+as follows::
+
+ SOLIBS = ".so"
+ FILES_SOLIBSDEV = ""
+
+The modifications cause the ``.so`` file to be the real library
+and unset :term:`FILES_SOLIBSDEV` so that no libraries get packaged into
+``${PN}-dev``. The changes are required because unless :term:`PACKAGES` is changed,
+``${PN}-dev`` collects files before ``${PN}``. ``${PN}-dev`` must not collect any of
+the files you want in ``${PN}``.
+
+Finally, loadable modules, essentially unversioned libraries that are linked
+at runtime using ``dlopen()`` instead of at build time, should generally be
+installed in a private directory. However, if they are installed in ``${libdir}``,
+then the modules can be treated as unversioned libraries.
+
+Example
+-------
+
+The example below installs an unversioned x86-64 pre-built library named
+``libfoo.so``. The :term:`COMPATIBLE_HOST` variable limits recipes to the
+x86-64 architecture while the :term:`INSANE_SKIP`, :term:`INHIBIT_PACKAGE_STRIP`
+and :term:`INHIBIT_SYSROOT_STRIP` variables are all set as in the above
+versioned library example. The "magic" is setting the :term:`SOLIBS` and
+:term:`FILES_SOLIBSDEV` variables as explained above::
+
+ SUMMARY = "libfoo sample recipe"
+ SECTION = "libs"
+ LICENSE = "CLOSED"
+
+ SRC_URI = "file://libfoo.so"
+
+ COMPATIBLE_HOST = "x86_64.*-linux"
+
+ INSANE_SKIP:${PN} = "ldflags"
+ INHIBIT_PACKAGE_STRIP = "1"
+ INHIBIT_SYSROOT_STRIP = "1"
+ SOLIBS = ".so"
+ FILES_SOLIBSDEV = ""
+
+ do_install () {
+ install -d ${D}${libdir}
+ install -m 0755 ${WORKDIR}/libfoo.so ${D}${libdir}
+ }
+
diff --git a/documentation/dev-manual/python-development-shell.rst b/documentation/dev-manual/python-development-shell.rst
new file mode 100644
index 0000000000..81a5c43472
--- /dev/null
+++ b/documentation/dev-manual/python-development-shell.rst
@@ -0,0 +1,50 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Using a Python Development Shell
+********************************
+
+Similar to working within a development shell as described in the
+previous section, you can also spawn and work within an interactive
+Python development shell. When debugging certain commands or even when
+just editing packages, ``pydevshell`` can be a useful tool. When you
+invoke the ``pydevshell`` task, all tasks up to and including
+:ref:`ref-tasks-patch` are run for the
+specified target. Then a new terminal is opened. Additionally, key
+Python objects and code are available in the same way they are to
+BitBake tasks, in particular the data store ``d``. So, commands such as
+the following are useful when exploring the data store and running
+functions::
+
+ pydevshell> d.getVar("STAGING_DIR")
+ '/media/build1/poky/build/tmp/sysroots'
+ pydevshell> d.getVar("STAGING_DIR", False)
+ '${TMPDIR}/sysroots'
+ pydevshell> d.setVar("FOO", "bar")
+ pydevshell> d.getVar("FOO")
+ 'bar'
+ pydevshell> d.delVar("FOO")
+ pydevshell> d.getVar("FOO")
+ pydevshell> bb.build.exec_func("do_unpack", d)
+ pydevshell>
+
+See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:functions you can call from within python`"
+section in the BitBake User Manual for details about available functions.
+
+The commands execute just as if the OpenEmbedded build
+system were executing them. Consequently, working this way can be
+helpful when debugging a build or preparing software to be used with the
+OpenEmbedded build system.
+
+Here is an example that uses ``pydevshell`` on a target named
+``matchbox-desktop``::
+
+ $ bitbake matchbox-desktop -c pydevshell
+
+This command spawns a terminal and places you in an interactive Python
+interpreter within the OpenEmbedded build environment. The
+:term:`OE_TERMINAL` variable
+controls what type of shell is opened.
+
+When you are finished using ``pydevshell``, you can exit the shell
+either by using Ctrl+d or closing the terminal window.
+
diff --git a/documentation/dev-manual/qemu.rst b/documentation/dev-manual/qemu.rst
index 88a63c1808..f998e47934 100644
--- a/documentation/dev-manual/qemu.rst
+++ b/documentation/dev-manual/qemu.rst
@@ -318,7 +318,7 @@ timestamp when it needs to look for an image. Minimally, through the use
of options, you must provide either a machine name, a virtual machine
image (``*wic.vmdk``), or a kernel image (``*.bin``).
-Following is the command-line help output for the ``runqemu`` command::
+Here is the command-line help output for the ``runqemu`` command::
$ runqemu --help
@@ -360,7 +360,7 @@ Following is the command-line help output for the ``runqemu`` command::
``runqemu`` Command-Line Options
================================
-Following is a description of ``runqemu`` options you can provide on the
+Here is a description of ``runqemu`` options you can provide on the
command line:
.. note::
diff --git a/documentation/dev-manual/quilt.rst b/documentation/dev-manual/quilt.rst
new file mode 100644
index 0000000000..59240705ad
--- /dev/null
+++ b/documentation/dev-manual/quilt.rst
@@ -0,0 +1,89 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Using Quilt in Your Workflow
+****************************
+
+`Quilt <https://savannah.nongnu.org/projects/quilt>`__ is a powerful tool
+that allows you to capture source code changes without having a clean
+source tree. This section outlines the typical workflow you can use to
+modify source code, test changes, and then preserve the changes in the
+form of a patch all using Quilt.
+
+.. note::
+
+ With regard to preserving changes to source files, if you clean a
+ recipe or have :ref:`ref-classes-rm-work` enabled, the
+ :ref:`devtool workflow <sdk-manual/extensible:using \`\`devtool\`\` in your sdk workflow>`
+ as described in the Yocto Project Application Development and the
+ Extensible Software Development Kit (eSDK) manual is a safer
+ development flow than the flow that uses Quilt.
+
+Follow these general steps:
+
+#. *Find the Source Code:* Temporary source code used by the
+ OpenEmbedded build system is kept in the :term:`Build Directory`. See the
+ ":ref:`dev-manual/temporary-source-code:finding temporary source code`" section to
+ learn how to locate the directory that has the temporary source code for a
+ particular package.
+
+#. *Change Your Working Directory:* You need to be in the directory that
+ has the temporary source code. That directory is defined by the
+ :term:`S` variable.
+
+#. *Create a New Patch:* Before modifying source code, you need to
+ create a new patch. To create a new patch file, use ``quilt new`` as
+ below::
+
+ $ quilt new my_changes.patch
+
+#. *Notify Quilt and Add Files:* After creating the patch, you need to
+ notify Quilt about the files you plan to edit. You notify Quilt by
+ adding the files to the patch you just created::
+
+ $ quilt add file1.c file2.c file3.c
+
+#. *Edit the Files:* Make your changes in the source code to the files
+ you added to the patch.
+
+#. *Test Your Changes:* Once you have modified the source code, the
+ easiest way to test your changes is by calling the :ref:`ref-tasks-compile`
+ task as shown in the following example::
+
+ $ bitbake -c compile -f package
+
+ The ``-f`` or ``--force`` option forces the specified task to
+ execute. If you find problems with your code, you can just keep
+ editing and re-testing iteratively until things work as expected.
+
+ .. note::
+
+ All the modifications you make to the temporary source code disappear
+ once you run the :ref:`ref-tasks-clean` or :ref:`ref-tasks-cleanall`
+ tasks using BitBake (i.e. ``bitbake -c clean package`` and
+ ``bitbake -c cleanall package``). Modifications will also disappear if
+ you use the :ref:`ref-classes-rm-work` feature as described in
+ the ":ref:`dev-manual/disk-space:conserving disk space during builds`"
+ section.
+
+#. *Generate the Patch:* Once your changes work as expected, you need to
+ use Quilt to generate the final patch that contains all your
+ modifications::
+
+ $ quilt refresh
+
+ At this point, the
+ ``my_changes.patch`` file has all your edits made to the ``file1.c``,
+ ``file2.c``, and ``file3.c`` files.
+
+ You can find the resulting patch file in the ``patches/``
+ subdirectory of the source (:term:`S`) directory.
+
+#. *Copy the Patch File:* For simplicity, copy the patch file into a
+ directory named ``files``, which you can create in the same directory
+ that holds the recipe (``.bb``) file or the append (``.bbappend``)
+ file. Placing the patch here guarantees that the OpenEmbedded build
+ system will find the patch. Next, add the patch into the :term:`SRC_URI`
+ of the recipe. Here is an example::
+
+ SRC_URI += "file://my_changes.patch"
+
diff --git a/documentation/dev-manual/read-only-rootfs.rst b/documentation/dev-manual/read-only-rootfs.rst
new file mode 100644
index 0000000000..251178ed54
--- /dev/null
+++ b/documentation/dev-manual/read-only-rootfs.rst
@@ -0,0 +1,89 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Creating a Read-Only Root Filesystem
+************************************
+
+Suppose, for security reasons, you need to disable your target device's
+root filesystem's write permissions (i.e. you need a read-only root
+filesystem). Or, perhaps you are running the device's operating system
+from a read-only storage device. For either case, you can customize your
+image for that behavior.
+
+.. note::
+
+ Supporting a read-only root filesystem requires that the system and
+ applications do not try to write to the root filesystem. You must
+ configure all parts of the target system to write elsewhere, or to
+ gracefully fail in the event of attempting to write to the root
+ filesystem.
+
+Creating the Root Filesystem
+============================
+
+To create the read-only root filesystem, simply add the
+"read-only-rootfs" feature to your image, normally in one of two ways.
+The first way is to add the "read-only-rootfs" image feature in the
+image's recipe file via the :term:`IMAGE_FEATURES` variable::
+
+ IMAGE_FEATURES += "read-only-rootfs"
+
+As an alternative, you can add the same feature
+from within your :term:`Build Directory`'s ``local.conf`` file with the
+associated :term:`EXTRA_IMAGE_FEATURES` variable, as in::
+
+ EXTRA_IMAGE_FEATURES = "read-only-rootfs"
+
+For more information on how to use these variables, see the
+":ref:`dev-manual/customizing-images:Customizing Images Using Custom \`\`IMAGE_FEATURES\`\` and \`\`EXTRA_IMAGE_FEATURES\`\``"
+section. For information on the variables, see
+:term:`IMAGE_FEATURES` and
+:term:`EXTRA_IMAGE_FEATURES`.
+
+Post-Installation Scripts and Read-Only Root Filesystem
+=======================================================
+
+It is very important that you make sure all post-installation
+(``pkg_postinst``) scripts for packages that are installed into the
+image can be run at the time when the root filesystem is created during
+the build on the host system. These scripts cannot attempt to run during
+the first boot on the target device. With the "read-only-rootfs" feature
+enabled, the build system makes sure that all post-installation scripts
+succeed at file system creation time. If any of these scripts
+still need to be run after the root filesystem is created, the build
+immediately fails. These build-time checks ensure that the build fails
+rather than the target device failing later during its initial boot
+operation.
+
+Most of the common post-installation scripts generated by the build
+system for the out-of-the-box Yocto Project are engineered so that they
+can run during root filesystem creation (e.g. post-installation scripts
+for caching fonts). However, if you create and add custom scripts, you
+need to be sure they can be run during this file system creation.
+
+Here are some common problems that prevent post-installation scripts
+from running during root filesystem creation:
+
+- *Not using $D in front of absolute paths:* The build system defines
+ ``$``\ :term:`D` when the root
+ filesystem is created. Furthermore, ``$D`` is blank when the script
+  is run on the target device. This implies two purposes for ``$D``:
+  ensuring paths are valid in both the host and target environments,
+  and checking which environment is being used so that the appropriate
+  action can be taken (see the sketch following this list).
+
+- *Attempting to run processes that are specific to or dependent on the
+ target architecture:* You can work around these attempts by using
+ native tools, which run on the host system, to accomplish the same
+ tasks, or by alternatively running the processes under QEMU, which
+ has the ``qemu_run_binary`` function. For more information, see the
+ :ref:`ref-classes-qemu` class.
+
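+Here is a hypothetical post-installation script that works at root
+filesystem creation time because it prefixes its path with ``$D`` (the
+data directory it creates is made up for the example)::
+
+   pkg_postinst:${PN}() {
+       # $D points at the image rootfs during do_rootfs and is empty on the target
+       mkdir -p $D${localstatedir}/lib/my-app
+   }
+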
+Areas With Write Access
+=======================
+
+With the "read-only-rootfs" feature enabled, any attempt by the target
+to write to the root filesystem at runtime fails. Consequently, you must
+make sure that processes and applications that attempt these types of
+writes are configured to write to directories with write access (e.g.
+``/tmp`` or ``/var/run``).
+
diff --git a/documentation/dev-manual/runtime-testing.rst b/documentation/dev-manual/runtime-testing.rst
new file mode 100644
index 0000000000..bd3f42ac09
--- /dev/null
+++ b/documentation/dev-manual/runtime-testing.rst
@@ -0,0 +1,600 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Performing Automated Runtime Testing
+************************************
+
+The OpenEmbedded build system makes available a series of automated
+tests for images to verify runtime functionality. You can run these
+tests on either QEMU or actual target hardware. Tests are written in
+Python, making use of the ``unittest`` module, and the majority of them
+run commands on the target system over SSH. This section describes how
+you set up the environment to use these tests, run available tests, and
+write and add your own tests.
+
+For information on the test and QA infrastructure available within the
+Yocto Project, see the ":ref:`ref-manual/release-process:testing and quality assurance`"
+section in the Yocto Project Reference Manual.
+
+Enabling Tests
+==============
+
+Depending on whether you are planning to run tests using QEMU or on the
+hardware, you have to take different steps to enable the tests. See the
+following subsections for information on how to enable both types of
+tests.
+
+Enabling Runtime Tests on QEMU
+------------------------------
+
+In order to run tests, you need to do the following:
+
+- *Set up to avoid interaction with sudo for networking:* To
+ accomplish this, you must do one of the following:
+
+ - Add ``NOPASSWD`` for your user in ``/etc/sudoers``, either for all
+ commands or just for ``runqemu-ifup``. You must provide the full
+ path to the script, as it can change if you are using multiple
+ clones of the source repository (see the sketch after this list).
+
+ .. note::
+
+ On some distributions, you also need to comment out "Defaults
+ requiretty" in ``/etc/sudoers``.
+
+ - Manually configure a tap interface for your system.
+
+ - Run the ``scripts/runqemu-gen-tapdevs`` script as root to
+ generate a list of tap devices. This is the option typically
+ chosen for Autobuilder-type environments.
+
+ .. note::
+
+ - Be sure to use an absolute path when calling this script
+ with sudo.
+
+ - Ensure that your host has the package ``iptables`` installed.
+
+ - The package recipe ``qemu-helper-native`` is required to run
+ this script. Build the package using the following command::
+
+ $ bitbake qemu-helper-native
+
+- *Set the DISPLAY variable:* You need to set this variable so that
+ you have an X server available (e.g. start ``vncserver`` for a
+ headless machine).
+
+- *Be sure your host's firewall accepts incoming connections from
+ 192.168.7.0/24:* Some of the tests (in particular DNF tests) start an
+ HTTP server on a random high-numbered port, which is used to serve
+ files to the target. The DNF module serves
+ ``${WORKDIR}/oe-rootfs-repo`` so it can run DNF channel commands.
+ That means your host's firewall must accept incoming connections from
+ 192.168.7.0/24, which is the default IP range used for tap devices by
+ ``runqemu`` (see the sketch after this list).
+
+- *Be sure your host has the correct packages installed:* Depending
+ on your host's distribution, you need to have the following packages
+ installed:
+
+ - Ubuntu and Debian: ``sysstat`` and ``iproute2``
+
+ - openSUSE: ``sysstat`` and ``iproute2``
+
+ - Fedora: ``sysstat`` and ``iproute``
+
+ - CentOS: ``sysstat`` and ``iproute``
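+
+As a reference for the sudo and firewall items above, here is a sketch of
+the corresponding host-side configuration. The user name, repository path
+and firewall tool are examples only; adapt them to your own setup.
+
+A possible ``/etc/sudoers`` entry allowing password-less ``runqemu-ifup``::
+
+ myuser ALL = NOPASSWD: /home/myuser/poky/scripts/runqemu-ifup
+
+One way to accept incoming connections from the default tap network on a
+host that manages its firewall directly with ``iptables``::
+
+ $ sudo iptables -I INPUT -s 192.168.7.0/24 -j ACCEPT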
+
+Once you start running the tests, the following happens:
+
+#. A copy of the root filesystem is written to ``${WORKDIR}/testimage``.
+
+#. The image is booted under QEMU using the standard ``runqemu`` script.
+
+#. A default timeout of 500 seconds occurs to allow for the boot process
+ to reach the login prompt. You can change the timeout period by
+ setting
+ :term:`TEST_QEMUBOOT_TIMEOUT`
+ in the ``local.conf`` file.
+
+#. Once the boot process completes and the login prompt appears, the
+ tests run. The full boot log is written to
+ ``${WORKDIR}/testimage/qemu_boot_log``.
+
+#. Each test module loads in the order found in :term:`TEST_SUITES`. You can
+ find the full output of the commands run over SSH in
+ ``${WORKDIR}/testimage/ssh_target_log``.
+
+#. If no failures occur, the task running the tests ends successfully.
+ You can find the output from the ``unittest`` in the task log at
+ ``${WORKDIR}/temp/log.do_testimage``.
+
+Enabling Runtime Tests on Hardware
+----------------------------------
+
+The OpenEmbedded build system can run tests on real hardware, and for
+certain devices it can also deploy the image to be tested onto the
+device beforehand.
+
+For automated deployment, a "controller image" is installed onto the
+hardware once as part of setup. Then, each time tests are to be run, the
+following occurs:
+
+#. The device boots into the controller image, which is used to write
+ the image to be tested to a second partition.
+
+#. The device is then rebooted using an external script that you need to
+ provide.
+
+#. The device boots into the image to be tested.
+
+When running tests (independent of whether the image has been deployed
+automatically or not), the device is expected to be connected to a
+network on a pre-determined IP address. You can either use static IP
+addresses written into the image, or set the image to use DHCP and have
+your DHCP server on the test network assign a known IP address based on
+the MAC address of the device.
+
+In order to run tests on hardware, you need to set :term:`TEST_TARGET` to an
+appropriate value. For QEMU, you do not have to change anything, as the
+default value is "qemu". For running tests on hardware, the following
+options are available:
+
+- *"simpleremote":* Choose "simpleremote" if you are going to run tests
+ on a target system that is already running the image to be tested and
+ is available on the network. You can use "simpleremote" in
+ conjunction with either real hardware or an image running within a
+ separately started QEMU or any other virtual machine manager (see
+ the ``local.conf`` sketch after this list).
+
+- *"SystemdbootTarget":* Choose "SystemdbootTarget" if your hardware is
+ an EFI-based machine with ``systemd-boot`` as bootloader and
+ ``core-image-testmaster`` (or something similar) is installed. Also,
+ your hardware under test must be in a DHCP-enabled network that gives
+ it the same IP address for each reboot.
+
+ If you choose "SystemdbootTarget", there are additional requirements
+ and considerations. See the
+ ":ref:`dev-manual/runtime-testing:selecting systemdboottarget`" section, which
+ follows, for more information.
+
+- *"BeagleBoneTarget":* Choose "BeagleBoneTarget" if you are deploying
+ images and running tests on the BeagleBone "Black" or original
+ "White" hardware. For information on how to use these tests, see the
+ comments at the top of the BeagleBoneTarget
+ ``meta-yocto-bsp/lib/oeqa/controllers/beaglebonetarget.py`` file.
+
+- *"EdgeRouterTarget":* Choose "EdgeRouterTarget" if you are deploying
+ images and running tests on the Ubiquiti Networks EdgeRouter Lite.
+ For information on how to use these tests, see the comments at the
+ top of the EdgeRouterTarget
+ ``meta-yocto-bsp/lib/oeqa/controllers/edgeroutertarget.py`` file.
+
+- *"GrubTarget":* Choose "GrubTarget" if you are deploying images and running
+ tests on any generic PC that boots using GRUB. For information on how
+ to use these tests, see the comments at the top of the GrubTarget
+ ``meta-yocto-bsp/lib/oeqa/controllers/grubtarget.py`` file.
+
+- *"your-target":* Create your own custom target if you want to run
+ tests when you are deploying images and running tests on a custom
+ machine within your BSP layer. To do this, you need to add a Python
+ unit that defines the target class under ``lib/oeqa/controllers/``
+ within your layer. You must also provide an empty ``__init__.py``.
+ For examples, see files in ``meta-yocto-bsp/lib/oeqa/controllers/``.
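+
+For example, a minimal ``local.conf`` sketch for the "simpleremote" case
+could look like the following. The IP addresses are examples and depend on
+your test network::
+
+ IMAGE_CLASSES += "testimage"
+ TEST_TARGET = "simpleremote"
+ TEST_TARGET_IP = "192.168.7.2"
+ TEST_SERVER_IP = "192.168.7.1"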
+
+Selecting SystemdbootTarget
+---------------------------
+
+If you did not set :term:`TEST_TARGET` to "SystemdbootTarget", then you do
+not need any information in this section. You can skip down to the
+":ref:`dev-manual/runtime-testing:running tests`" section.
+
+If you did set :term:`TEST_TARGET` to "SystemdbootTarget", you also need to
+perform a one-time setup of your controller image by doing the following:
+
+#. *Set EFI_PROVIDER:* Be sure that :term:`EFI_PROVIDER` is as follows::
+
+ EFI_PROVIDER = "systemd-boot"
+
+#. *Build the controller image:* Build the ``core-image-testmaster`` image.
+ The ``core-image-testmaster`` recipe is provided as an example for a
+ "controller" image and you can customize the image recipe as you would
+ any other recipe.
+
+ Image recipe requirements are:
+
+ - Inherits ``core-image`` so that kernel modules are installed.
+
+ - Installs normal Linux utilities, not BusyBox ones (e.g. ``bash``,
+ ``coreutils``, ``tar``, ``gzip``, and ``kmod``).
+
+ - Uses a custom :term:`Initramfs` image with a custom
+ installer. A normal image that you can install usually creates a
+ single root filesystem partition. This image uses another installer that
+ creates a specific partition layout. Not all Board Support
+ Packages (BSPs) can use an installer. For such cases, you need to
+ manually create the following partition layout on the target:
+
+ - First partition mounted under ``/boot``, labeled "boot".
+
+ - The main root filesystem partition where this image gets installed,
+ which is mounted under ``/``.
+
+ - Another partition labeled "testrootfs" where test images get
+ deployed.
+
+#. *Install image:* Install the image that you just built on the target
+ system.
+
+The final thing you need to do when setting :term:`TEST_TARGET` to
+"SystemdbootTarget" is to set up the test image:
+
+#. *Set up your local.conf file:* Make sure you have the following
+ statements in your ``local.conf`` file::
+
+ IMAGE_FSTYPES += "tar.gz"
+ IMAGE_CLASSES += "testimage"
+ TEST_TARGET = "SystemdbootTarget"
+ TEST_TARGET_IP = "192.168.2.3"
+
+#. *Build your test image:* Use BitBake to build the image::
+
+ $ bitbake core-image-sato
+
+Power Control
+-------------
+
+For most hardware targets other than "simpleremote", you can control
+power:
+
+- You can use :term:`TEST_POWERCONTROL_CMD` together with
+ :term:`TEST_POWERCONTROL_EXTRA_ARGS` as a command that runs on the host
+ and does power cycling. The test code passes one argument to that
+ command: off, on or cycle (off then on). Here is an example that
+ could appear in your ``local.conf`` file::
+
+ TEST_POWERCONTROL_CMD = "powercontrol.exp test 10.11.12.1 nuc1"
+
+ In this example, the expect
+ script does the following:
+
+ .. code-block:: shell
+
+ ssh test@10.11.12.1 "pyctl nuc1 arg"
+
+ It then runs a Python script that controls power for a label called
+ ``nuc1``.
+
+ .. note::
+
+ You need to customize :term:`TEST_POWERCONTROL_CMD` and
+ :term:`TEST_POWERCONTROL_EXTRA_ARGS` for your own setup. The one requirement
+ is that it accepts "on", "off", and "cycle" as the last argument.
+
+- When no command is defined, the test framework connects to the device
+ over SSH and uses the classic reboot command to reboot it. Classic reboot
+ is fine as long as the machine actually reboots (i.e. the SSH test
+ has not failed). It is useful for scenarios where you have a simple
+ setup, typically with a single board, and where some manual
+ interaction is okay from time to time.
+
+If you have no hardware to automatically perform power control but still
+wish to experiment with automated hardware testing, you can use the
+``dialog-power-control`` script that shows a dialog prompting you to perform
+the required power action. This script requires either KDialog or Zenity
+to be installed. To use this script, set the
+:term:`TEST_POWERCONTROL_CMD`
+variable as follows::
+
+ TEST_POWERCONTROL_CMD = "${COREBASE}/scripts/contrib/dialog-power-control"
+
+Serial Console Connection
+-------------------------
+
+For test target classes requiring a serial console to interact with the
+bootloader (e.g. BeagleBoneTarget, EdgeRouterTarget, and GrubTarget),
+you need to specify a command to use to connect to the serial console of
+the target machine by using the
+:term:`TEST_SERIALCONTROL_CMD`
+variable and optionally the
+:term:`TEST_SERIALCONTROL_EXTRA_ARGS`
+variable.
+
+The command could be a serial terminal program if the machine is
+connected to a local serial port, or a ``telnet`` or ``ssh`` command
+connecting to a remote console server. Regardless of the case, the
+command simply needs to connect to the serial console and forward that
+connection to standard input and output as any normal terminal program
+does. For example, to use the picocom terminal program on serial device
+``/dev/ttyUSB0`` at 115200 bps, you would set the variable as follows::
+
+ TEST_SERIALCONTROL_CMD = "picocom /dev/ttyUSB0 -b 115200"
+
+For local
+devices where the serial port device disappears when the device reboots,
+an additional "serdevtry" wrapper script is provided. To use this
+wrapper, simply prefix the terminal command with
+``${COREBASE}/scripts/contrib/serdevtry``::
+
+ TEST_SERIALCONTROL_CMD = "${COREBASE}/scripts/contrib/serdevtry picocom -b 115200 /dev/ttyUSB0"
+
+Running Tests
+=============
+
+You can start the tests automatically or manually:
+
+- *Automatically running tests:* To run the tests automatically after the
+ OpenEmbedded build system successfully creates an image, first set the
+ :term:`TESTIMAGE_AUTO` variable to "1" in your ``local.conf`` file in the
+ :term:`Build Directory`::
+
+ TESTIMAGE_AUTO = "1"
+
+ Next, build your image. If the image successfully builds, the
+ tests run::
+
+ $ bitbake core-image-sato
+
+- *Manually running tests:* To manually run the tests, first globally
+ inherit the :ref:`ref-classes-testimage` class by editing your
+ ``local.conf`` file::
+
+ IMAGE_CLASSES += "testimage"
+
+ Next, use BitBake to run the tests::
+
+ $ bitbake -c testimage image
+
+All test files reside in ``meta/lib/oeqa/runtime/cases`` in the
+:term:`Source Directory`. A test name maps
+directly to a Python module. Each test module may contain a number of
+individual tests. Tests are usually grouped together by the area tested
+(e.g. tests for systemd reside in ``meta/lib/oeqa/runtime/cases/systemd.py``).
+
+You can add tests to any layer provided you place them in the proper
+area and you extend :term:`BBPATH` in
+the ``local.conf`` file as normal. Be sure that tests reside in
+``layer/lib/oeqa/runtime/cases``.
+
+.. note::
+
+ Be sure that module names do not collide with module names used in
+ the default set of test modules in ``meta/lib/oeqa/runtime/cases``.
+
+You can change the set of tests run by appending or overriding
+the :term:`TEST_SUITES` variable in
+``local.conf``. Each name in :term:`TEST_SUITES` represents a required test
+for the image. Test modules named within :term:`TEST_SUITES` cannot be
+skipped even if a test is not suitable for an image (e.g. running the
+RPM tests on an image without ``rpm``). Appending "auto" to
+:term:`TEST_SUITES` causes the build system to try to run all tests that are
+suitable for the image (i.e. each test module may elect to skip itself).
+
+The order you list tests in :term:`TEST_SUITES` is important and influences
+test dependencies. Consequently, tests that depend on other tests should
+be added after the test on which they depend. For example, since the
+``ssh`` test depends on the ``ping`` test, "ssh" needs to come after
+"ping" in the list. The test class provides no re-ordering or dependency
+handling.
+
+.. note::
+
+ Each module can have multiple classes with multiple test methods.
+ Also, standard Python ``unittest`` rules apply.
+
+Here are some things to keep in mind when running tests:
+
+- The default tests for the image are defined as::
+
+ DEFAULT_TEST_SUITES:pn-image = "ping ssh df connman syslog xorg scp vnc date rpm dnf dmesg"
+
+- Add your own test to the list of tests by using the following::
+
+ TEST_SUITES:append = " mytest"
+
+- Run a specific list of tests as follows::
+
+ TEST_SUITES = "test1 test2 test3"
+
+ Remember, order is important. Be sure to place a test that is
+ dependent on another test later in the order.
+
+Exporting Tests
+===============
+
+You can export tests so that they can run independently of the build
+system. Exporting tests is required if you want to be able to hand the
+test execution off to a scheduler. You can only export tests that are
+defined in :term:`TEST_SUITES`.
+
+If your image is already built, make sure the following are set in your
+``local.conf`` file::
+
+ INHERIT += "testexport"
+ TEST_TARGET_IP = "IP-address-for-the-test-target"
+ TEST_SERVER_IP = "IP-address-for-the-test-server"
+
+You can then export the tests with the
+following BitBake command form::
+
+ $ bitbake image -c testexport
+
+Exporting the tests places them in the :term:`Build Directory` in
+``tmp/testexport/``\ image, which is controlled by the :term:`TEST_EXPORT_DIR`
+variable.
+
+You can now run the tests outside of the build environment::
+
+ $ cd tmp/testexport/image
+ $ ./runexported.py testdata.json
+
+Here is a complete example that shows IP addresses and uses the
+``core-image-sato`` image::
+
+ INHERIT += "testexport"
+ TEST_TARGET_IP = "192.168.7.2"
+ TEST_SERVER_IP = "192.168.7.1"
+
+Use BitBake to export the tests::
+
+ $ bitbake core-image-sato -c testexport
+
+Run the tests outside of
+the build environment using the following::
+
+ $ cd tmp/testexport/core-image-sato
+ $ ./runexported.py testdata.json
+
+Writing New Tests
+=================
+
+As mentioned previously, all new test files need to be in the proper
+place for the build system to find them. New tests for additional
+functionality outside of the core should be added to the layer that adds
+the functionality, in ``layer/lib/oeqa/runtime/cases`` (as long as
+:term:`BBPATH` is extended in the
+layer's ``layer.conf`` file as normal). Just remember the following:
+
+- Filenames need to map directly to test (module) names.
+
+- Do not use module names that collide with existing core tests.
+
+- Minimally, an empty ``__init__.py`` file must be present in the runtime
+ directory.
+
+To create a new test, start by copying an existing module (e.g.
+``oe_syslog.py`` or ``gcc.py`` are good ones to use). Test modules can use
+code from ``meta/lib/oeqa/utils``, which provides helper classes.
+
+.. note::
+
+ Structure shell commands so that you can rely on their exit code to
+ indicate success. Be aware that sometimes you will need to
+ parse the output. See the ``df.py`` and ``date.py`` modules for examples.
+
+You will notice that all test classes inherit ``oeRuntimeTest``, which
+is found in ``meta/lib/oetest.py``. This base class offers some helper
+methods and attributes, which are described in the following sections.
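+
+As an illustration, here is a minimal, hypothetical test module. The module
+name, test command and assertion are examples only, and the exact import
+path of the base class may differ between releases::
+
+ # layer/lib/oeqa/runtime/cases/mytest.py (hypothetical example)
+ from oeqa.oetest import oeRuntimeTest
+
+ class MyTest(oeRuntimeTest):
+
+     def test_hostname(self):
+         # run() returns a (status, output) tuple, as described below
+         (status, output) = self.target.run('hostname', timeout=60)
+         self.assertEqual(status, 0, msg='hostname failed: %s' % output)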
+
+Class Methods
+-------------
+
+Class methods are as follows:
+
+- *hasPackage(pkg):* Returns "True" if ``pkg`` is in the installed
+ package list of the image, which is based on the manifest file that
+ is generated during the :ref:`ref-tasks-rootfs` task.
+
+- *hasFeature(feature):* Returns "True" if the feature is in
+ :term:`IMAGE_FEATURES` or
+ :term:`DISTRO_FEATURES`.
+
+Class Attributes
+----------------
+
+Class attributes are as follows:
+
+- *pscmd:* Equals "ps -ef" if ``procps`` is installed in the image.
+ Otherwise, ``pscmd`` equals "ps" (busybox).
+
+- *tc:* The called test context, which gives access to the
+ following attributes:
+
+ - *d:* The BitBake datastore, which allows you to use expressions such
+ as ``oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager")``.
+
+ - *testslist and testsrequired:* Used internally. The tests
+ do not need these.
+
+ - *filesdir:* The absolute path to
+ ``meta/lib/oeqa/runtime/files``, which contains helper files for
+ tests meant to be copied to the target, such as small files written
+ in C for compilation.
+
+ - *target:* The target controller object used to deploy and
+ start an image on a particular target (e.g. Qemu, SimpleRemote,
+ and SystemdbootTarget). Tests usually use the following:
+
+ - *ip:* The target's IP address.
+
+ - *server_ip:* The host's IP address, which is usually used
+ by the DNF test suite.
+
+ - *run(cmd, timeout=None):* The single most used method.
+ This command is a wrapper for: ``ssh root@host "cmd"``. The
+ command returns a tuple: (status, output), which are what their
+ names imply --- the return code of "cmd" and whatever output it
+ produces. The optional timeout argument represents the number
+ of seconds the test should wait for "cmd" to return. If the
+ argument is "None", the test uses the default instance's
+ timeout period, which is 300 seconds. If the argument is "0",
+ the test runs until the command returns.
+
+ - *copy_to(localpath, remotepath):*
+ ``scp localpath root@ip:remotepath``.
+
+ - *copy_from(remotepath, localpath):*
+ ``scp root@host:remotepath localpath``.
+
+Instance Attributes
+-------------------
+
+There is a single instance attribute, which is ``target``. The ``target``
+instance attribute is identical to the class attribute of the same name,
+which is described in the previous section. This attribute exists as
+both an instance and class attribute so tests can use
+``self.target.run(cmd)`` in instance methods instead of
+``oeRuntimeTest.tc.target.run(cmd)``.
+
+Installing Packages in the DUT Without the Package Manager
+==========================================================
+
+When a test requires a package built by BitBake, it is possible to
+install that package. Installing the package does not require a package
+manager to be installed on the device under test (DUT). It does, however,
+require an SSH connection and the target must be using the
+``sshcontrol`` class.
+
+.. note::
+
+ This method uses ``scp`` to copy files from the host to the target, which
+ causes permissions and special attributes to be lost.
+
+A JSON file is used to define the packages needed by a test. This file
+must be in the same path as the file used to define the tests.
+Furthermore, the filename must map directly to the test module name with
+a ``.json`` extension.
+
+The JSON file must contain an object whose keys are test names. Each
+value is either an object or an array of objects, and these objects use
+the following data:
+
+- "pkg" --- a mandatory string that is the name of the package to be
+ installed.
+
+- "rm" --- an optional boolean, which defaults to "false", that specifies
+ to remove the package after the test.
+
+- "extract" --- an optional boolean, which defaults to "false", that
+ specifies if the package must be extracted from the package format.
+ When set to "true", the package is not automatically installed into
+ the DUT.
+
+Here is an example JSON file that handles test "foo" installing
+package "bar" and test "foobar" installing packages "foo" and "bar".
+Once the test is complete, the packages are removed from the DUT::
+
+ {
+ "foo": {
+ "pkg": "bar"
+ },
+ "foobar": [
+ {
+ "pkg": "foo",
+ "rm": true
+ },
+ {
+ "pkg": "bar",
+ "rm": true
+ }
+ ]
+ }
+
diff --git a/documentation/dev-manual/sbom.rst b/documentation/dev-manual/sbom.rst
new file mode 100644
index 0000000000..6949675c25
--- /dev/null
+++ b/documentation/dev-manual/sbom.rst
@@ -0,0 +1,81 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Creating a Software Bill of Materials
+*************************************
+
+Once you are able to build an image for your project, once the licenses for
+each software component are all identified (see
+":ref:`dev-manual/licenses:working with licenses`") and once vulnerability
+fixes are applied (see ":ref:`dev-manual/vulnerabilities:checking
+for vulnerabilities`"), the OpenEmbedded build system can generate
+a description of all the components you used, their licenses, their dependencies,
+their sources, the changes that were applied to them and the known
+vulnerabilities that were fixed.
+
+This description is generated in the form of a *Software Bill of Materials*
+(:term:`SBOM`), using the :term:`SPDX` standard.
+
+When you release software, this is the standard way to provide information
+about the Software Supply Chain of your software image and SDK. The
+:term:`SBOM` tooling is often used to ensure open source license compliance by
+providing the license texts used in the product, in a standardized format
+that legal departments and end users can read.
+
+:term:`SBOM` information is also critical to performing vulnerability exposure
+assessments, as all the components used in the Software Supply Chain are listed.
+
+The OpenEmbedded build system doesn't generate such information by default.
+To make this happen, you must inherit the
+:ref:`ref-classes-create-spdx` class from a configuration file::
+
+ INHERIT += "create-spdx"
+
+Upon building an image, you will then get:
+
+- :term:`SPDX` output in JSON format as an ``IMAGE-MACHINE.spdx.json`` file in
+ ``tmp/deploy/images/MACHINE/`` inside the :term:`Build Directory`.
+
+- This toplevel file is accompanied by an ``IMAGE-MACHINE.spdx.index.json``
+ containing an index of JSON :term:`SPDX` files for individual recipes.
+
+- The compressed archive ``IMAGE-MACHINE.spdx.tar.zst`` contains the index
+ and the files for the individual recipes.
+
+The :ref:`ref-classes-create-spdx` class offers options to include
+more information in the output :term:`SPDX` data:
+
+- Make the JSON files more human readable by setting :term:`SPDX_PRETTY`.
+
+- Add compressed archives of the files in the generated target packages by
+ setting :term:`SPDX_ARCHIVE_PACKAGED`.
+
+- Add a description of the source files used to generate host tools and target
+ packages by setting :term:`SPDX_INCLUDE_SOURCES`.
+
+- Add archives of these source files themselves by setting
+ :term:`SPDX_ARCHIVE_SOURCES`.
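+
+For example, a ``local.conf`` sketch enabling some of these options (each
+variable is set to "1" to enable it) could be::
+
+ INHERIT += "create-spdx"
+ SPDX_PRETTY = "1"
+ SPDX_INCLUDE_SOURCES = "1"
+ SPDX_ARCHIVE_SOURCES = "1"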
+
+Though the toplevel :term:`SPDX` output is available in
+``tmp/deploy/images/MACHINE/`` inside the :term:`Build Directory`, ancillary
+generated files are available in ``tmp/deploy/spdx/MACHINE`` too, such as:
+
+- The individual :term:`SPDX` JSON files in the ``IMAGE-MACHINE.spdx.tar.zst``
+ archive.
+
+- Compressed archives of the files in the generated target packages,
+ in ``packages/packagename.tar.zst`` (when :term:`SPDX_ARCHIVE_PACKAGED`
+ is set).
+
+- Compressed archives of the source files used to build the host tools
+ and the target packages in ``recipes/recipe-packagename.tar.zst``
+ (when :term:`SPDX_ARCHIVE_SOURCES` is set). Those are needed to fulfill
+ "source code access" license requirements.
+
+See the `tools page <https://spdx.dev/resources/tools/>`__ on the :term:`SPDX`
+project website for a list of tools to consume and transform the :term:`SPDX`
+data generated by the OpenEmbedded build system.
+
+See also Joshua Watt's presentations
+`Automated SBoM generation with OpenEmbedded and the Yocto Project <https://youtu.be/Q5UQUM6zxVU>`__
+at FOSDEM 2023 and
+`SPDX in the Yocto Project <https://fosdem.org/2024/schedule/event/fosdem-2024-3318-spdx-in-the-yocto-project/>`__
+at FOSDEM 2024.
diff --git a/documentation/dev-manual/securing-images.rst b/documentation/dev-manual/securing-images.rst
new file mode 100644
index 0000000000..e5791d3d6d
--- /dev/null
+++ b/documentation/dev-manual/securing-images.rst
@@ -0,0 +1,156 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Making Images More Secure
+*************************
+
+Security is of increasing concern for embedded devices. Consider the
+issues and problems discussed in just this sampling of work found across
+the Internet:
+
+- *"*\ `Security Risks of Embedded
+ Systems <https://www.schneier.com/blog/archives/2014/01/security_risks_9.html>`__\ *"*
+ by Bruce Schneier
+
+- *"*\ `Internet Census
+ 2012 <http://census2012.sourceforge.net/paper.html>`__\ *"* by Carna
+ Botnet
+
+- *"*\ `Security Issues for Embedded
+ Devices <https://elinux.org/images/6/6f/Security-issues.pdf>`__\ *"*
+ by Jake Edge
+
+When securing your image is of concern, there are steps, tools, and
+variables that you can consider to help you reach the security goals you
+need for your particular device. Not all situations are identical when
+it comes to making an image secure. Consequently, this section provides
+some guidance and suggestions for consideration when you want to make
+your image more secure.
+
+.. note::
+
+ Because the security requirements and risks are different for every
+ type of device, this section cannot provide a complete reference on
+ securing your custom OS. It is strongly recommended that you also
+ consult other sources of information on embedded Linux system
+ hardening and on security.
+
+General Considerations
+======================
+
+There are general considerations that help you create more secure images.
+You should consider the following suggestions to make your device
+more secure:
+
+- Scan additional code you are adding to the system (e.g. application
+ code) by using static analysis tools. Look for buffer overflows and
+ other potential security problems.
+
+- Pay particular attention to the security for any web-based
+ administration interface.
+
+ Web interfaces typically need to perform administrative functions and
+ tend to need to run with elevated privileges. Thus, the consequences
+ resulting from the interface's security becoming compromised can be
+ serious. Look for common web vulnerabilities such as
+ cross-site-scripting (XSS), unvalidated inputs, and so forth.
+
+ As with system passwords, the default credentials for accessing a
+ web-based interface should not be the same across all devices. This
+ is particularly true if the interface is enabled by default as it can
+ be assumed that many end-users will not change the credentials.
+
+- Ensure you can update the software on the device to mitigate
+ vulnerabilities discovered in the future. This consideration
+ especially applies when your device is network-enabled.
+
+- Regularly scan and apply fixes for CVE security issues affecting
+ all software components in the product, see ":ref:`dev-manual/vulnerabilities:checking for vulnerabilities`".
+
+- Regularly update your version of Poky and OE-Core from their upstream
+ developers, e.g. to apply updates and security fixes from stable
+ and :term:`LTS` branches.
+
+- Ensure you remove or disable debugging functionality before producing
+ the final image. For information on how to do this, see the
+ ":ref:`dev-manual/securing-images:considerations specific to the openembedded build system`"
+ section.
+
+- Ensure you have no network services listening that are not needed.
+
+- Remove any software from the image that is not needed.
+
+- Enable hardware support for secure boot functionality when your
+ device supports this functionality.
+
+Security Flags
+==============
+
+The Yocto Project has security flags that you can enable that help make
+your build output more secure. The security flags are in the
+``meta/conf/distro/include/security_flags.inc`` file in your
+:term:`Source Directory` (e.g. ``poky``).
+
+.. note::
+
+ Depending on the recipe, certain security flags are enabled and
+ disabled by default.
+
+Use the following line in your ``local.conf`` file or in your custom
+distribution configuration file to enable the security compiler and
+linker flags for your build::
+
+ require conf/distro/include/security_flags.inc
+
+Considerations Specific to the OpenEmbedded Build System
+========================================================
+
+You can take some steps that are specific to the OpenEmbedded build
+system to make your images more secure:
+
+- Ensure "debug-tweaks" is not one of your selected
+ :term:`IMAGE_FEATURES`.
+ When creating a new project, the default is to provide you with an
+ initial ``local.conf`` file that enables this feature using the
+ :term:`EXTRA_IMAGE_FEATURES`
+ variable with the line::
+
+ EXTRA_IMAGE_FEATURES = "debug-tweaks"
+
+ To disable that feature, simply comment out that line in your
+ ``local.conf`` file, or make sure :term:`IMAGE_FEATURES` does not contain
+ "debug-tweaks" before producing your final image. Among other things,
+ leaving this in place sets the root password as blank, which makes
+ logging in for debugging or inspection easy during development but
+ also means anyone can easily log in during production.
+
+- It is possible to set a root password for the image and also to set
+ passwords for any extra users you might add (e.g. administrative or
+ service-type users). When you set up passwords for multiple images or
+ users, you should not duplicate passwords.
+
+ To set up passwords, use the :ref:`ref-classes-extrausers` class, which
+ is the preferred method. For an example of how to set up both root and
+ user passwords, see the ":ref:`ref-classes-extrausers`" section and the
+ sketch after this list.
+
+ .. note::
+
+ When adding extra user accounts or setting a root password, be
+ cautious about setting the same password on every device. If you
+ do this, and the password you have set is exposed, then every
+ device is now potentially compromised. If you need this access but
+ want to ensure security, consider setting a different, random
+ password for each device. Typically, you do this as a separate
+ step after you deploy the image onto the device.
+
+- Consider enabling a Mandatory Access Control (MAC) framework such as
+ SMACK or SELinux and tuning it appropriately for your device's usage.
+ You can find more information in the
+ :yocto_git:`meta-selinux </meta-selinux/>` layer.
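+
+As a sketch of the password setup mentioned above, an image recipe could
+inherit the :ref:`ref-classes-extrausers` class and set pre-encrypted
+password hashes (generated, for example, with ``openssl passwd -6``). The
+user name and placeholders below are hypothetical::
+
+ inherit extrausers
+ EXTRA_USERS_PARAMS = "\
+     usermod -p '<encrypted-root-password-hash>' root; \
+     useradd -p '<encrypted-password-hash>' myadmin; \
+     "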
+
+Tools for Hardening Your Image
+==============================
+
+The Yocto Project provides tools for making your image more secure. You
+can find these tools in the ``meta-security`` layer of the
+:yocto_git:`Yocto Project Source Repositories <>`.
+
diff --git a/documentation/dev-manual/security-subjects.rst b/documentation/dev-manual/security-subjects.rst
new file mode 100644
index 0000000000..1b02b6a9e9
--- /dev/null
+++ b/documentation/dev-manual/security-subjects.rst
@@ -0,0 +1,189 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Dealing with Vulnerability Reports
+**********************************
+
+The Yocto Project and OpenEmbedded are open-source, community-based projects
+used in numerous products. They assemble multiple other open-source projects,
+and need to handle security issues and practices both internal (in the code
+maintained by both projects), and external (maintained by other projects and
+organizations).
+
+This manual assembles security-related information concerning the whole
+ecosystem. It includes information on reporting a potential security issue,
+the operation of the YP Security team and how to contribute to the
+related code. It is written to be useful for both security researchers and
+YP developers.
+
+How to report a potential security vulnerability?
+=================================================
+
+If you would like to report a public issue (for example, one with a released
+CVE number), please report it using the
+:yocto_bugs:`Security Bugzilla </enter_bug.cgi?product=Security>`.
+
+If you are dealing with a not-yet-released issue, or an urgent one, please send
+a message to security AT yoctoproject DOT org, including as many details as
+possible: the layer or software module affected, the recipe and its version,
+and any example code, if available. This mailing list is monitored by the
+Yocto Project Security team.
+
+For each layer, you might also look for specific instructions (if any) for
+reporting potential security issues in the specific ``SECURITY.md`` file at the
+root of the repository. Instructions on how and where to submit a patch are
+usually available in ``README.md``. If this is your first patch to the
+Yocto Project/OpenEmbedded, you might want to have a look into the
+Contributor's Manual section
+":ref:`contributor-guide/submit-changes:preparing changes for submission`".
+
+Branches maintained with security fixes
+---------------------------------------
+
+See the
+:ref:`Release process <ref-manual/release-process:Stable Release Process>`
+documentation for details regarding the policies and maintenance of stable
+branches.
+
+The :yocto_wiki:`Releases page </Releases>` contains a list
+of all releases of the Yocto Project. Versions in gray are no longer actively
+maintained with security patches, but well-tested patches may still be accepted
+for them for significant issues.
+
+Security-related discussions at the Yocto Project
+-------------------------------------------------
+
+We have set up two security-related mailing lists:
+
+ - Public List: yocto [dash] security [at] yoctoproject[dot] org
+
+ This is a public mailing list for anyone to subscribe to. This list is an
+ open list to discuss public security issues/patches and security-related
+ initiatives. For more information, including subscription information,
+ please see the :yocto_lists:`yocto-security mailing list info page </g/yocto-security>`.
+
+ - Private List: security [at] yoctoproject [dot] org
+
+ This is a private mailing list for reporting non-published potential
+ vulnerabilities. The list is monitored by the Yocto Project Security team.
+
+
+What you should do if you find a security vulnerability
+-------------------------------------------------------
+
+If you find a security flaw: a crash, an information leakage, or anything that
+can have a security impact if exploited in any Open Source software built or
+used by the Yocto Project, please report this to the Yocto Project Security
+Team. If you prefer to contact the upstream project directly, please send a
+copy to the security team at the Yocto Project as well. If you believe this is
+highly sensitive information, please report the vulnerability in a secure way,
+i.e. encrypt the email and send it to the private list. This ensures that
+the exploit is not leaked and exploited before a response/fix has been generated.
+
+Security team
+=============
+
+The Yocto Project/OpenEmbedded security team coordinates the work on security
+subjects in the project. All general discussion takes place publicly. The
+Security Team only uses confidential communication tools to deal with private
+vulnerability reports before they are released.
+
+Security team appointment
+-------------------------
+
+The Yocto Project Security Team consists of at least three members. When new
+members are needed, the Yocto Project Technical Steering Committee (YP TSC)
+asks for nominations by public channels including a nomination deadline.
+Self-nominations are possible. When the deadline is
+reached, the YP TSC posts the list of candidates for the comments of project
+participants and developers. Comments may be sent publicly or privately to the
+YP and OE TSCs. The candidates are approved by both YP TSC and OpenEmbedded
+Technical Steering Committee (OE TSC) and the final list of the team members
+is announced publicly. The aim is to have people representing technical
+leadership, security knowledge and infrastructure present with enough people
+to provide backup/coverage but keep the notification list small enough to
+minimize information risk and maintain trust.
+
+YP Security Team members may resign at any time.
+
+Security Team Operations
+------------------------
+
+The work of the Security Team might require high confidentiality. Team members
+are individuals selected by merit and do not represent the companies they work
+for. They do not share information about confidential issues outside of the team
+and do not hint about ongoing embargoes.
+
+Team members can bring in domain experts as needed. Those people should be
+added to individual issues only and adhere to the same standards as the YP
+Security Team.
+
+The YP security team organizes its meetings and communication as needed.
+
+When the YP Security team receives a report about a potential security
+vulnerability, they quickly analyze and notify the reporter of the result.
+They might also request more information.
+
+If the issue is confirmed and affects the code maintained by the YP, they
+confidentially notify maintainers of that code and work with them to prepare
+a fix.
+
+If the issue is confirmed and affects an upstream project, the YP security team
+notifies the project. Usually, the upstream project analyzes the problem again.
+If they deem it a real security problem in their software, they develop and
+release a fix following their security policy. They may want to include the
+original reporter in the loop. There is also sometimes some coordination for
+handling patches, backporting patches etc, or just understanding the problem
+or what caused it.
+
+When the fix is publicly available, the YP security team member or the
+package maintainer sends patches against the YP code base, following usual
+procedures, including public code review.
+
+What Yocto Security Team does when it receives a security vulnerability
+-----------------------------------------------------------------------
+
+The YP Security Team performs a quick analysis and would usually report
+the flaw to the upstream project. Normally the upstream project analyzes the
+problem. If they deem it a real security problem in their software, they
+develop and release a fix following their own security policy. They may want
+to include the original reporter in the loop. There is also sometimes some
+coordination for handling patches, backporting patches etc, or just
+understanding the problem or what caused it.
+
+The security policy of the upstream project might include a notification to
+Linux distributions or other important downstream projects in advance to
+discuss coordinated disclosure. These mailing lists are normally non-public.
+
+When the upstream project releases a version with the fix, they are responsible
+for contacting `Mitre <https://www.cve.org/>`__ to get a CVE number assigned and
+the CVE record published.
+
+If an upstream project does not respond quickly
+-----------------------------------------------
+
+If an upstream project does not fix the problem in a reasonable time,
+the Yocto Project Security Team will contact other interested parties (usually
+other distributions) in the community and together try to solve the
+vulnerability as quickly as possible.
+
+The Yocto Project Security team adheres to a 90-day disclosure policy
+by default. The embargo time can be extended when necessary.
+
+Current Security Team members
+-----------------------------
+
+For secure communications, please send your messages encrypted using the GPG
+keys. Remember, message headers are not encrypted so do not include sensitive
+information in the subject line.
+
+ - Ross Burton: <ross@burtonini.com> `Public key <https://keys.openpgp.org/search?q=ross%40burtonini.com>`__
+
+ - Michael Halstead: <mhalstead [at] linuxfoundation [dot] org>
+ `Public key <https://pgp.mit.edu/pks/lookup?op=vindex&search=0x3373170601861969>`__
+ or `Public key <https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xd1f2407285e571ed12a407a73373170601861969>`__
+
+ - Richard Purdie: <richard.purdie@linuxfoundation.org> `Public key <https://keys.openpgp.org/search?q=richard.purdie%40linuxfoundation.org>`__
+
+ - Marta Rybczynska: <marta DOT rybczynska [at] syslinbit [dot] com> `Public key <https://keys.openpgp.org/search?q=marta.rybczynska@syslinbit.com>`__
+
+ - Steve Sakoman: <steve [at] sakoman [dot] com> `Public key <https://keys.openpgp.org/search?q=steve%40sakoman.com>`__
diff --git a/documentation/dev-manual/speeding-up-build.rst b/documentation/dev-manual/speeding-up-build.rst
new file mode 100644
index 0000000000..6e0d7873ac
--- /dev/null
+++ b/documentation/dev-manual/speeding-up-build.rst
@@ -0,0 +1,109 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Speeding Up a Build
+*******************
+
+Build time can be an issue. By default, the build system uses simple
+controls to try to maximize build efficiency. In general, the default
+settings for all the following variables result in the most efficient
+build times when dealing with single socket systems (i.e. a single CPU).
+If you have multiple CPUs, you might try increasing the default values
+to gain more speed. See the descriptions in the glossary for each
+variable for more information:
+
+- :term:`BB_NUMBER_THREADS`:
+ The maximum number of threads BitBake simultaneously executes.
+
+- :term:`BB_NUMBER_PARSE_THREADS`:
+ The number of threads BitBake uses during parsing.
+
+- :term:`PARALLEL_MAKE`: Extra
+ options passed to the ``make`` command during the
+ :ref:`ref-tasks-compile` task in
+ order to specify parallel compilation on the local build host.
+
+- :term:`PARALLEL_MAKEINST`:
+ Extra options passed to the ``make`` command during the
+ :ref:`ref-tasks-install` task in
+ order to specify parallel installation on the local build host.
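+
+For example, on a build host with 16 CPU cores you could set the following
+in ``local.conf``; the values are only an illustration and should be tuned
+to your machine::
+
+ BB_NUMBER_THREADS = "16"
+ BB_NUMBER_PARSE_THREADS = "16"
+ PARALLEL_MAKE = "-j 16"
+ PARALLEL_MAKEINST = "-j 16"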
+
+As mentioned, these variables all scale to the number of processor cores
+available on the build system. For single socket systems, this
+auto-scaling ensures that the build system fundamentally takes advantage
+of potential parallel operations during the build based on the build
+machine's capabilities.
+
+Additional factors that can affect build speed are:
+
+- File system type: The file system type that the build is being
+ performed on can also influence performance. Using ``ext4`` is
+ recommended over ``ext2`` and ``ext3`` due to the improved features
+ of ``ext4``, such as extents.
+
+- Disabling the updating of access time using ``noatime``: The
+ ``noatime`` mount option prevents the build system from updating file
+ and directory access times.
+
+- Setting a longer commit: Using the "commit=" mount option increases
+ the interval in seconds between disk cache writes. Changing this
+ interval from the five-second default to something longer increases
+ the risk of data loss but decreases the need to write to the disk,
+ thus increasing the build performance.
+
+- Choosing the packaging backend: Of the available packaging backends,
+ IPK is the fastest. Additionally, selecting a single packaging
+ backend also helps.
+
+- Using ``tmpfs`` for :term:`TMPDIR`
+ as a temporary file system: While this can help speed up the build,
+ the benefits are limited due to the compiler using ``-pipe``. The
+ build system goes to some lengths to avoid ``sync()`` calls into the
+ file system on the principle that if there was a significant failure,
+ the :term:`Build Directory` contents could easily be rebuilt.
+
+- Inheriting the :ref:`ref-classes-rm-work` class:
+ Inheriting this class has been shown to speed up builds due to
+ significantly lower amounts of data stored in the data cache as well
+ as on disk. Inheriting this class also makes cleanup of
+ :term:`TMPDIR` faster, at the
+ expense of being easily able to dive into the source code. File
+ system maintainers have recommended that the fastest way to clean up
+ large numbers of files is to reformat partitions rather than delete
+ files due to the linear nature of partitions. This, of course,
+ assumes you structure the disk partitions and file systems in a way
+ that this is practical.
+
+Aside from the previous list, you should keep some trade-offs in mind
+that can help you speed up the build:
+
+- Remove items from
+ :term:`DISTRO_FEATURES`
+ that you might not need.
+
+- Exclude debug symbols and other debug information: If you do not need
+ these symbols and other debug information, disabling the ``*-dbg``
+ package generation can speed up the build. You can disable this
+ generation by setting the
+ :term:`INHIBIT_PACKAGE_DEBUG_SPLIT`
+ variable to "1".
+
+- Disable static library generation for recipes derived from
+ ``autoconf`` or ``libtool``: Here is an example showing how to
+ disable static libraries and still provide an override to handle
+ exceptions::
+
+ STATICLIBCONF = "--disable-static"
+ STATICLIBCONF:sqlite3-native = ""
+ EXTRA_OECONF += "${STATICLIBCONF}"
+
+ .. note::
+
+ - Some recipes need static libraries in order to work correctly
+ (e.g. ``pseudo-native`` needs ``sqlite3-native``). Overrides,
+ as in the previous example, account for these kinds of
+ exceptions.
+
+ - Some packages have packaging code that assumes the presence of
+ the static libraries. If so, you might need to exclude them as
+ well.
+
diff --git a/documentation/dev-manual/start.rst b/documentation/dev-manual/start.rst
index 96fabac1aa..aec39b02a9 100644
--- a/documentation/dev-manual/start.rst
+++ b/documentation/dev-manual/start.rst
@@ -36,7 +36,7 @@ particular working environment and set of practices.
equipment together and set up your development environment's
hardware topology.
- Here are possible roles:
+ Possible roles are:
- *Application Developer:* This type of developer does application
level work on top of an existing software stack.
@@ -88,30 +88,18 @@ particular working environment and set of practices.
For information about BitBake, see the
:doc:`bitbake:index`.
- It is relatively easy to set up Git services and create
- infrastructure like :yocto_git:`/`, which is based on
- server software called ``gitolite`` with ``cgit`` being used to
- generate the web interface that lets you view the repositories. The
- ``gitolite`` software identifies users using SSH keys and allows
+ It is relatively easy to set up Git services and create infrastructure like
+ :yocto_git:`/`, which is based on server software called
+ `Gitolite <https://gitolite.com>`__
+ with `cgit <https://git.zx2c4.com/cgit/about/>`__ being used to
+ generate the web interface that lets you view the repositories.
+ ``gitolite`` identifies users using SSH keys and allows
branch-based access controls to repositories that you can control as
little or as much as necessary.
- .. note::
-
- The setup of these services is beyond the scope of this manual.
- However, here are sites describing how to perform setup:
-
- - `Gitolite <https://gitolite.com>`__: Information for
- ``gitolite``.
-
- - `Interfaces, frontends, and
- tools <https://git.wiki.kernel.org/index.php/Interfaces,_frontends,_and_tools>`__:
- Documentation on how to create interfaces and frontends for
- Git.
-
5. *Set up the Application Development Machines:* As mentioned earlier,
application developers are creating applications on top of existing
- software stacks. Following are some best practices for setting up
+ software stacks. Here are some best practices for setting up
machines used for application development:
- Use a pre-built toolchain that contains the software stack
@@ -130,7 +118,7 @@ particular working environment and set of practices.
6. *Set up the Core Development Machines:* As mentioned earlier, core
developers work on the contents of the operating system itself.
- Following are some best practices for setting up machines used for
+ Here are some best practices for setting up machines used for
developing images:
- Have the :term:`OpenEmbedded Build System` available on
@@ -223,7 +211,7 @@ particular working environment and set of practices.
- Maintain your Metadata in layers that make sense for your
situation. See the ":ref:`overview-manual/yp-intro:the yocto project layer model`"
section in the Yocto Project Overview and Concepts Manual and the
- ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+ ":ref:`dev-manual/layers:understanding and creating layers`"
section for more information on layers.
- Separate the project's Metadata and code by using separate Git
@@ -246,14 +234,13 @@ particular working environment and set of practices.
- The Yocto Project community encourages you to send patches to the
project to fix bugs or add features. If you do submit patches,
follow the project commit guidelines for writing good commit
- messages. See the
- ":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
- section.
+ messages. See the ":doc:`../contributor-guide/submit-changes`"
+ section in the Yocto Project and OpenEmbedded Contributor Guide.
- Send changes to the core sooner than later as others are likely
to run into the same issues. For some guidance on mailing lists
- to use, see the list in the
- ":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
+ to use, see the lists in the
+ ":ref:`contributor-guide/submit-changes:finding a suitable mailing list`"
section. For a description
of the available mailing lists, see the ":ref:`resources-mailinglist`" section in
the Yocto Project Reference Manual.
@@ -267,16 +254,16 @@ development using the Yocto Project. Your build host can be a native
Linux machine (recommended), it can be a machine (Linux, Mac, or
Windows) that uses `CROPS <https://github.com/crops/poky-container>`__,
which leverages `Docker Containers <https://www.docker.com/>`__ or it
-can be a Windows machine capable of running Windows Subsystem For Linux
-v2 (WSL).
+can be a Windows machine capable of running version 2 of Windows Subsystem
+For Linux (WSL 2).
.. note::
- The Yocto Project is not compatible with
- `Windows Subsystem for Linux v1 <https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux>`__.
- It is compatible but not officially supported nor validated with
- WSLv2. If you still decide to use WSL please upgrade to
- `WSLv2 <https://docs.microsoft.com/en-us/windows/wsl/install-win10>`__.
+ The Yocto Project is not compatible with version 1 of
+ `Windows Subsystem for Linux <https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux>`__.
+ It is compatible but neither officially supported nor validated with
+ WSL 2. If you still decide to use WSL please upgrade to
+ `WSL 2 <https://learn.microsoft.com/en-us/windows/wsl/install>`__.
Once your build host is set up to use the Yocto Project, further steps
are necessary depending on what you want to accomplish. See the
@@ -302,7 +289,7 @@ Project Build Host:
as these releases are frequently tested against the Yocto Project and
officially supported. For a list of the distributions under
validation and their status, see the ":ref:`Supported Linux
- Distributions <detailed-supported-distros>`"
+ Distributions <system-requirements-supported-distros>`"
section in the Yocto Project Reference Manual and the wiki page at
:yocto_wiki:`Distribution Support </Distribution_Support>`.
@@ -321,10 +308,12 @@ Project Build Host:
- gcc &MIN_GCC_VERSION; or greater.
+ - GNU make &MIN_MAKE_VERSION; or greater
+
If your build host does not meet any of these listed version
requirements, you can take steps to prepare the system so that you
can still use the Yocto Project. See the
- ":ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`"
+ ":ref:`ref-manual/system-requirements:required git, tar, python, make and gcc versions`"
section in the Yocto Project Reference Manual for information.
4. *Install Development Host Packages:* Required development host
@@ -345,7 +334,10 @@ to use the Extensible SDK, see the ":doc:`/sdk-manual/extensible`" Chapter in th
Project Application Development and the Extensible Software Development
Kit (eSDK) manual. If you want to work on the kernel, see the :doc:`/kernel-dev/index`. If you are going to use
Toaster, see the ":doc:`/toaster-manual/setup-and-use`"
-section in the Toaster User Manual.
+section in the Toaster User Manual. If you are a VSCode user, you can configure
+the `Yocto Project BitBake
+<https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+extension accordingly.
Setting Up to Use CROss PlatformS (CROPS)
-----------------------------------------
@@ -437,37 +429,41 @@ section. If you are going to use the Extensible SDK container, see the
Project Application Development and the Extensible Software Development
Kit (eSDK) manual. If you are going to use the Toaster container, see
the ":doc:`/toaster-manual/setup-and-use`"
-section in the Toaster User Manual.
+section in the Toaster User Manual. If you are a VSCode user, you can configure
+the `Yocto Project BitBake
+<https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+extension accordingly.
-Setting Up to Use Windows Subsystem For Linux (WSLv2)
+Setting Up to Use Windows Subsystem For Linux (WSL 2)
-----------------------------------------------------
-With `Windows Subsystem for Linux
-(WSLv2) <https://docs.microsoft.com/en-us/windows/wsl/wsl2-about>`__,
+With `Windows Subsystem for Linux (WSL 2)
+<https://learn.microsoft.com/en-us/windows/wsl/>`__,
you can create a Yocto Project development environment that allows you
to build on Windows. You can set up a Linux distribution inside Windows
in which you can develop using the Yocto Project.
-Follow these general steps to prepare a Windows machine using WSLv2 as
+Follow these general steps to prepare a Windows machine using WSL 2 as
your Yocto Project build host:
-1. *Make sure your Windows 10 machine is capable of running WSLv2:*
- WSLv2 is only available for Windows 10 builds > 18917. To check which
- build version you are running, you may open a command prompt on
- Windows and execute the command "ver".
- ::
+1. *Make sure your Windows machine is capable of running WSL 2:*
+
+ While all Windows 11 and Windows Server 2022 builds support WSL 2,
+ the first versions of Windows 10 and Windows Server 2019 didn't.
+ Check the minimum build numbers for `Windows 10
+ <https://learn.microsoft.com/en-us/windows/wsl/install-manual#step-2---check-requirements-for-running-wsl-2>`__
+ and for `Windows Server 2019
+ <https://learn.microsoft.com/en-us/windows/wsl/install-on-server>`__.
+
+ To check which build version you are running, you may open a command
+ prompt on Windows and execute the command "ver"::
C:\Users\myuser> ver
Microsoft Windows [Version 10.0.19041.153]
- If your build is capable of running
- WSLv2 you may continue, for more information on this subject or
- instructions on how to upgrade to WSLv2 visit `Windows 10
- WSLv2 <https://docs.microsoft.com/en-us/windows/wsl/wsl2-install>`__
-
-2. *Install the Linux distribution of your choice inside Windows 10:*
- Once you know your version of Windows 10 supports WSLv2, you can
+2. *Install the Linux distribution of your choice inside WSL 2:*
+ Once you know your version of Windows supports WSL 2, you can
install the distribution of your choice from the Microsoft Store.
Open the Microsoft Store and search for Linux. While there are
several Linux distributions available, the assumption is that your
@@ -476,31 +472,28 @@ your Yocto Project build host:
making your selection, simply click "Get" to download and install the
distribution.
-3. *Check your Linux distribution is using WSLv2:* Open a Windows
+3. *Check which Linux distribution WSL 2 is using:* Open a Windows
PowerShell and run::
C:\WINDOWS\system32> wsl -l -v
NAME STATE VERSION
*Ubuntu Running 2
- Note the version column which says the WSL version
- being used by your distribution, on compatible systems, this can be
- changed back at any point in time.
+ Note that WSL 2 supports running as many different Linux distributions
+ as you want to install.
-4. *Optionally Orient Yourself on WSL:* If you are unfamiliar with WSL,
- you can learn more here -
+4. *Optionally Get Familiar with WSL:* You can learn more at
https://docs.microsoft.com/en-us/windows/wsl/wsl2-about.
5. *Launch your WSL Distribution:* From the Windows start menu simply
launch your WSL distribution just like any other application.
-6. *Optimize your WSLv2 storage often:* Due to the way storage is
- handled on WSLv2, the storage space used by the undelying Linux
- distribution is not reflected immedately, and since bitbake heavily
+6. *Optimize your WSL 2 storage often:* Due to the way storage is
+ handled on WSL 2, the storage space used by the underlying Linux
+ distribution is not reflected immediately, and since BitBake heavily
uses storage, after several builds, you may be unaware you are
- running out of space. WSLv2 uses a VHDX file for storage, this issue
- can be easily avoided by manually optimizing this file often, this
- can be done in the following way:
+ running out of space. As WSL 2 uses a VHDX file for storage, this issue
+ can be easily avoided by regularly optimizing this file manually:
1. *Find the location of your VHDX file:*
@@ -554,20 +547,23 @@ your Yocto Project build host:
.. note::
- The current implementation of WSLv2 does not have out-of-the-box
+ The current implementation of WSL 2 does not have out-of-the-box
access to external devices such as those connected through a USB
port, but it automatically mounts your ``C:`` drive on ``/mnt/c/``
(and others), which you can use to share deploy artifacts to be later
flashed on hardware through Windows, but your build directory should
not reside inside this mountpoint.
-Once you have WSLv2 set up, everything is in place to develop just as if
+Once you have WSL 2 set up, everything is in place to develop just as if
you were running on a native Linux machine. If you are going to use the
Extensible SDK container, see the ":doc:`/sdk-manual/extensible`" Chapter in the Yocto
Project Application Development and the Extensible Software Development
Kit (eSDK) manual. If you are going to use the Toaster container, see
the ":doc:`/toaster-manual/setup-and-use`"
-section in the Toaster User Manual.
+section in the Toaster User Manual. If you are a VSCode user, you can configure
+the `Yocto Project BitBake
+<https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+extension accordingly.
Locating Yocto Project Source Files
===================================
@@ -616,10 +612,14 @@ Use the following procedure to locate the latest upstream copy of the
Accessing Index of Releases
---------------------------
-Yocto Project maintains an Index of Releases area that contains related
-files that contribute to the Yocto Project. Rather than Git
-repositories, these files are tarballs that represent snapshots in time
-of a given component.
+The Yocto Project also provides source archives of its releases, which
+are available on :yocto_dl:`/releases/yocto/`. There, choose the subdirectory
+containing the release you wish to use, for example
+:yocto_dl:`yocto-&DISTRO; </releases/yocto/yocto-&DISTRO;/>`.
+
+There you will find source archives of individual components (if you wish
+to use them individually), as well as of the corresponding Poky release,
+which bundles a selection of these components.
.. note::
@@ -655,7 +655,7 @@ Follow these steps to locate and download a particular tarball:
Using the Downloads Page
------------------------
-The :yocto_home:`Yocto Project Website <>` uses a "DOWNLOADS" page
+The :yocto_home:`Yocto Project Website <>` uses a "RELEASES" page
from which you can locate and download tarballs of any Yocto Project
release. Rather than Git repositories, these files represent snapshot
tarballs similar to the tarballs located in the Index of Releases
@@ -664,12 +664,13 @@ described in the ":ref:`dev-manual/start:accessing index of releases`" section.
1. *Go to the Yocto Project Website:* Open The
:yocto_home:`Yocto Project Website <>` in your browser.
-2. *Get to the Downloads Area:* Select the "DOWNLOADS" item from the
- pull-down "SOFTWARE" tab menu near the top of the page.
+#. *Get to the Downloads Area:* Select the "RELEASES" item from the
+ pull-down "DEVELOPMENT" tab menu near the top of the page.
-3. *Select a Yocto Project Release:* Use the menu next to "RELEASE" to
- display and choose a recent or past supported Yocto Project release
- (e.g. &DISTRO_NAME_NO_CAP;, &DISTRO_NAME_NO_CAP_MINUS_ONE;, and so forth).
+#. *Select a Yocto Project Release:* At the top of the "RELEASES" page, the
+   currently supported releases are displayed; further down, past supported
+   Yocto Project releases are visible. The "Download" links in the rows of the
+   table there lead to the download tarballs for each release.
.. note::
@@ -679,9 +680,9 @@ described in the ":ref:`dev-manual/start:accessing index of releases`" section.
You can use the "RELEASE ARCHIVE" link to reveal a menu of all Yocto
Project releases.
-4. *Download Tools or Board Support Packages (BSPs):* From the
- "DOWNLOADS" page, you can download tools or BSPs as well. Just scroll
- down the page and look for what you need.
+#. *Download Tools or Board Support Packages (BSPs):* Next to the tarballs,
+   you will also find tools and BSPs available for download. Just select a
+   Yocto Project release and look for what you need.
Cloning and Checking Out Branches
=================================
diff --git a/documentation/dev-manual/temporary-source-code.rst b/documentation/dev-manual/temporary-source-code.rst
new file mode 100644
index 0000000000..08bf68d982
--- /dev/null
+++ b/documentation/dev-manual/temporary-source-code.rst
@@ -0,0 +1,66 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Finding Temporary Source Code
+*****************************
+
+You might find it helpful during development to modify the temporary
+source code used by recipes to build packages. For example, suppose you
+are developing a patch and you need to experiment a bit to figure out
+your solution. After you have initially built the package, you can
+iteratively tweak the source code, which is located in the
+:term:`Build Directory`, and then you can force a re-compile and quickly
+test your altered code. Once you settle on a solution, you can then preserve
+your changes in the form of patches.
+
+During a build, the unpacked temporary source code used by recipes to
+build packages is available in the :term:`Build Directory` as defined by the
+:term:`S` variable. Below is the default value for the :term:`S` variable as
+defined in the ``meta/conf/bitbake.conf`` configuration file in the
+:term:`Source Directory`::
+
+ S = "${WORKDIR}/${BP}"
+
+You should be aware that many recipes override the
+:term:`S` variable. For example, recipes that fetch their source from Git
+usually set :term:`S` to ``${WORKDIR}/git``.
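+
+As a minimal hypothetical illustration (the repository URL is an example), such
+a recipe might contain::
+
+ SRC_URI = "git://git.example.com/foo.git;protocol=https;branch=main"
+ S = "${WORKDIR}/git"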
+
+.. note::
+
+ The :term:`BP` represents the base recipe name, which consists of the name
+ and version::
+
+ BP = "${BPN}-${PV}"
+
+
+The path to the work directory for the recipe
+(:term:`WORKDIR`) is defined as
+follows::
+
+ ${TMPDIR}/work/${MULTIMACH_TARGET_SYS}/${PN}/${EXTENDPE}${PV}-${PR}
+
+The actual directory depends on several things:
+
+- :term:`TMPDIR`: The top-level build
+ output directory.
+
+- :term:`MULTIMACH_TARGET_SYS`:
+ The target system identifier.
+
+- :term:`PN`: The recipe name.
+
+- :term:`EXTENDPE`: The epoch --- if
+ :term:`PE` is not specified, which is
+ usually the case for most recipes, then :term:`EXTENDPE` is blank.
+
+- :term:`PV`: The recipe version.
+
+- :term:`PR`: The recipe revision.
+
+As an example, assume a Source Directory top-level folder named
+``poky``, a default :term:`Build Directory` at ``poky/build``, and a
+``qemux86-poky-linux`` machine target system. Furthermore, suppose your
+recipe is named ``foo_1.3.0.bb``. In this case, the work directory the
+build system uses to build the package would be as follows::
+
+ poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0
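+
+As a quick sketch (reusing the hypothetical ``foo`` recipe above), you can also
+ask BitBake for the resolved values of :term:`WORKDIR` and :term:`S` instead of
+reconstructing the path by hand::
+
+ $ bitbake -e foo | grep -E '^(S|WORKDIR)='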
+
diff --git a/documentation/dev-manual/upgrading-recipes.rst b/documentation/dev-manual/upgrading-recipes.rst
new file mode 100644
index 0000000000..4fac78bdfb
--- /dev/null
+++ b/documentation/dev-manual/upgrading-recipes.rst
@@ -0,0 +1,397 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Upgrading Recipes
+*****************
+
+Over time, upstream developers publish new versions for software built
+by layer recipes. It is recommended to keep recipes up-to-date with
+upstream version releases.
+
+While there are several methods to upgrade a recipe, you might
+consider checking on the upgrade status of a recipe first. You can do so
+using the ``devtool check-upgrade-status`` command. See the
+":ref:`devtool-checking-on-the-upgrade-status-of-a-recipe`"
+section in the Yocto Project Reference Manual for more information.
+
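+For example, to check the upgrade status of a single recipe before deciding how
+to upgrade it (using ``nano``, which also serves as the example recipe later in
+this section)::
+
+ $ devtool check-upgrade-status nano
+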
+The remainder of this section describes three ways you can upgrade a
+recipe. You can use the Automated Upgrade Helper (AUH) to set up
+automatic version upgrades. Alternatively, you can use
+``devtool upgrade`` to set up semi-automatic version upgrades. Finally,
+you can manually upgrade a recipe by editing the recipe itself.
+
+Using the Auto Upgrade Helper (AUH)
+===================================
+
+The AUH utility works in conjunction with the OpenEmbedded build system
+in order to automatically generate upgrades for recipes based on new
+versions being published upstream. Use AUH when you want to create a
+service that performs the upgrades automatically and optionally sends
+you an email with the results.
+
+AUH allows you to update several recipes with a single use. You can also
+optionally perform build and integration tests using images with the
+results saved to your hard drive and emails of results optionally sent
+to recipe maintainers. Finally, AUH creates Git commits with appropriate
+commit messages in the layer's tree for the changes made to recipes.
+
+.. note::
+
+ In some conditions, you should not use AUH to upgrade recipes
+ and should instead use either ``devtool upgrade`` or upgrade your
+ recipes manually:
+
+ - When AUH cannot complete the upgrade sequence. This situation
+ usually results because custom patches carried by the recipe
+ cannot be automatically rebased to the new version. In this case,
+ ``devtool upgrade`` allows you to manually resolve conflicts.
+
+ - When for any reason you want fuller control over the upgrade
+ process. For example, when you want special arrangements for
+ testing.
+
+The following steps describe how to set up the AUH utility:
+
+#. *Be Sure the Development Host is Set Up:* You need to be sure that
+ your development host is set up to use the Yocto Project. For
+ information on how to set up your host, see the
+ ":ref:`dev-manual/start:Preparing the Build Host`" section.
+
+#. *Make Sure Git is Configured:* The AUH utility requires Git to be
+ configured because AUH uses Git to save upgrades. Thus, you must have
+ Git user and email configured. The following command shows your
+ configurations::
+
+ $ git config --list
+
+ If you do not have the user and
+ email configured, you can use the following commands to do so::
+
+ $ git config --global user.name some_name
+ $ git config --global user.email username@domain.com
+
+#. *Clone the AUH Repository:* To use AUH, you must clone the repository
+ onto your development host. The following command uses Git to create
+ a local copy of the repository on your system::
+
+ $ git clone git://git.yoctoproject.org/auto-upgrade-helper
+ Cloning into 'auto-upgrade-helper'...
+ remote: Counting objects: 768, done.
+ remote: Compressing objects: 100% (300/300), done.
+ remote: Total 768 (delta 499), reused 703 (delta 434)
+ Receiving objects: 100% (768/768), 191.47 KiB | 98.00 KiB/s, done.
+ Resolving deltas: 100% (499/499), done.
+ Checking connectivity... done.
+
+ AUH is not part of the :term:`OpenEmbedded-Core (OE-Core)` or
+ :term:`Poky` repositories.
+
+#. *Create a Dedicated Build Directory:* Run the :ref:`structure-core-script`
+ script to create a fresh :term:`Build Directory` that you use exclusively
+ for running the AUH utility::
+
+ $ cd poky
+ $ source oe-init-build-env your_AUH_build_directory
+
+ Re-using an existing :term:`Build Directory` and its configurations is not
+ recommended as existing settings could cause AUH to fail or behave
+ undesirably.
+
+#. *Make Configurations in Your Local Configuration File:* Several
+ settings are needed in the ``local.conf`` file in the build
+ directory you just created for AUH. Make the following
+ configurations:
+
+ - If you want to enable :ref:`Build
+ History <dev-manual/build-quality:maintaining build output quality>`,
+ which is optional, you need the following lines in the
+ ``conf/local.conf`` file::
+
+ INHERIT =+ "buildhistory"
+ BUILDHISTORY_COMMIT = "1"
+
+ With this configuration and a successful
+ upgrade, a build history "diff" file appears in the
+ ``upgrade-helper/work/recipe/buildhistory-diff.txt`` file found in
+ your :term:`Build Directory`.
+
+ - If you want to enable testing through the :ref:`ref-classes-testimage`
+ class, which is optional, you need to have the following set in
+ your ``conf/local.conf`` file::
+
+ IMAGE_CLASSES += "testimage"
+
+ .. note::
+
+ If your distro does not enable ptest by default (Poky does), you need
+ the following in your ``local.conf`` file::
+
+ DISTRO_FEATURES:append = " ptest"
+
+
+#. *Optionally Start a vncserver:* If you are running in a server
+ without an X11 session, you need to start a vncserver::
+
+ $ vncserver :1
+ $ export DISPLAY=:1
+
+#. *Create and Edit an AUH Configuration File:* You need to have the
+ ``upgrade-helper/upgrade-helper.conf`` configuration file in your
+ :term:`Build Directory`. You can find a sample configuration file in the
+ :yocto_git:`AUH source repository </auto-upgrade-helper/tree/>`.
+
+ Read through the sample file and make configurations as needed. For
+ example, if you enabled build history in your ``local.conf`` as
+ described earlier, you must enable it in ``upgrade-helper.conf``.
+
+ Also, if you are using the default ``maintainers.inc`` file supplied
+ with Poky and located in ``meta-yocto`` and you do not set a
+ "maintainers_whitelist" or "global_maintainer_override" in the
+ ``upgrade-helper.conf`` configuration, and you specify "-e all" on
+ the AUH command-line, the utility automatically sends out emails to
+ all the default maintainers. Please avoid this.
+
+This next set of examples describes how to use the AUH:
+
+- *Upgrading a Specific Recipe:* To upgrade a specific recipe, use the
+ following form::
+
+ $ upgrade-helper.py recipe_name
+
+ For example, this command upgrades the ``xmodmap`` recipe::
+
+ $ upgrade-helper.py xmodmap
+
+- *Upgrading a Specific Recipe to a Particular Version:* To upgrade a
+ specific recipe to a particular version, use the following form::
+
+ $ upgrade-helper.py recipe_name -t version
+
+ For example, this command upgrades the ``xmodmap`` recipe to version 1.2.3::
+
+ $ upgrade-helper.py xmodmap -t 1.2.3
+
+- *Upgrading all Recipes to the Latest Versions and Suppressing Email
+ Notifications:* To upgrade all recipes to their most recent versions
+ and suppress the email notifications, use the following command::
+
+ $ upgrade-helper.py all
+
+- *Upgrading all Recipes to the Latest Versions and Sending Email
+ Notifications:* To upgrade all recipes to their most recent versions
+ and send email messages to maintainers for each attempted recipe as
+ well as a status email, use the following command::
+
+ $ upgrade-helper.py -e all
+
+Once you have run the AUH utility, you can find the results in the AUH
+:term:`Build Directory`::
+
+ ${BUILDDIR}/upgrade-helper/timestamp
+
+The AUH utility
+also creates recipe update commits from successful upgrade attempts in
+the layer tree.
+
+You can easily set up to run the AUH utility on a regular basis by using
+a cron job. See the
+:yocto_git:`weeklyjob.sh </auto-upgrade-helper/tree/weeklyjob.sh>`
+file distributed with the utility for an example.
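+
+As a hypothetical sketch (the installation path and schedule are examples), a
+crontab entry running your adapted copy of that weekly job every Sunday night
+could look like::
+
+ # m h dom mon dow command
+ 30 2 * * 0 /home/user/auto-upgrade-helper/weeklyjob.sh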
+
+Using ``devtool upgrade``
+=========================
+
+As mentioned earlier, an alternative method for upgrading recipes to
+newer versions is to use
+:doc:`devtool upgrade </ref-manual/devtool-reference>`.
+You can read about ``devtool upgrade`` in general in the
+":ref:`sdk-manual/extensible:use \`\`devtool upgrade\`\` to create a version of the recipe that supports a newer version of the software`"
+section in the Yocto Project Application Development and the Extensible
+Software Development Kit (eSDK) Manual.
+
+To see all the command-line options available with ``devtool upgrade``,
+use the following help command::
+
+ $ devtool upgrade -h
+
+If you want to find out what version a recipe is currently at upstream
+without any attempt to upgrade your local version of the recipe, you can
+use the following command::
+
+ $ devtool latest-version recipe_name
+
+As mentioned in the previous section describing AUH, ``devtool upgrade``
+works in a less-automated manner than AUH. Specifically,
+``devtool upgrade`` only works on a single recipe that you name on the
+command line, cannot perform build and integration testing using images,
+and does not automatically generate commits for changes in the source
+tree. Despite all these "limitations", ``devtool upgrade`` updates the
+recipe file to the new upstream version and attempts to rebase custom
+patches contained by the recipe as needed.
+
+.. note::
+
+ AUH uses much of ``devtool upgrade`` behind the scenes, making AUH somewhat
+ of a "wrapper" application for ``devtool upgrade``.
+
+A typical scenario involves having used Git to clone an upstream
+repository that you use during build operations. Because you have built the
+recipe in the past, the layer is likely added to your
+configuration already. If for some reason, the layer is not added, you
+could add it easily using the
+":ref:`bitbake-layers <bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script>`"
+script. For example, suppose you use the ``nano.bb`` recipe from the
+``meta-oe`` layer in the ``meta-openembedded`` repository. For this
+example, assume that the layer has been cloned into the following area::
+
+ /home/scottrif/meta-openembedded
+
+The following command from your :term:`Build Directory` adds the layer to
+your build configuration (i.e. ``${BUILDDIR}/conf/bblayers.conf``)::
+
+ $ bitbake-layers add-layer /home/scottrif/meta-openembedded/meta-oe
+ NOTE: Starting bitbake server...
+ Parsing recipes: 100% |##########################################| Time: 0:00:55
+ Parsing of 1431 .bb files complete (0 cached, 1431 parsed). 2040 targets, 56 skipped, 0 masked, 0 errors.
+ Removing 12 recipes from the x86_64 sysroot: 100% |##############| Time: 0:00:00
+ Removing 1 recipes from the x86_64_i586 sysroot: 100% |##########| Time: 0:00:00
+ Removing 5 recipes from the i586 sysroot: 100% |#################| Time: 0:00:00
+ Removing 5 recipes from the qemux86 sysroot: 100% |##############| Time: 0:00:00
+
+For this example, assume that the ``nano.bb`` recipe that
+is upstream has a 2.9.3 version number. However, the version in the
+local repository is 2.7.4. The following command from your build
+directory automatically upgrades the recipe for you::
+
+ $ devtool upgrade nano -V 2.9.3
+ NOTE: Starting bitbake server...
+ NOTE: Creating workspace layer in /home/scottrif/poky/build/workspace
+ Parsing recipes: 100% |##########################################| Time: 0:00:46
+ Parsing of 1431 .bb files complete (0 cached, 1431 parsed). 2040 targets, 56 skipped, 0 masked, 0 errors.
+ NOTE: Extracting current version source...
+ NOTE: Resolving any missing task queue dependencies
+ .
+ .
+ .
+ NOTE: Executing SetScene Tasks
+ NOTE: Executing RunQueue Tasks
+ NOTE: Tasks Summary: Attempted 74 tasks of which 72 didn't need to be rerun and all succeeded.
+ Adding changed files: 100% |#####################################| Time: 0:00:00
+ NOTE: Upgraded source extracted to /home/scottrif/poky/build/workspace/sources/nano
+ NOTE: New recipe is /home/scottrif/poky/build/workspace/recipes/nano/nano_2.9.3.bb
+
+.. note::
+
+ Using the ``-V`` option is not necessary. Omitting the version number causes
+ ``devtool upgrade`` to upgrade the recipe to the most recent version.
+
+Continuing with this example, you can use ``devtool build`` to build the
+newly upgraded recipe::
+
+ $ devtool build nano
+ NOTE: Starting bitbake server...
+ Loading cache: 100% |################################################################################################| Time: 0:00:01
+ Loaded 2040 entries from dependency cache.
+ Parsing recipes: 100% |##############################################################################################| Time: 0:00:00
+ Parsing of 1432 .bb files complete (1431 cached, 1 parsed). 2041 targets, 56 skipped, 0 masked, 0 errors.
+ NOTE: Resolving any missing task queue dependencies
+ .
+ .
+ .
+ NOTE: Executing SetScene Tasks
+ NOTE: Executing RunQueue Tasks
+ NOTE: nano: compiling from external source tree /home/scottrif/poky/build/workspace/sources/nano
+ NOTE: Tasks Summary: Attempted 520 tasks of which 304 didn't need to be rerun and all succeeded.
+
+Within the ``devtool upgrade`` workflow, you can
+deploy and test your rebuilt software. For this example,
+however, running ``devtool finish`` cleans up the workspace once the
+source in your workspace is clean. This usually means using Git to stage
+and submit commits for the changes generated by the upgrade process.
+
+Once the tree is clean, you can clean things up in this example with the
+following command from the ``${BUILDDIR}/workspace/sources/nano``
+directory::
+
+ $ devtool finish nano meta-oe
+ NOTE: Starting bitbake server...
+ Loading cache: 100% |################################################################################################| Time: 0:00:00
+ Loaded 2040 entries from dependency cache.
+ Parsing recipes: 100% |##############################################################################################| Time: 0:00:01
+ Parsing of 1432 .bb files complete (1431 cached, 1 parsed). 2041 targets, 56 skipped, 0 masked, 0 errors.
+ NOTE: Adding new patch 0001-nano.bb-Stuff-I-changed-when-upgrading-nano.bb.patch
+ NOTE: Updating recipe nano_2.9.3.bb
+ NOTE: Removing file /home/scottrif/meta-openembedded/meta-oe/recipes-support/nano/nano_2.7.4.bb
+ NOTE: Moving recipe file to /home/scottrif/meta-openembedded/meta-oe/recipes-support/nano
+ NOTE: Leaving source tree /home/scottrif/poky/build/workspace/sources/nano as-is; if you no longer need it then please delete it manually
+
+
+Using the ``devtool finish`` command cleans up the workspace and creates a patch
+file based on your commits. The tool puts all patch files back into the
+source directory in a sub-directory named ``nano`` in this case.
+
+Manually Upgrading a Recipe
+===========================
+
+If for some reason you choose not to upgrade recipes using
+:ref:`dev-manual/upgrading-recipes:Using the Auto Upgrade Helper (AUH)` or
+by :ref:`dev-manual/upgrading-recipes:Using \`\`devtool upgrade\`\``,
+you can manually edit the recipe files to upgrade the versions.
+
+.. note::
+
+ Manually updating multiple recipes scales poorly and involves many
+ steps. The recommended way to upgrade recipe versions is through AUH
+ or ``devtool upgrade``, both of which automate some of the steps and
+ provide guidance for the remaining manual ones.
+
+To manually upgrade recipe versions, follow these general steps:
+
+#. *Change the Version:* Rename the recipe such that the version (i.e.
+ the :term:`PV` part of the recipe name)
+ changes appropriately. If the version is not part of the recipe name,
+ change the value as it is set for :term:`PV` within the recipe itself.
+
+#. *Update* :term:`SRCREV` *if Needed*: If the source code your recipe builds
+   is fetched from Git or some other version control system, update
+   :term:`SRCREV` to point to the commit hash that matches the new version.
+   A sketch of these first two steps is shown after this list.
+
+#. *Build the Software:* Try to build the recipe using BitBake. Typical
+ build failures include the following:
+
+ - License statements were updated for the new version. For this
+ case, you need to review any changes to the license and update the
+ values of :term:`LICENSE` and
+ :term:`LIC_FILES_CHKSUM`
+ as needed.
+
+ .. note::
+
+ License changes are often inconsequential. For example, the
+ license text's copyright year might have changed.
+
+ - Custom patches carried by the older version of the recipe might
+ fail to apply to the new version. For these cases, you need to
+ review the failures. Patches might not be necessary for the new
+ version of the software if the upgraded version has fixed those
+ issues. If a patch is necessary and failing, you need to rebase it
+ into the new version.
+
+#. *Optionally Attempt to Build for Several Architectures:* Once you
+ successfully build the new software for a given architecture, you
+ could test the build for other architectures by changing the
+ :term:`MACHINE` variable and
+ rebuilding the software. This optional step is especially important
+ if the recipe is to be released publicly.
+
+#. *Check the Upstream Change Log or Release Notes:* Checking both these
+ reveals if there are new features that could break
+ backwards-compatibility. If so, you need to take steps to mitigate or
+ eliminate that situation.
+
+#. *Optionally Create a Bootable Image and Test:* If you want, you can
+ test the new software by booting it onto actual hardware.
+
+#. *Create a Commit with the Change in the Layer Repository:* After all
+ builds work and any testing is successful, you can create commits for
+ any changes in the layer holding your upgraded recipe.
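+
+As a minimal sketch of the first two steps (recipe name, version and commit
+hash are hypothetical), renaming the recipe could look like::
+
+ $ git mv foo_1.3.0.bb foo_1.4.0.bb
+
+with the renamed recipe then updated to point at the new version's commit::
+
+ SRCREV = "0123456789abcdef0123456789abcdef01234567"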
+
diff --git a/documentation/dev-manual/vulnerabilities.rst b/documentation/dev-manual/vulnerabilities.rst
new file mode 100644
index 0000000000..ac0ca249c1
--- /dev/null
+++ b/documentation/dev-manual/vulnerabilities.rst
@@ -0,0 +1,214 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Checking for Vulnerabilities
+****************************
+
+Vulnerabilities in Poky and OE-Core
+===================================
+
+The Yocto Project has an infrastructure to track and address unfixed
+known security vulnerabilities, as tracked by the public
+:wikipedia:`Common Vulnerabilities and Exposures (CVE) <Common_Vulnerabilities_and_Exposures>`
+database.
+
+The Yocto Project maintains a `list of known vulnerabilities
+<https://autobuilder.yocto.io/pub/non-release/patchmetrics/>`__
+for packages in Poky and OE-Core, tracking the evolution of the number of
+unpatched CVEs and the status of patches. Such information is available for
+the current development version and for each supported release.
+
+Security is a process, not a product, and thus at any time, a number of security
+issues may be impacting Poky and OE-Core. It is up to the maintainers, users,
+contributors and anyone interested in the issues to investigate and possibly fix them by
+updating software components to newer versions or by applying patches to address them.
+It is recommended to work with Poky and OE-Core upstream maintainers and submit
+patches to fix them, see ":doc:`../contributor-guide/submit-changes`" for details.
+
+Vulnerability check at build time
+=================================
+
+To enable a check for CVE security vulnerabilities using
+:ref:`ref-classes-cve-check` in the specific image or target you are building,
+add the following setting to your configuration::
+
+ INHERIT += "cve-check"
+
+The CVE database contains some old incomplete entries which have been
+deemed not to impact Poky or OE-Core. These CVE entries can be excluded from the
+check using build configuration::
+
+ include conf/distro/include/cve-extra-exclusions.inc
+
+With this CVE check enabled, the BitBake build will try to map each compiled
+software component's recipe name and version information to the CVE database and
+generate recipe-specific and image-specific reports. These reports will contain:
+
+- metadata about the software component like names and versions
+
+- metadata about the CVE issue such as description and NVD link
+
+- for each software component, a list of CVEs which are possibly impacting this version
+
+- status of each CVE: ``Patched``, ``Unpatched`` or ``Ignored``
+
+The status ``Patched`` means that a patch file to address the security issue has been
+applied. ``Unpatched`` status means that no patches to address the issue have been
+applied and that the issue needs to be investigated. ``Ignored`` means that, after
+analysis, the issue has been deemed safe to ignore, for example because it only
+affects the software component on a different operating system platform.
+
+After a build with CVE check enabled, reports for each compiled source recipe will be
+found in ``build/tmp/deploy/cve``.
+
+For example the CVE check report for the ``flex-native`` recipe looks like::
+
+ $ cat poky/build/tmp/deploy/cve/flex-native
+ LAYER: meta
+ PACKAGE NAME: flex-native
+ PACKAGE VERSION: 2.6.4
+ CVE: CVE-2016-6354
+ CVE STATUS: Patched
+ CVE SUMMARY: Heap-based buffer overflow in the yy_get_next_buffer function in Flex before 2.6.1 might allow context-dependent attackers to cause a denial of service or possibly execute arbitrary code via vectors involving num_to_read.
+ CVSS v2 BASE SCORE: 7.5
+ CVSS v3 BASE SCORE: 9.8
+ VECTOR: NETWORK
+ MORE INFORMATION: https://nvd.nist.gov/vuln/detail/CVE-2016-6354
+
+ LAYER: meta
+ PACKAGE NAME: flex-native
+ PACKAGE VERSION: 2.6.4
+ CVE: CVE-2019-6293
+ CVE STATUS: Ignored
+ CVE SUMMARY: An issue was discovered in the function mark_beginning_as_normal in nfa.c in flex 2.6.4. There is a stack exhaustion problem caused by the mark_beginning_as_normal function making recursive calls to itself in certain scenarios involving lots of '*' characters. Remote attackers could leverage this vulnerability to cause a denial-of-service.
+ CVSS v2 BASE SCORE: 4.3
+ CVSS v3 BASE SCORE: 5.5
+ VECTOR: NETWORK
+ MORE INFORMATION: https://nvd.nist.gov/vuln/detail/CVE-2019-6293
+
+For images, a summary of all recipes included in the image and their CVEs is also
+generated in textual and JSON formats. These ``.cve`` and ``.json`` reports can be found
+in the ``tmp/deploy/images`` directory for each compiled image.
+
+At build time, the CVE check will also throw warnings about ``Unpatched`` CVEs::
+
+ WARNING: flex-2.6.4-r0 do_cve_check: Found unpatched CVE (CVE-2019-6293), for more information check /poky/build/tmp/work/core2-64-poky-linux/flex/2.6.4-r0/temp/cve.log
+ WARNING: libarchive-3.5.1-r0 do_cve_check: Found unpatched CVE (CVE-2021-36976), for more information check /poky/build/tmp/work/core2-64-poky-linux/libarchive/3.5.1-r0/temp/cve.log
+
+It is also possible to check the CVE status of individual packages as follows::
+
+ bitbake -c cve_check flex libarchive
+
+Fixing CVE product name and version mappings
+============================================
+
+By default, :ref:`ref-classes-cve-check` uses the recipe name :term:`BPN` as the CVE
+product name when querying the CVE database. If this mapping produces false positives
+(some reported CVEs do not apply to the software component in question) or false
+negatives (some CVEs that should impact the recipe are not found), the problem may lie
+in the mapping from the recipe name to the CVE product name. Such mapping issues can be
+fixed by setting the :term:`CVE_PRODUCT` variable inside the recipe. This defines the
+name of the software component in the
+upstream `NIST CVE database <https://nvd.nist.gov/>`__.
+
+The variable supports using vendor and product names like this::
+
+ CVE_PRODUCT = "flex_project:flex"
+
+In this example, the vendor name used in the CVE database is ``flex_project`` and the
+product is ``flex``. With this setting, the ``flex`` recipe only maps to this specific
+product and not to products from other vendors with the same name ``flex``.
+
+Similarly, when the recipe version :term:`PV` is not compatible with the software
+versions used by the upstream releases and the CVE database, the version used for
+CVE checking can be overridden using the :term:`CVE_VERSION` variable.
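+
+For example, a hypothetical recipe whose :term:`PV` carries a Git revision
+suffix, while the CVE database only knows the plain upstream version, could
+set::
+
+ CVE_VERSION = "1.2.3"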
+
+Note that if the CVE entries in the NVD database contain bugs or have missing or incomplete
+information, it is recommended to fix the information there directly instead of working
+around the issues possibly for a long time in Poky and OE-Core side recipes. Feedback to
+NVD about CVE entries can be provided through the `NVD contact form <https://nvd.nist.gov/info/contact-form>`__.
+
+Fixing vulnerabilities in recipes
+=================================
+
+If a CVE security issue impacts a software component, it can be fixed by updating to a newer
+version of the software component or by applying a patch. For Poky and OE-Core master branches, updating
+to a newer software component release with fixes is the best option, but patches can be applied
+if releases are not yet available.
+
+For stable branches, it is preferred to apply patches for the issues. For some software
+components minor version updates can also be applied if they are backwards compatible.
+
+Here is an example of fixing CVE security issues with patch files, taken from
+the :oe_layerindex:`ffmpeg recipe</layerindex/recipe/47350>`::
+
+ SRC_URI = "https://www.ffmpeg.org/releases/${BP}.tar.xz \
+ file://0001-libavutil-include-assembly-with-full-path-from-sourc.patch \
+ file://fix-CVE-2020-20446.patch \
+ file://fix-CVE-2020-20453.patch \
+ file://fix-CVE-2020-22015.patch \
+ file://fix-CVE-2020-22021.patch \
+ file://fix-CVE-2020-22033-CVE-2020-22019.patch \
+ file://fix-CVE-2021-33815.patch \
+ "
+
+A good practice is to include the CVE identifier in both the patch file name
+and inside the patch file commit message using the format::
+
+ CVE: CVE-2020-22033
+
+CVE checker will then capture this information and change the CVE status to ``Patched``
+in the generated reports.
+
+If analysis shows that the CVE issue does not impact the recipe due to configuration, platform,
+version or other reasons, the CVE can be marked as ``Ignored`` using the :term:`CVE_CHECK_IGNORE` variable.
+As mentioned previously, if data in the CVE database is wrong, it is recommended to fix those
+issues in the CVE database directly.
+
+Recipes can be completely skipped by CVE check by including the recipe name in
+the :term:`CVE_CHECK_SKIP_RECIPE` variable.
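+
+As a sketch (the CVE identifier comes from the ``flex`` example above, the
+recipe name is hypothetical), ignoring one CVE inside a recipe and skipping
+another recipe entirely could look like::
+
+ # In the affected recipe, after analysis showed the issue does not apply:
+ CVE_CHECK_IGNORE += "CVE-2019-6293"
+
+ # In a configuration file such as local.conf:
+ CVE_CHECK_SKIP_RECIPE += "my-internal-tool"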
+
+Implementation details
+======================
+
+Here's what the :ref:`ref-classes-cve-check` class does to find unpatched CVE IDs.
+
+First the code goes through each patch file provided by a recipe. If a valid CVE ID
+is found in the name of the file, the corresponding CVE is considered as patched.
+Note that if multiple CVE IDs are found in the filename, only the last
+one is considered. Then, the code looks for ``CVE: CVE-ID`` lines in the patch
+file. The found CVE IDs are also considered as patched.
+
+Then, the code looks up all the CVE IDs in the NIST database for all the
+products defined in :term:`CVE_PRODUCT`. Then, for each found CVE:
+
+- If the package name (:term:`PN`) is part of
+ :term:`CVE_CHECK_SKIP_RECIPE`, it is considered as ``Patched``.
+
+- If the CVE ID is part of :term:`CVE_CHECK_IGNORE`, it is
+ set as ``Ignored``.
+
+- If the CVE ID is part of the patched CVE for the recipe, it is
+ already considered as ``Patched``.
+
+- Otherwise, the code checks whether the recipe version (:term:`PV`)
+ is within the range of versions impacted by the CVE. If so, the CVE
+ is considered as ``Unpatched``.
+
+The CVE database is stored in :term:`DL_DIR` and can be inspected using
+``sqlite3`` command as follows::
+
+ sqlite3 downloads/CVE_CHECK/nvdcve_1.1.db .dump | grep CVE-2021-37462
+
+When analyzing CVEs, it is recommended to:
+
+- study the latest information in `CVE database <https://nvd.nist.gov/vuln/search>`__.
+
+- check how upstream developers of the software component addressed the issue, e.g.
+ what patch was applied, which upstream release contains the fix.
+
+- check what other Linux distributions like `Debian <https://security-tracker.debian.org/tracker/>`__
+ did to analyze and address the issue.
+
+- follow security notices from other Linux distributions.
+
+- follow public `open source security mailing lists <https://oss-security.openwall.org/wiki/mailing-lists>`__ for
+ discussions and advance notifications of CVE bugs and software releases with fixes.
+
diff --git a/documentation/dev-manual/wayland.rst b/documentation/dev-manual/wayland.rst
new file mode 100644
index 0000000000..097be9cbde
--- /dev/null
+++ b/documentation/dev-manual/wayland.rst
@@ -0,0 +1,90 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Using Wayland and Weston
+************************
+
+:wikipedia:`Wayland <Wayland_(display_server_protocol)>`
+is a computer display server protocol that provides a method for
+compositing window managers to communicate directly with applications
+and video hardware and expects them to communicate with input hardware
+using other libraries. Using Wayland with supporting targets can result
+in better control over graphics frame rendering than an application
+might otherwise achieve.
+
+The Yocto Project provides the Wayland protocol libraries and the
+reference :wikipedia:`Weston <Wayland_(display_server_protocol)#Weston>`
+compositor as part of its release. You can find the integrated packages
+in the ``meta`` layer of the :term:`Source Directory`.
+Specifically, you
+can find the recipes that build both Wayland and Weston at
+``meta/recipes-graphics/wayland``.
+
+You can build both the Wayland and Weston packages for use only with targets
+that accept the :wikipedia:`Mesa 3D and Direct Rendering Infrastructure
+<Mesa_(computer_graphics)>`, which is also known as Mesa DRI. This implies that
+you cannot build and use the packages if your target uses, for example, the
+Intel Embedded Media and Graphics Driver (Intel EMGD) that overrides Mesa DRI.
+
+.. note::
+
+ Due to lack of EGL support, Weston 1.0.3 will not run directly on the
+ emulated QEMU hardware. However, this version of Weston will run
+ under X emulation without issues.
+
+This section describes what you need to do to implement Wayland and use
+the Weston compositor when building an image for a supporting target.
+
+Enabling Wayland in an Image
+============================
+
+To enable Wayland, you need to enable it to be built and enable it to be
+included (installed) in the image.
+
+Building Wayland
+----------------
+
+To cause Mesa to build the ``wayland-egl`` platform and Weston to build
+Wayland with Kernel Mode Setting
+(`KMS <https://wiki.archlinux.org/index.php/Kernel_Mode_Setting>`__)
+support, include the "wayland" flag in the
+:term:`DISTRO_FEATURES`
+statement in your ``local.conf`` file::
+
+ DISTRO_FEATURES:append = " wayland"
+
+.. note::
+
+ If X11 has been enabled elsewhere, Weston will build Wayland with X11
+ support.
+
+Installing Wayland and Weston
+-----------------------------
+
+To install the Wayland feature into an image, you must include the
+following
+:term:`CORE_IMAGE_EXTRA_INSTALL`
+statement in your ``local.conf`` file::
+
+ CORE_IMAGE_EXTRA_INSTALL += "wayland weston"
+
+Running Weston
+==============
+
+To run Weston inside X11, enabling it as described earlier and building
+a Sato image is sufficient. If you are running your image under Sato, a
+Weston Launcher appears in the "Utility" category.
+
+Alternatively, you can run Weston through the command-line interpreter
+(CLI), which is better suited for development work. To run Weston under
+the CLI, you need to do the following after your image is built:
+
+#. Run these commands to export ``XDG_RUNTIME_DIR``::
+
+ mkdir -p /tmp/$USER-weston
+ chmod 0700 /tmp/$USER-weston
+ export XDG_RUNTIME_DIR=/tmp/$USER-weston
+
+#. Launch Weston in the shell::
+
+ weston
+
diff --git a/documentation/dev-manual/wic.rst b/documentation/dev-manual/wic.rst
new file mode 100644
index 0000000000..049996c39e
--- /dev/null
+++ b/documentation/dev-manual/wic.rst
@@ -0,0 +1,732 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Creating Partitioned Images Using Wic
+*************************************
+
+Creating an image for a particular hardware target using the
+OpenEmbedded build system does not necessarily mean you can boot that
+image as is on your device. Physical devices accept and boot images in
+various ways depending on the specifics of the device. Usually,
+information about the hardware can tell you what image format the device
+requires. Should your device require multiple partitions on an SD card,
+flash, or an HDD, you can use the OpenEmbedded Image Creator, Wic, to
+create the properly partitioned image.
+
+The ``wic`` command generates partitioned images from existing
+OpenEmbedded build artifacts. Image generation is driven by partitioning
+commands contained in an OpenEmbedded kickstart file (``.wks``)
+specified either directly on the command line or as one of a selection
+of canned kickstart files as shown with the ``wic list images`` command
+in the
+":ref:`dev-manual/wic:generate an image using an existing kickstart file`"
+section. When you apply the command to a given set of build artifacts, the
+result is an image or set of images that can be directly written onto media and
+used on a particular system.
+
+.. note::
+
+ For a kickstart file reference, see the
+ ":ref:`ref-manual/kickstart:openembedded kickstart (\`\`.wks\`\`) reference`"
+ Chapter in the Yocto Project Reference Manual.
+
+The ``wic`` command and the infrastructure it is based on is by
+definition incomplete. The purpose of the command is to allow the
+generation of customized images, and as such, was designed to be
+completely extensible through a plugin interface. See the
+":ref:`dev-manual/wic:using the wic plugin interface`" section
+for information on these plugins.
+
+This section provides some background information on Wic, describes what
+you need to have in place to run the tool, provides instruction on how
+to use the Wic utility, provides information on using the Wic plugins
+interface, and provides several examples that show how to use Wic.
+
+Background
+==========
+
+This section provides some background on the Wic utility. While none of
+this information is required to use Wic, you might find it interesting.
+
+- The name "Wic" is derived from OpenEmbedded Image Creator (oeic). The
+ "oe" diphthong in "oeic" was promoted to the letter "w", because
+ "oeic" is both difficult to remember and to pronounce.
+
+- Wic is loosely based on the Meego Image Creator (``mic``) framework.
+ The Wic implementation has been heavily modified to make direct use
+ of OpenEmbedded build artifacts instead of package installation and
+ configuration, which are already incorporated within the OpenEmbedded
+ artifacts.
+
+- Wic is a completely independent standalone utility that initially
+ provides easier-to-use and more flexible replacements for an existing
+ functionality in OE-Core's :ref:`ref-classes-image-live`
+ class. The difference between Wic and those examples is that with Wic
+ the functionality of those scripts is implemented by a
+ general-purpose partitioning language, which is based on Red Hat
+ kickstart syntax.
+
+Requirements
+============
+
+In order to use the Wic utility with the OpenEmbedded Build system, your
+system needs to meet the following requirements:
+
+- The Linux distribution on your development host must support the
+ Yocto Project. See the ":ref:`system-requirements-supported-distros`"
+ section in the Yocto Project Reference Manual for the list of
+ distributions that support the Yocto Project.
+
+- The standard system utilities, such as ``cp``, must be installed on
+ your development host system.
+
+- You must have sourced the build environment setup script (i.e.
+ :ref:`structure-core-script`) found in the :term:`Build Directory`.
+
+- You need to have the build artifacts already available, which
+ typically means that you must have already created an image using the
+ OpenEmbedded build system (e.g. ``core-image-minimal``). While it
+ might seem redundant to generate an image in order to create an image
+ using Wic, the current version of Wic requires the artifacts in the
+ form generated by the OpenEmbedded build system.
+
+- You must build several native tools, which are built to run on the
+ build system::
+
+ $ bitbake wic-tools
+
+- Include "wic" as part of the
+ :term:`IMAGE_FSTYPES`
+ variable.
+
+- Include the name of the :ref:`wic kickstart file <openembedded-kickstart-wks-reference>`
+ as part of the :term:`WKS_FILE` variable. If multiple candidate files can
+ be provided by different layers, specify all the possible names through the
+ :term:`WKS_FILES` variable instead.
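+
+For example, the last two requirements above might be met with the following
+lines, for instance in ``conf/local.conf`` (the kickstart file name is only an
+example, taken from the list of canned files shown later in this section)::
+
+ IMAGE_FSTYPES += "wic"
+ WKS_FILE = "directdisk.wks"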
+
+Getting Help
+============
+
+You can get general help for the ``wic`` command by entering the ``wic``
+command by itself or by entering the command with a help argument as
+follows::
+
+ $ wic -h
+ $ wic --help
+ $ wic help
+
+Currently, Wic supports seven commands: ``cp``, ``create``, ``help``,
+``list``, ``ls``, ``rm``, and ``write``. You can get help for all these
+commands except "help" by using the following form::
+
+ $ wic help command
+
+For example, the following command returns help for the ``write``
+command::
+
+ $ wic help write
+
+Wic supports help for three topics: ``overview``, ``plugins``, and
+``kickstart``. You can get help for any topic using the following form::
+
+ $ wic help topic
+
+For example, the following returns overview help for Wic::
+
+ $ wic help overview
+
+There is one additional level of help for Wic. You can get help on
+individual images through the ``list`` command. You can use the ``list``
+command to return the available Wic images as follows::
+
+ $ wic list images
+ genericx86 Create an EFI disk image for genericx86*
+ edgerouter Create SD card image for Edgerouter
+ beaglebone-yocto Create SD card image for Beaglebone
+ qemuriscv Create qcow2 image for RISC-V QEMU machines
+ mkefidisk Create an EFI disk image
+ directdisk-multi-rootfs Create multi rootfs image using rootfs plugin
+ directdisk Create a 'pcbios' direct disk image
+ efi-bootdisk
+ mkhybridiso Create a hybrid ISO image
+ directdisk-gpt Create a 'pcbios' direct disk image
+ systemd-bootdisk Create an EFI disk image with systemd-boot
+ sdimage-bootpart Create SD card image with a boot partition
+ qemux86-directdisk Create a qemu machine 'pcbios' direct disk image
+ directdisk-bootloader-config Create a 'pcbios' direct disk image with custom bootloader config
+
+
+Once you know the list of available
+Wic images, you can use ``help`` with the command to get help on a
+particular image. For example, the following command returns help on the
+"beaglebone-yocto" image::
+
+ $ wic list beaglebone-yocto help
+
+ Creates a partitioned SD card image for Beaglebone.
+ Boot files are located in the first vfat partition.
+
+Operational Modes
+=================
+
+You can use Wic in two different modes, depending on how much control
+you need for specifying the OpenEmbedded build artifacts that are used
+for creating the image: Raw and Cooked:
+
+- *Raw Mode:* You explicitly specify build artifacts through Wic
+ command-line arguments.
+
+- *Cooked Mode:* The current
+ :term:`MACHINE` setting and image
+ name are used to automatically locate and provide the build
+ artifacts. You just supply a kickstart file and the name of the image
+ from which to use artifacts.
+
+Regardless of the mode you use, you need to have the build artifacts
+ready and available.
+
+Raw Mode
+--------
+
+Running Wic in raw mode allows you to specify all the partitions through
+the ``wic`` command line. The primary use for raw mode is if you have
+built your kernel outside of the Yocto Project :term:`Build Directory`.
+In other words, you can point to arbitrary kernel, root filesystem locations,
+and so forth. Contrast this behavior with cooked mode where Wic looks in the
+:term:`Build Directory` (e.g. ``tmp/deploy/images/``\ machine).
+
+The general form of the ``wic`` command in raw mode is::
+
+ $ wic create wks_file options ...
+
+ Where:
+
+ wks_file:
+ An OpenEmbedded kickstart file. You can provide
+ your own custom file or use a file from a set of
+ existing files as described by further options.
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -o OUTDIR, --outdir OUTDIR
+ name of directory to create image in
+ -e IMAGE_NAME, --image-name IMAGE_NAME
+ name of the image to use the artifacts from e.g. core-
+ image-sato
+ -r ROOTFS_DIR, --rootfs-dir ROOTFS_DIR
+ path to the /rootfs dir to use as the .wks rootfs
+ source
+ -b BOOTIMG_DIR, --bootimg-dir BOOTIMG_DIR
+ path to the dir containing the boot artifacts (e.g.
+ /EFI or /syslinux dirs) to use as the .wks bootimg
+ source
+ -k KERNEL_DIR, --kernel-dir KERNEL_DIR
+ path to the dir containing the kernel to use in the
+ .wks bootimg
+ -n NATIVE_SYSROOT, --native-sysroot NATIVE_SYSROOT
+ path to the native sysroot containing the tools to use
+ to build the image
+ -s, --skip-build-check
+ skip the build check
+ -f, --build-rootfs build rootfs
+ -c {gzip,bzip2,xz}, --compress-with {gzip,bzip2,xz}
+ compress image with specified compressor
+ -m, --bmap generate .bmap
+ --no-fstab-update Do not change fstab file.
+ -v VARS_DIR, --vars VARS_DIR
+ directory with <image>.env files that store bitbake
+ variables
+ -D, --debug output debug information
+
+.. note::
+
+ You do not need root privileges to run Wic. In fact, you should not
+ run as root when using the utility.
+
+Cooked Mode
+-----------
+
+Running Wic in cooked mode leverages artifacts in the
+:term:`Build Directory`. In other words, you do not have to specify kernel or
+root filesystem locations as part of the command. All you need to provide is
+a kickstart file and the name of the image from which to use artifacts
+by using the "-e" option. Wic looks in the :term:`Build Directory` (e.g.
+``tmp/deploy/images/``\ machine) for artifacts.
+
+The general form of the ``wic`` command using Cooked Mode is as follows::
+
+ $ wic create wks_file -e IMAGE_NAME
+
+ Where:
+
+ wks_file:
+ An OpenEmbedded kickstart file. You can provide
+ your own custom file or use a file from a set of
+ existing files provided with the Yocto Project
+ release.
+
+ required argument:
+ -e IMAGE_NAME, --image-name IMAGE_NAME
+ name of the image to use the artifacts from e.g. core-
+ image-sato
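+
+For example, assuming you have already built ``core-image-minimal``, a cooked
+mode invocation using one of the canned kickstart files could look like::
+
+ $ wic create mkefidisk -e core-image-minimal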
+
+Using an Existing Kickstart File
+================================
+
+If you do not want to create your own kickstart file, you can use an
+existing file provided by the Wic installation. As shipped, kickstart
+files can be found in the :ref:`overview-manual/development-environment:yocto project source repositories` in the
+following two locations::
+
+ poky/meta-yocto-bsp/wic
+ poky/scripts/lib/wic/canned-wks
+
+Use the following command to list the available kickstart files::
+
+ $ wic list images
+ genericx86 Create an EFI disk image for genericx86*
+ edgerouter Create SD card image for Edgerouter
+ beaglebone-yocto Create SD card image for Beaglebone
+ qemuriscv Create qcow2 image for RISC-V QEMU machines
+ mkefidisk Create an EFI disk image
+ directdisk-multi-rootfs Create multi rootfs image using rootfs plugin
+ directdisk Create a 'pcbios' direct disk image
+ efi-bootdisk
+ mkhybridiso Create a hybrid ISO image
+ directdisk-gpt Create a 'pcbios' direct disk image
+ systemd-bootdisk Create an EFI disk image with systemd-boot
+ sdimage-bootpart Create SD card image with a boot partition
+ qemux86-directdisk Create a qemu machine 'pcbios' direct disk image
+ directdisk-bootloader-config Create a 'pcbios' direct disk image with custom bootloader config
+
+When you use an existing file, you
+do not have to use the ``.wks`` extension. Here is an example in Raw
+Mode that uses the ``directdisk`` file::
+
+ $ wic create directdisk -r rootfs_dir -b bootimg_dir \
+ -k kernel_dir -n native_sysroot
+
+Here are the actual partition language commands used in the
+``genericx86.wks`` file to generate an image::
+
+ # short-description: Create an EFI disk image for genericx86*
+ # long-description: Creates a partitioned EFI disk image for genericx86* machines
+ part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
+ part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
+ part swap --ondisk sda --size 44 --label swap1 --fstype=swap
+
+ bootloader --ptable gpt --timeout=5 --append="rootfstype=ext4 console=ttyS0,115200 console=tty0"
+
+Using the Wic Plugin Interface
+==============================
+
+You can extend and specialize Wic functionality by using Wic plugins.
+This section explains the Wic plugin interface.
+
+.. note::
+
+ Wic plugins consist of "source" and "imager" plugins. Imager plugins
+ are beyond the scope of this section.
+
+Source plugins provide a mechanism to customize partition content during
+the Wic image generation process. You can use source plugins to map
+values that you specify using ``--source`` commands in kickstart files
+(i.e. ``*.wks``) to a plugin implementation used to populate a given
+partition.
+
+.. note::
+
+ If you use plugins that have build-time dependencies (e.g. native
+ tools, bootloaders, and so forth) when building a Wic image, you need
+ to specify those dependencies using the :term:`WKS_FILE_DEPENDS`
+ variable.
+
+Source plugins are subclasses defined in plugin files. As shipped, the
+Yocto Project provides several plugin files. You can see the source
+plugin files that ship with the Yocto Project
+:yocto_git:`here </poky/tree/scripts/lib/wic/plugins/source>`.
+Each of these plugin files contains source plugins that are designed to
+populate a specific Wic image partition.
+
+Source plugins are subclasses of the ``SourcePlugin`` class, which is
+defined in the ``poky/scripts/lib/wic/pluginbase.py`` file. For example,
+the ``BootimgEFIPlugin`` source plugin found in the ``bootimg-efi.py``
+file is a subclass of the ``SourcePlugin`` class, which is found in the
+``pluginbase.py`` file.
+
+You can also implement source plugins in a layer outside of the Source
+Repositories (external layer). To do so, be sure that your plugin files
+are located in a directory whose path is
+``scripts/lib/wic/plugins/source/`` within your external layer. When the
+plugin files are located there, the source plugins they contain are made
+available to Wic.
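+
+As a hypothetical sketch (plugin, class and file names are examples; the method
+signature follows the ``do_prepare_partition()`` example shown later in this
+section), such an external plugin file could start like this::
+
+ # scripts/lib/wic/plugins/source/mycustom.py in your layer
+ from wic.pluginbase import SourcePlugin
+
+ class MyCustomPlugin(SourcePlugin):
+     """Populate a partition with custom, pre-built content."""
+
+     name = 'mycustom'
+
+     @classmethod
+     def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+                              oe_builddir, bootimg_dir, kernel_dir,
+                              rootfs_dir, native_sysroot):
+         # Custom content-population logic would go here.
+         pass
+
+A partition in a kickstart file would then select it with ``--source mycustom``.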
+
+When the Wic implementation needs to invoke a partition-specific
+implementation, it looks for the plugin with the same name as the
+``--source`` parameter used in the kickstart file given to that
+partition. For example, if the partition is set up using the following
+command in a kickstart file::
+
+ part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+
+then Wic uses the methods defined as class members of the matching
+source plugin (i.e. ``bootimg-pcbios``) in the ``bootimg-pcbios.py``
+plugin file.
+
+To be more concrete, here is the corresponding plugin definition from
+the ``bootimg-pcbios.py`` file for the previous command along with an
+example method called by the Wic implementation when it needs to prepare
+a partition using an implementation-specific function::
+
+ .
+ .
+ .
+ class BootimgPcbiosPlugin(SourcePlugin):
+ """
+ Create MBR boot partition and install syslinux on it.
+ """
+
+ name = 'bootimg-pcbios'
+ .
+ .
+ .
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for legacy bios boot partition.
+ """
+ .
+ .
+ .
+
+If a
+subclass (plugin) itself does not implement a particular function, Wic
+locates and uses the default version in the superclass. It is for this
+reason that all source plugins are derived from the ``SourcePlugin``
+class.
+
+The ``SourcePlugin`` class defined in the ``pluginbase.py`` file defines
+a set of methods that source plugins can implement or override. Any
+plugins (subclass of ``SourcePlugin``) that do not implement a
+particular method inherit the implementation of the method from the
+``SourcePlugin`` class. For details, see the ``SourcePlugin`` class in
+the ``pluginbase.py`` file.
+
+The following list describes the methods implemented in the
+``SourcePlugin`` class:
+
+- ``do_prepare_partition()``: Called to populate a partition with
+ actual content. In other words, the method prepares the final
+ partition image that is incorporated into the disk image.
+
+- ``do_configure_partition()``: Called before
+ ``do_prepare_partition()`` to create custom configuration files for a
+ partition (e.g. syslinux or grub configuration files).
+
+- ``do_install_disk()``: Called after all partitions have been
+ prepared and assembled into a disk image. This method provides a hook
+ to allow finalization of a disk image (e.g. writing an MBR).
+
+- ``do_stage_partition()``: Special content-staging hook called
+ before ``do_prepare_partition()``. This method is normally empty.
+
+ Typically, a partition just uses the passed-in parameters (e.g. the
+ unmodified value of ``bootimg_dir``). However, in some cases, things
+ might need to be more tailored. As an example, certain files might
+ additionally need to be taken from ``bootimg_dir + /boot``. This hook
+ allows those files to be staged in a customized fashion.
+
+ .. note::
+
+ ``get_bitbake_var()`` allows you to access non-standard variables that
+ you might want to use for this behavior.
+
+You can extend the source plugin mechanism. To add more hooks, create
+more source plugin methods within ``SourcePlugin`` and the corresponding
+derived subclasses. The code that calls the plugin methods uses the
+``plugin.get_source_plugin_methods()`` function to find the method or
+methods needed by the call. Retrieval works by passing in a dict whose
+keys are the names of the methods of interest; on success, the
+corresponding values are filled in with the actual methods.
+See the Wic implementation for examples and details.
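+
+Here is a rough sketch of the dict-based lookup described above (the
+helper below illustrates the idea and is not the exact code found in
+the Wic implementation)::
+
+   def get_source_plugin_methods(plugin_cls, methods):
+       """Fill the values of 'methods' with callables found on plugin_cls."""
+       for method_name in methods:
+           func = getattr(plugin_cls, method_name, None)
+           if func is None:
+               raise RuntimeError("plugin %s does not implement %s" %
+                                  (plugin_cls.name, method_name))
+           methods[method_name] = func
+       return methods
+
+   # A caller asks only for the hooks it needs at this call site:
+   methods = {'do_prepare_partition': None, 'do_configure_partition': None}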
+
+Wic Examples
+============
+
+This section provides several examples that show how to use the Wic
+utility. All the examples assume that the requirements listed in the
+":ref:`dev-manual/wic:requirements`" section have been met. The
+examples assume the previously generated image is
+``core-image-minimal``.
+
+Generate an Image using an Existing Kickstart File
+--------------------------------------------------
+
+This example runs in Cooked Mode and uses the ``mkefidisk`` kickstart
+file::
+
+ $ wic create mkefidisk -e core-image-minimal
+ INFO: Building wic-tools...
+ .
+ .
+ .
+ INFO: The new image(s) can be found here:
+ ./mkefidisk-201804191017-sda.direct
+
+ The following build artifacts were used to create the image(s):
+ ROOTFS_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs
+ BOOTIMG_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share
+ KERNEL_DIR: /home/stephano/yocto/build/tmp-glibc/deploy/images/qemux86
+ NATIVE_SYSROOT: /home/stephano/yocto/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native
+
+ INFO: The image(s) were created using OE kickstart file:
+ /home/stephano/yocto/openembedded-core/scripts/lib/wic/canned-wks/mkefidisk.wks
+
+The previous example shows the easiest way to create an image by running
+in cooked mode and supplying a kickstart file and the "-e" option to
+point to the existing build artifacts. Your ``local.conf`` file needs to
+have the :term:`MACHINE` variable set
+to the machine you are using, which is "qemux86" in this example.
+
+Once the image builds, the output reports the image location, the build
+artifacts used, and the kickstart file that was applied.
+
+.. note::
+
+ You should always verify the details provided in the output to make
+ sure that the image was indeed created exactly as expected.
+
+Continuing with the example, you can now write the image from the
+:term:`Build Directory` onto a USB stick, or whatever media for which you
+built your image, and boot from the media. You can write the image by using
+``bmaptool`` or ``dd``::
+
+ $ oe-run-native bmap-tools-native bmaptool copy mkefidisk-201804191017-sda.direct /dev/sdX
+
+or ::
+
+ $ sudo dd if=mkefidisk-201804191017-sda.direct of=/dev/sdX
+
+.. note::
+
+ For more information on how to use the ``bmaptool``
+ to flash a device with an image, see the
+ ":ref:`dev-manual/bmaptool:flashing images using \`\`bmaptool\`\``"
+ section.
+
+Using a Modified Kickstart File
+-------------------------------
+
+Because partitioned image creation is driven by the kickstart file, it
+is easy to affect image creation by changing the parameters in the file.
+This next example demonstrates that through modification of the
+``directdisk-gpt`` kickstart file.
+
+As mentioned earlier, you can use the command ``wic list images`` to
+show the list of existing kickstart files. The directory in which the
+``directdisk-gpt.wks`` file resides is
+``scripts/lib/wic/canned-wks/``, which is located in the
+:term:`Source Directory` (e.g. ``poky``).
+Because available files reside in this directory, you can create and add
+your own custom files to the directory. Subsequent use of the
+``wic list images`` command would then include your kickstart files.
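+
+For example (the file name here is purely illustrative), copying a
+custom kickstart file into that directory is enough for it to show up
+in the listing::
+
+   $ cp my-custom-image.wks poky/scripts/lib/wic/canned-wks/
+   $ wic list images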
+
+In this example, the existing ``directdisk-gpt`` file already does most
+of what is needed. However, for the hardware in this example, the image
+will need to boot from ``sdb`` instead of ``sda``, which is what the
+``directdisk-gpt`` kickstart file uses.
+
+The example begins by making a copy of the ``directdisk-gpt.wks`` file
+in the ``scripts/lib/wic/canned-wks`` directory and then by changing
+the lines that specify the target disk from which to boot::
+
+ $ cp /home/stephano/yocto/poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks \
+ /home/stephano/yocto/poky/scripts/lib/wic/canned-wks/directdisksdb-gpt.wks
+
+Next, the example modifies the ``directdisksdb-gpt.wks`` file and
+changes all instances of "``--ondisk sda``" to "``--ondisk sdb``". The
+example changes the following two lines and leaves the remaining lines
+untouched::
+
+ part /boot --source bootimg-pcbios --ondisk sdb --label boot --active --align 1024
+ part / --source rootfs --ondisk sdb --fstype=ext4 --label platform --align 1024 --use-uuid
+
+Once the lines are changed, the
+example generates the ``directdisksdb-gpt`` image. The command points
+the process at the ``core-image-minimal`` artifacts for the Next Unit of
+Computing (nuc) :term:`MACHINE` specified in the
+``local.conf`` file::
+
+ $ wic create directdisksdb-gpt -e core-image-minimal
+ INFO: Building wic-tools...
+ .
+ .
+ .
+ Initialising tasks: 100% |#######################################| Time: 0:00:01
+ NOTE: Executing SetScene Tasks
+ NOTE: Executing RunQueue Tasks
+ NOTE: Tasks Summary: Attempted 1161 tasks of which 1157 didn't need to be rerun and all succeeded.
+ INFO: Creating image(s)...
+
+ INFO: The new image(s) can be found here:
+ ./directdisksdb-gpt-201710090938-sdb.direct
+
+ The following build artifacts were used to create the image(s):
+ ROOTFS_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs
+ BOOTIMG_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share
+ KERNEL_DIR: /home/stephano/yocto/build/tmp-glibc/deploy/images/qemux86
+ NATIVE_SYSROOT: /home/stephano/yocto/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native
+
+ INFO: The image(s) were created using OE kickstart file:
+ /home/stephano/yocto/poky/scripts/lib/wic/canned-wks/directdisksdb-gpt.wks
+
+Continuing with the example, you can now directly ``dd`` the image to a
+USB stick, or whatever media for which you built your image, and boot
+the resulting media::
+
+ $ sudo dd if=directdisksdb-gpt-201710090938-sdb.direct of=/dev/sdb
+ 140966+0 records in
+ 140966+0 records out
+ 72174592 bytes (72 MB, 69 MiB) copied, 78.0282 s, 925 kB/s
+ $ sudo eject /dev/sdb
+
+Using a Modified Kickstart File and Running in Raw Mode
+-------------------------------------------------------
+
+This next example manually specifies each build artifact (runs in Raw
+Mode) and uses a modified kickstart file. The example also uses the
+``-o`` option to cause Wic to create the output somewhere other than the
+default output directory, which is the current directory::
+
+ $ wic create test.wks -o /home/stephano/testwic \
+ --rootfs-dir /home/stephano/yocto/build/tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/rootfs \
+ --bootimg-dir /home/stephano/yocto/build/tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share \
+ --kernel-dir /home/stephano/yocto/build/tmp/deploy/images/qemux86 \
+ --native-sysroot /home/stephano/yocto/build/tmp/work/i586-poky-linux/wic-tools/1.0-r0/recipe-sysroot-native
+
+ INFO: Creating image(s)...
+
+ INFO: The new image(s) can be found here:
+ /home/stephano/testwic/test-201710091445-sdb.direct
+
+ The following build artifacts were used to create the image(s):
+ ROOTFS_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs
+ BOOTIMG_DIR: /home/stephano/yocto/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share
+ KERNEL_DIR: /home/stephano/yocto/build/tmp-glibc/deploy/images/qemux86
+ NATIVE_SYSROOT: /home/stephano/yocto/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native
+
+ INFO: The image(s) were created using OE kickstart file:
+ test.wks
+
+For this example,
+:term:`MACHINE` did not have to be
+specified in the ``local.conf`` file since the artifacts are specified
+manually.
+
+Using Wic to Manipulate an Image
+--------------------------------
+
+Wic image manipulation allows you to shorten turnaround time during
+image development. For example, you can use Wic to delete the kernel
+partition of a Wic image and then insert a newly built kernel. This
+saves you time from having to rebuild the entire image each time you
+modify the kernel.
+
+.. note::
+
+ In order to use Wic to manipulate a Wic image as in this example,
+ your development machine must have the ``mtools`` package installed.
+
+The following example examines the contents of the Wic image, deletes
+the existing kernel, and then inserts a new kernel:
+
+#. *List the Partitions:* Use the ``wic ls`` command to list all the
+ partitions in the Wic image::
+
+ $ wic ls tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic
+ Num Start End Size Fstype
+ 1 1048576 25041919 23993344 fat16
+ 2 25165824 72157183 46991360 ext4
+
+ The previous output shows two partitions in the
+ ``core-image-minimal-qemux86.wic`` image.
+
+#. *Examine a Particular Partition:* Use the ``wic ls`` command again
+ but in a different form to examine a particular partition.
+
+ .. note::
+
+ You can get command usage on any Wic command using the following
+ form::
+
+ $ wic help command
+
+
+ For example, the following command shows you the various ways to
+ use the ``wic ls`` command::
+
+ $ wic help ls
+
+
+ The following command shows what is in partition one::
+
+ $ wic ls tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is E894-1809
+ Directory for ::/
+
+ libcom32 c32 186500 2017-10-09 16:06
+ libutil c32 24148 2017-10-09 16:06
+ syslinux cfg 220 2017-10-09 16:06
+ vesamenu c32 27104 2017-10-09 16:06
+ vmlinuz 6904608 2017-10-09 16:06
+ 5 files 7 142 580 bytes
+ 16 582 656 bytes free
+
+ The previous output shows five files, with ``vmlinuz`` being the
+ kernel.
+
+ .. note::
+
+ If you see the following error, you need to update or create a
+ ``~/.mtoolsrc`` file and be sure to have the line "mtools_skip_check=1"
+ in the file. Then, run the Wic command again::
+
+ ERROR: _exec_cmd: /usr/bin/mdir -i /tmp/wic-parttfokuwra ::/ returned '1' instead of 0
+ output: Total number of sectors (47824) not a multiple of sectors per track (32)!
+ Add mtools_skip_check=1 to your .mtoolsrc file to skip this test
+
+
+#. *Remove the Old Kernel:* Use the ``wic rm`` command to remove the
+ ``vmlinuz`` file (kernel)::
+
+ $ wic rm tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1/vmlinuz
+
+#. *Add In the New Kernel:* Use the ``wic cp`` command to add the
+ updated kernel to the Wic image. Depending on how you built your
+ kernel, it could be in different places. If you used ``devtool`` and
+ an SDK to build your kernel, it resides in the ``tmp/work`` directory
+ of the extensible SDK. If you used ``make`` to build the kernel, the
+ kernel will be in the ``workspace/sources`` area.
+
+ The following example assumes ``devtool`` was used to build the
+ kernel::
+
+ $ wic cp poky_sdk/tmp/work/qemux86-poky-linux/linux-yocto/4.12.12+git999-r0/linux-yocto-4.12.12+git999/arch/x86/boot/bzImage \
+ poky/build/tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1/vmlinuz
+
+ Once the new kernel is added back into the image, you can use the
+ ``dd`` command or :ref:`bmaptool
+ <dev-manual/bmaptool:flashing images using \`\`bmaptool\`\`>`
+ to flash your wic image onto an SD card or USB stick and test your
+ target.
+
+ .. note::
+
+ Using ``bmaptool`` is generally 10 to 20 times faster than using ``dd``.
+
diff --git a/documentation/dev-manual/x32-psabi.rst b/documentation/dev-manual/x32-psabi.rst
new file mode 100644
index 0000000000..92b1f96fa4
--- /dev/null
+++ b/documentation/dev-manual/x32-psabi.rst
@@ -0,0 +1,54 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Using x32 psABI
+***************
+
+The x32 processor-specific Application Binary Interface (`x32
+psABI <https://software.intel.com/en-us/node/628948>`__) is a native
+32-bit ABI for Intel 64 (x86-64) architectures. An
+ABI defines the calling conventions between functions in a processing
+environment. The interface determines what registers are used and what
+the sizes are for various C data types.
+
+Some processing environments prefer using 32-bit applications even when
+running on Intel 64-bit platforms. Consider the i386 psABI, which is a
+very old 32-bit ABI for Intel 64-bit platforms. The i386 psABI does not
+provide efficient use and access of the Intel 64-bit processor
+resources, leaving the system underutilized. Now consider the x86_64
+psABI. This ABI is newer and uses 64 bits for data sizes and program
+pointers. The extra bits increase the footprint of programs and
+libraries and also increase the memory and file system size
+requirements. Executing under the x32 psABI enables user programs to
+utilize CPU and system resources more efficiently while keeping the
+memory footprint of the applications low. Extra bits are used for
+registers but not for addressing mechanisms.
+
+The Yocto Project supports the final specifications of x32 psABI as
+follows:
+
+- You can create packages and images in x32 psABI format on x86_64
+ architecture targets.
+
+- You can successfully build recipes with the x32 toolchain.
+
+- You can create and boot ``core-image-minimal`` and
+ ``core-image-sato`` images.
+
+- There is RPM Package Manager (RPM) support for x32 binaries.
+
+- There is support for large images.
+
+To use the x32 psABI, you need to edit your ``conf/local.conf``
+configuration file as follows::
+
+ MACHINE = "qemux86-64"
+ DEFAULTTUNE = "x86-64-x32"
+ baselib = "${@d.getVar('BASE_LIB:tune-' + (d.getVar('DEFAULTTUNE') \
+ or 'INVALID')) or 'lib'}"
+
+Once you have set
+up your configuration file, use BitBake to build an image that supports
+the x32 psABI. Here is an example::
+
+ $ bitbake core-image-sato
+
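+As an optional sanity check (a suggestion rather than a documented build
+step), you can run the ``file`` utility on a binary taken from the
+resulting image; executables built for the x32 psABI are reported as
+32-bit ELF objects targeting the x86-64 architecture. For example, with
+a binary path of your choosing::
+
+   $ file /path/to/extracted/rootfs/bin/busybox
+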
diff --git a/documentation/index.rst b/documentation/index.rst
index 6335c707e0..3fef1704a4 100644
--- a/documentation/index.rst
+++ b/documentation/index.rst
@@ -26,6 +26,7 @@ Welcome to the Yocto Project Documentation
:caption: Manuals
Overview and Concepts Manual <overview-manual/index>
+ Contributor Guide <contributor-guide/index>
Reference Manual <ref-manual/index>
Board Support Package (BSP) Developer's guide <bsp-guide/index>
Development Tasks Manual <dev-manual/index>
diff --git a/documentation/kernel-dev/advanced.rst b/documentation/kernel-dev/advanced.rst
index b5290b61b3..f4820a4ad2 100644
--- a/documentation/kernel-dev/advanced.rst
+++ b/documentation/kernel-dev/advanced.rst
@@ -69,8 +69,7 @@ to indicate the branch.
You can use the :term:`KBRANCH` value to define an alternate branch typically
with a machine override as shown here from the ``meta-yocto-bsp`` layer::
- KBRANCH:edgerouter = "standard/edgerouter"
-
+ KBRANCH:beaglebone-yocto = "standard/beaglebone"
The linux-yocto style recipes can optionally define the following
variables:
@@ -183,7 +182,7 @@ the structure:
order to define a base kernel policy or major kernel type to be
reused across multiple BSPs, place the file in ``ktypes`` directory.
-These distinctions can easily become blurred - especially as out-of-tree
+These distinctions can easily become blurred --- especially as out-of-tree
features slowly merge upstream over time. Also, remember that how the
description files are placed is a purely logical organization and has no
impact on the functionality of the kernel Metadata. There is no impact
@@ -304,8 +303,8 @@ The following listings show the ``build.scc`` file and part of the
.
.
.
- char *dump_write = NULL, *files_source = NULL;
- int opt;
+ char *dump_write = NULL, *files_source = NULL;
+ int opt;
--
2.10.1
diff --git a/documentation/kernel-dev/common.rst b/documentation/kernel-dev/common.rst
index a5dd02ccf2..815695652b 100644
--- a/documentation/kernel-dev/common.rst
+++ b/documentation/kernel-dev/common.rst
@@ -101,13 +101,13 @@ section:
For background information on working with common and BSP layers,
see the
- ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+ ":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual and the
":ref:`bsp-guide/bsp:bsp layers`" section in the Yocto Project Board
Support (BSP) Developer's Guide, respectively. For information on how to
use the ``bitbake-layers create-layer`` command to quickly set up a layer,
see the
- ":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
+ ":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
section in the Yocto Project Development Tasks Manual.
4. *Inform the BitBake Build Environment About Your Layer:* As directed
@@ -278,13 +278,13 @@ section:
For background information on working with common and BSP layers,
see the
- ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+ ":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual and the
":ref:`bsp-guide/bsp:bsp layers`" section in the Yocto Project Board
Support (BSP) Developer's Guide, respectively. For information on how to
use the ``bitbake-layers create-layer`` command to quickly set up a layer,
see the
- ":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
+ ":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
section in the Yocto Project Development Tasks Manual.
4. *Inform the BitBake Build Environment About Your Layer:* As directed
@@ -364,7 +364,7 @@ layer contains its own :term:`BitBake`
append files (``.bbappend``) and provides a convenient mechanism to
create your own recipe files (``.bb``) as well as store and use kernel
patch files. For background information on working with layers, see the
-":ref:`dev-manual/common-tasks:understanding and creating layers`"
+":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual.
.. note::
@@ -372,7 +372,7 @@ section in the Yocto Project Development Tasks Manual.
The Yocto Project comes with many tools that simplify tasks you need
to perform. One such tool is the ``bitbake-layers create-layer``
command, which simplifies creating a new layer. See the
- ":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
+ ":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
section in the Yocto Project Development Tasks Manual for
information on how to use this script to quick set up a new layer.
@@ -383,11 +383,7 @@ home directory:
1. *Create Structure*: Create the layer's structure::
- $ mkdir meta-mylayer
- $ mkdir meta-mylayer/conf
- $ mkdir meta-mylayer/recipes-kernel
- $ mkdir meta-mylayer/recipes-kernel/linux
- $ mkdir meta-mylayer/recipes-kernel/linux/linux-yocto
+ $ mkdir -p meta-mylayer/conf meta-mylayer/recipes-kernel/linux/linux-yocto
The ``conf`` directory holds your configuration files, while the
``recipes-kernel`` directory holds your append file and eventual
@@ -425,7 +421,7 @@ home directory:
The :term:`FILESEXTRAPATHS` and :term:`SRC_URI` statements
enable the OpenEmbedded build system to find patch files. For more
information on using append files, see the
- ":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
+ ":ref:`dev-manual/layers:appending other layers metadata with your layer`"
section in the Yocto Project Development Tasks Manual.
Modifying an Existing Recipe
@@ -455,13 +451,13 @@ Creating the Append File
You create this file in your custom layer. You also name it accordingly
based on the linux-yocto recipe you are using. For example, if you are
-modifying the ``meta/recipes-kernel/linux/linux-yocto_4.12.bb`` recipe,
+modifying the ``meta/recipes-kernel/linux/linux-yocto_5.15.bb`` recipe,
the append file will typically be located as follows within your custom
layer:
.. code-block:: none
- your-layer/recipes-kernel/linux/linux-yocto_4.12.bbappend
+ your-layer/recipes-kernel/linux/linux-yocto_5.15.bbappend
The append file should initially extend the
:term:`FILESPATH` search path by
@@ -489,36 +485,36 @@ As an example, consider the following append file used by the BSPs in
.. code-block:: none
- meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.12.bbappend
+ meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.15.bbappend
Here are the contents of this file. Be aware that the actual commit ID
strings in this example listing might be different than the actual
strings in the file from the ``meta-yocto-bsp`` layer upstream.
::
- KBRANCH:genericx86 = "standard/base"
- KBRANCH:genericx86-64 = "standard/base"
+ KBRANCH:genericx86 = "v5.15/standard/base"
+ KBRANCH:genericx86-64 = "v5.15/standard/base"
+ KBRANCH:edgerouter = "v5.15/standard/edgerouter"
+ KBRANCH:beaglebone-yocto = "v5.15/standard/beaglebone"
KMACHINE:genericx86 ?= "common-pc"
KMACHINE:genericx86-64 ?= "common-pc-64"
- KBRANCH:edgerouter = "standard/edgerouter"
- KBRANCH:beaglebone = "standard/beaglebone"
-
- SRCREV_machine:genericx86 ?= "d09f2ce584d60ecb7890550c22a80c48b83c2e19"
- SRCREV_machine:genericx86-64 ?= "d09f2ce584d60ecb7890550c22a80c48b83c2e19"
- SRCREV_machine:edgerouter ?= "b5c8cfda2dfe296410d51e131289fb09c69e1e7d"
- SRCREV_machine:beaglebone ?= "b5c8cfda2dfe296410d51e131289fb09c69e1e7d"
+ KMACHINE:beaglebone-yocto ?= "beaglebone"
+ SRCREV_machine:genericx86 ?= "0b628306d1f9ea28c0e86369ce9bb87a47893c9c"
+ SRCREV_machine:genericx86-64 ?= "0b628306d1f9ea28c0e86369ce9bb87a47893c9c"
+ SRCREV_machine:edgerouter ?= "90f1ee6589264545f548d731c2480b08a007230f"
+ SRCREV_machine:beaglebone-yocto ?= "9aabbaa89fcb21af7028e814c1f5b61171314d5a"
COMPATIBLE_MACHINE:genericx86 = "genericx86"
COMPATIBLE_MACHINE:genericx86-64 = "genericx86-64"
COMPATIBLE_MACHINE:edgerouter = "edgerouter"
- COMPATIBLE_MACHINE:beaglebone = "beaglebone"
+ COMPATIBLE_MACHINE:beaglebone-yocto = "beaglebone-yocto"
- LINUX_VERSION:genericx86 = "4.12.7"
- LINUX_VERSION:genericx86-64 = "4.12.7"
- LINUX_VERSION:edgerouter = "4.12.10"
- LINUX_VERSION:beaglebone = "4.12.10"
+ LINUX_VERSION:genericx86 = "5.15.72"
+ LINUX_VERSION:genericx86-64 = "5.15.72"
+ LINUX_VERSION:edgerouter = "5.15.54"
+ LINUX_VERSION:beaglebone-yocto = "5.15.54"
This append file
contains statements used to support several BSPs that ship with the
@@ -1044,9 +1040,7 @@ Section.
additional structure to your layer using the following commands::
$ cd ~/meta-mylayer
- $ mkdir recipes-kernel
- $ mkdir recipes-kernel/linux
- $ mkdir recipes-kernel/linux/linux-yocto
+ $ mkdir -p recipes-kernel recipes-kernel/linux/linux-yocto
Once you have created this
hierarchy in your layer, you can move the patch file using the
@@ -1070,7 +1064,7 @@ Section.
For more information on append files and patches, see the
":ref:`kernel-dev/common:creating the append file`" and
":ref:`kernel-dev/common:applying patches`" sections. You can also see the
- ":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
+ ":ref:`dev-manual/layers:appending other layers metadata with your layer`"
section in the Yocto Project Development Tasks Manual.
.. note::
@@ -1081,7 +1075,7 @@ Section.
the following sequence of commands::
$ cd poky/build
- $ bitbake -c cleanall yocto-linux
+ $ bitbake -c cleanall linux-yocto
$ bitbake core-image-minimal -c cleanall
$ bitbake core-image-minimal
$ runqemu qemux86
@@ -1376,7 +1370,7 @@ In order to run this task, you must have an existing ``.config`` file.
See the ":ref:`kernel-dev/common:using \`\`menuconfig\`\``" section for
information on how to create a configuration file.
-Following is sample output from the ``do_kernel_configcheck`` task:
+Here is sample output from the ``do_kernel_configcheck`` task:
.. code-block:: none
@@ -1755,10 +1749,10 @@ looks much like the one provided with the ``hello-mod`` template::
SRC := $(shell pwd)
all:
- $(MAKE) -C $(KERNEL_SRC) M=$(SRC)
+ $(MAKE) -C $(KERNEL_SRC) M=$(SRC)
modules_install:
- $(MAKE) -C $(KERNEL_SRC) M=$(SRC) modules_install
+ $(MAKE) -C $(KERNEL_SRC) M=$(SRC) modules_install
...
The important point to note here is the :term:`KERNEL_SRC` variable. The
@@ -1809,7 +1803,7 @@ tree. Using Git is an efficient way to see what has changed in the tree.
What Changed in a Kernel?
-------------------------
-Following are a few examples that show how to use Git commands to
+Here are a few examples that show how to use Git commands to
examine changes. These examples are by no means the only way to see
changes.
diff --git a/documentation/kernel-dev/concepts-appx.rst b/documentation/kernel-dev/concepts-appx.rst
index 910318e0f9..7b734a2a39 100644
--- a/documentation/kernel-dev/concepts-appx.rst
+++ b/documentation/kernel-dev/concepts-appx.rst
@@ -117,7 +117,7 @@ upstream Linux kernel development and are managed by the Yocto Project
team's Yocto Linux kernel development strategy. It is the Yocto Project
team's policy to not back-port minor features to the released Yocto
Linux kernel. They only consider back-porting significant technological
-jumps - and, that is done after a complete gap analysis. The reason
+jumps --- and, that is done after a complete gap analysis. The reason
for this policy is that back-porting any small to medium sized change
from an evolving Linux kernel can easily create mismatches,
incompatibilities and very subtle errors.
diff --git a/documentation/kernel-dev/faq.rst b/documentation/kernel-dev/faq.rst
index e40e3ff372..4dffa90dbd 100644
--- a/documentation/kernel-dev/faq.rst
+++ b/documentation/kernel-dev/faq.rst
@@ -36,9 +36,9 @@ How do I install/not-install the kernel image on the root filesystem?
The kernel image (e.g. ``vmlinuz``) is provided by the
``kernel-image`` package. Image recipes depend on ``kernel-base``. To
specify whether or not the kernel image is installed in the generated
-root filesystem, override ``RDEPENDS:${KERNEL_PACKAGE_NAME}-base`` to include or not
+root filesystem, override ``RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base`` to include or not
include "kernel-image". See the
-":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
+":ref:`dev-manual/layers:appending other layers metadata with your layer`"
section in the
Yocto Project Development Tasks Manual for information on how to use an
append file to override metadata.
diff --git a/documentation/kernel-dev/intro.rst b/documentation/kernel-dev/intro.rst
index e406f6e47f..cc879db887 100644
--- a/documentation/kernel-dev/intro.rst
+++ b/documentation/kernel-dev/intro.rst
@@ -87,7 +87,7 @@ understand the following documentation:
as described in the Yocto Project Application Development and the
Extensible Software Development Kit (eSDK) manual.
-- The ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+- The ":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual.
- The ":ref:`kernel-dev/intro:kernel modification workflow`" section.
diff --git a/documentation/migration-guides/migration-1.4.rst b/documentation/migration-guides/migration-1.4.rst
index baf3c08379..471bfb0f2a 100644
--- a/documentation/migration-guides/migration-1.4.rst
+++ b/documentation/migration-guides/migration-1.4.rst
@@ -83,7 +83,7 @@ create an append file for the ``init-ifupdown`` recipe instead, which
you can find in the :term:`Source Directory` at
``meta/recipes-core/init-ifupdown``. For information on how to use
append files, see the
-":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
+":ref:`dev-manual/layers:appending other layers metadata with your layer`"
section in the Yocto Project Development Tasks Manual.
.. _migration-1.4-remote-debugging:
diff --git a/documentation/migration-guides/migration-1.5.rst b/documentation/migration-guides/migration-1.5.rst
index 93db14c3ba..06334d6fc7 100644
--- a/documentation/migration-guides/migration-1.5.rst
+++ b/documentation/migration-guides/migration-1.5.rst
@@ -26,8 +26,7 @@ provide packages for these, you can install and use the Buildtools
tarball, which provides an SDK-like environment containing them.
For more information on this requirement, see the
-":ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`"
-section.
+":ref:`system-requirements-buildtools`" section.
.. _migration-1.5-atom-pc-bsp:
@@ -240,11 +239,11 @@ Automated Image Testing
-----------------------
A new automated image testing framework has been added through the
-:ref:`ref-classes-testimage*` classes. This
+:ref:`ref-classes-testimage` classes. This
framework replaces the older ``imagetest-qemu`` framework.
You can learn more about performing automated image tests in the
-":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
.. _migration-1.5-build-history:
@@ -252,7 +251,7 @@ section in the Yocto Project Development Tasks Manual.
Build History
-------------
-Following are changes to Build History:
+The changes to Build History are:
- Installed package sizes: ``installed-package-sizes.txt`` for an image
now records the size of the files installed by each package instead
@@ -267,7 +266,7 @@ Following are changes to Build History:
option for each utility for more information on the new syntax.
For more information on Build History, see the
-":ref:`dev-manual/common-tasks:maintaining build output quality`"
+":ref:`dev-manual/build-quality:maintaining build output quality`"
section in the Yocto Project Development Tasks Manual.
.. _migration-1.5-udev:
@@ -275,7 +274,7 @@ section in the Yocto Project Development Tasks Manual.
``udev``
--------
-Following are changes to ``udev``:
+The changes to ``udev`` are:
- ``udev`` no longer brings in ``udev-extraconf`` automatically through
:term:`RRECOMMENDS`, since this was originally
@@ -319,7 +318,7 @@ Removed and Renamed Recipes
Other Changes
-------------
-Following is a list of short entries describing other changes:
+Here is a list of short entries describing other changes:
- ``run-postinsts``: Make this generic.
diff --git a/documentation/migration-guides/migration-1.6.rst b/documentation/migration-guides/migration-1.6.rst
index 358086560b..c50786a36d 100644
--- a/documentation/migration-guides/migration-1.6.rst
+++ b/documentation/migration-guides/migration-1.6.rst
@@ -12,7 +12,7 @@ Project 1.6 Release (codename "daisy") from the prior release.
The :ref:`archiver <ref-classes-archiver>` class has been rewritten
and its configuration has been simplified. For more details on the
source archiver, see the
-":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
+":ref:`dev-manual/licenses:maintaining open source license compliance during your product's lifecycle`"
section in the Yocto Project Development Tasks Manual.
.. _migration-1.6-packaging-changes:
@@ -147,7 +147,7 @@ NFS mount, an error occurs.
The ``PRINC`` variable has been deprecated and triggers a warning if
detected during a build. For :term:`PR` increments on changes,
use the PR service instead. You can find out more about this service in
-the ":ref:`dev-manual/common-tasks:working with a pr service`"
+the ":ref:`dev-manual/packages:working with a pr service`"
section in the Yocto Project Development Tasks Manual.
.. _migration-1.6-variable-changes-IMAGE_TYPES:
@@ -220,7 +220,7 @@ Package Test (ptest)
Package Tests (ptest) are built but not installed by default. For
information on using Package Tests, see the
-":ref:`dev-manual/common-tasks:testing packages with ptest`"
+":ref:`dev-manual/packages:testing packages with ptest`"
section in the Yocto Project Development Tasks Manual. For information on the
``ptest`` class, see the ":ref:`ref-classes-ptest`" section.
@@ -341,39 +341,39 @@ Removed and Renamed Recipes
The following recipes have been removed:
-- ``packagegroup-toolset-native`` - This recipe is largely unused.
+- ``packagegroup-toolset-native`` --- this recipe is largely unused.
-- ``linux-yocto-3.8`` - Support for the Linux yocto 3.8 kernel has been
+- ``linux-yocto-3.8`` --- support for the Linux yocto 3.8 kernel has been
dropped. Support for the 3.10 and 3.14 kernels have been added with
the ``linux-yocto-3.10`` and ``linux-yocto-3.14`` recipes.
-- ``ocf-linux`` - This recipe has been functionally replaced using
+- ``ocf-linux`` --- this recipe has been functionally replaced using
``cryptodev-linux``.
-- ``genext2fs`` - ``genext2fs`` is no longer used by the build system
+- ``genext2fs`` --- ``genext2fs`` is no longer used by the build system
and is unmaintained upstream.
-- ``js`` - This provided an ancient version of Mozilla's javascript
+- ``js`` --- this provided an ancient version of Mozilla's javascript
engine that is no longer needed.
-- ``zaurusd`` - The recipe has been moved to the ``meta-handheld``
+- ``zaurusd`` --- the recipe has been moved to the ``meta-handheld``
layer.
-- ``eglibc 2.17`` - Replaced by the ``eglibc 2.19`` recipe.
+- ``eglibc 2.17`` --- replaced by the ``eglibc 2.19`` recipe.
-- ``gcc 4.7.2`` - Replaced by the now stable ``gcc 4.8.2``.
+- ``gcc 4.7.2`` --- replaced by the now stable ``gcc 4.8.2``.
-- ``external-sourcery-toolchain`` - this recipe is now maintained in
+- ``external-sourcery-toolchain`` --- this recipe is now maintained in
the ``meta-sourcery`` layer.
-- ``linux-libc-headers-yocto 3.4+git`` - Now using version 3.10 of the
+- ``linux-libc-headers-yocto 3.4+git`` --- now using version 3.10 of the
``linux-libc-headers`` by default.
-- ``meta-toolchain-gmae`` - This recipe is obsolete.
+- ``meta-toolchain-gmae`` --- this recipe is obsolete.
-- ``packagegroup-core-sdk-gmae`` - This recipe is obsolete.
+- ``packagegroup-core-sdk-gmae`` --- this recipe is obsolete.
-- ``packagegroup-core-standalone-gmae-sdk-target`` - This recipe is
+- ``packagegroup-core-standalone-gmae-sdk-target`` --- this recipe is
obsolete.
.. _migration-1.6-removed-classes:
diff --git a/documentation/migration-guides/migration-1.7.rst b/documentation/migration-guides/migration-1.7.rst
index 88a6855d50..73eb6d1b81 100644
--- a/documentation/migration-guides/migration-1.7.rst
+++ b/documentation/migration-guides/migration-1.7.rst
@@ -30,8 +30,8 @@ version required on the
build host is now 1.7.8 because the ``--list`` option is now required by
BitBake's Git fetcher. As always, if your host distribution does not
provide a version of Git that meets this requirement, you can use the
-``buildtools-tarball`` that does. See the
-":ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`"
+:term:`buildtools` tarball that does. See the
+":ref:`ref-manual/system-requirements:required git, tar, python, make and gcc versions`"
section for more information.
.. _migration-1.7-autotools-class-changes:
@@ -217,7 +217,7 @@ The following miscellaneous change occurred:
should manually remove old "build-id" files from your existing build
history repositories to avoid confusion. For information on the build
history feature, see the
- ":ref:`dev-manual/common-tasks:maintaining build output quality`"
+ ":ref:`dev-manual/build-quality:maintaining build output quality`"
section in the Yocto Project Development Tasks Manual.
diff --git a/documentation/migration-guides/migration-2.1.rst b/documentation/migration-guides/migration-2.1.rst
index ae6268d509..54d359a67c 100644
--- a/documentation/migration-guides/migration-2.1.rst
+++ b/documentation/migration-guides/migration-2.1.rst
@@ -343,7 +343,7 @@ This release supports generation of GLib Introspective Repository (GIR)
files through GObject introspection, which is the standard mechanism for
accessing GObject-based software from runtime environments. You can
enable, disable, and test the generation of this data. See the
-":ref:`dev-manual/common-tasks:enabling gobject introspection support`"
+":ref:`dev-manual/gobject-introspection:enabling gobject introspection support`"
section in the Yocto Project Development Tasks Manual for more
information.
@@ -356,9 +356,9 @@ These additional changes exist:
- The minimum Git version has been increased to 1.8.3.1. If your host
distribution does not provide a sufficiently recent version, you can
- install the buildtools, which will provide it. See the
- :ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`
- section for more information on the buildtools tarball.
+ install the :term:`buildtools`, which will provide it. See the
+ :ref:`ref-manual/system-requirements:required git, tar, python, make and gcc versions`
+ section for more information on the :term:`buildtools` tarball.
- The buggy and incomplete support for the RPM version 4 package
manager has been removed. The well-tested and maintained support for
diff --git a/documentation/migration-guides/migration-2.2.rst b/documentation/migration-guides/migration-2.2.rst
index 10aa2d0684..979b5aa0f9 100644
--- a/documentation/migration-guides/migration-2.2.rst
+++ b/documentation/migration-guides/migration-2.2.rst
@@ -27,7 +27,7 @@ Staging Directories in Sysroot Has Been Simplified
The way directories are staged in sysroot has been simplified and
introduces the new :term:`SYSROOT_DIRS`,
:term:`SYSROOT_DIRS_NATIVE`, and ``SYSROOT_DIRS_BLACKLIST``
-(replaced by :term:`SYSROOT_DIRS_IGNORE` in version 3.5). See the
+(replaced by :term:`SYSROOT_DIRS_IGNORE` in version 4.0). See the
:oe_lists:`v2 patch series on the OE-Core Mailing List
</pipermail/openembedded-core/2016-May/121365.html>`
for additional information.
@@ -70,9 +70,9 @@ Metadata Must Now Use Python 3 Syntax
The metadata is now required to use Python 3 syntax. For help preparing
metadata, see any of the many Python 3 porting guides available.
-Alternatively, you can reference the conversion commits for Bitbake and
-you can use :term:`OpenEmbedded-Core (OE-Core)` as a guide for changes. Following are
-particular areas of interest:
+Alternatively, you can reference the conversion commits for BitBake and
+you can use :term:`OpenEmbedded-Core (OE-Core)` as a guide for changes.
+Particular areas of interest are:
- subprocess command-line pipes needing locale decoding
@@ -103,7 +103,7 @@ online package-manager support through SMART still require Python 2.
``buildtools-tarball`` Includes Python 3
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``buildtools-tarball`` now includes Python 3.
+The :term:`buildtools` tarball now includes Python 3.
.. _migration-2.2-uclibc-replaced-by-musl:
@@ -181,14 +181,8 @@ root filesystem, provides an image, and uses the ``nographic`` option::
$ runqemu qemux86-64 tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.ext4 tmp/deploy/images/qemux86-64/bzImage nographic
-Following is a list of variables that can be set in configuration files
-such as ``bsp.conf`` to enable the BSP to be booted by ``runqemu``:
-
-.. note::
-
- "QB" means "QEMU Boot".
-
-::
+Here is a list of variables that can be set in configuration files
+such as ``bsp.conf`` to enable the BSP to be booted by ``runqemu``::
QB_SYSTEM_NAME: QEMU name (e.g. "qemu-system-i386")
QB_OPT_APPEND: Options to append to QEMU (e.g. "-show-cursor")
@@ -442,7 +436,7 @@ The following miscellaneous changes have occurred:
- :ref:`ref-classes-image`: Renamed COMPRESS(ION) to CONVERSION. This change
means that ``COMPRESSIONTYPES``, ``COMPRESS_DEPENDS`` and
``COMPRESS_CMD`` are deprecated in favor of ``CONVERSIONTYPES``,
- ``CONVERSION_DEPENDS`` and ``CONVERSION_CMD``. The ``COMPRESS*``
+ ``CONVERSION_DEPENDS`` and :term:`CONVERSION_CMD`. The ``COMPRESS*``
variable names will still work in the 2.2 release but metadata that
does not need to be backwards-compatible should be changed to use the
new names as the ``COMPRESS*`` ones will be removed in a future
diff --git a/documentation/migration-guides/migration-2.3.rst b/documentation/migration-guides/migration-2.3.rst
index d49ed474ca..b3428039b8 100644
--- a/documentation/migration-guides/migration-2.3.rst
+++ b/documentation/migration-guides/migration-2.3.rst
@@ -363,7 +363,7 @@ The following changes have been made to Wic:
.. note::
For more information on Wic, see the
- ":ref:`dev-manual/common-tasks:creating partitioned images using wic`"
+ ":ref:`dev-manual/wic:creating partitioned images using wic`"
section in the Yocto Project Development Tasks Manual.
- *Default Output Directory Changed:* Wic's default output directory is
diff --git a/documentation/migration-guides/migration-2.4.rst b/documentation/migration-guides/migration-2.4.rst
index ef5f32e6ef..66dfc20d95 100644
--- a/documentation/migration-guides/migration-2.4.rst
+++ b/documentation/migration-guides/migration-2.4.rst
@@ -89,8 +89,6 @@ occurred:
Removed Recipes
---------------
-The following recipes have been removed:
-
- ``acpitests``: This recipe is not maintained.
- ``autogen-native``: No longer required by Grub, oe-core, or
@@ -213,8 +211,6 @@ recipes you might have. This will avoid breakage in post 2.4 releases.
Package QA Changes
------------------
-The following package QA changes took place:
-
- The "unsafe-references-in-scripts" QA check has been removed.
- If you refer to ``${COREBASE}/LICENSE`` within
@@ -229,8 +225,6 @@ The following package QA changes took place:
``README`` File Changes
-----------------------
-The following are changes to ``README`` files:
-
- The main Poky ``README`` file has been moved to the ``meta-poky``
layer and has been renamed ``README.poky``. A symlink has been
created so that references to the old location work.
@@ -246,8 +240,6 @@ The following are changes to ``README`` files:
Miscellaneous Changes
---------------------
-The following are additional changes:
-
- The ``ROOTFS_PKGMANAGE_BOOTSTRAP`` variable and any references to it
have been removed. You should remove this variable from any custom
recipes.
@@ -301,7 +293,7 @@ The following are additional changes:
likely be removed in the next Yocto Project release.
- The ``vmdk``, ``vdi``, and ``qcow2`` image file types are now used in
- conjunction with the "wic" image type through ``CONVERSION_CMD``.
+ conjunction with the "wic" image type through :term:`CONVERSION_CMD`.
Consequently, the equivalent image types are now ``wic.vmdk``,
``wic.vdi``, and ``wic.qcow2``, respectively.
diff --git a/documentation/migration-guides/migration-2.5.rst b/documentation/migration-guides/migration-2.5.rst
index abd26809df..03ecb16dc6 100644
--- a/documentation/migration-guides/migration-2.5.rst
+++ b/documentation/migration-guides/migration-2.5.rst
@@ -85,8 +85,6 @@ The following recipes have been removed:
Scripts and Tools Changes
-------------------------
-The following are changes to scripts and tools:
-
- ``yocto-bsp``, ``yocto-kernel``, and ``yocto-layer``: The
``yocto-bsp``, ``yocto-kernel``, and ``yocto-layer`` scripts
previously shipped with poky but not in OpenEmbedded-Core have been
@@ -117,8 +115,6 @@ The following are changes to scripts and tools:
BitBake Changes
---------------
-The following are BitBake changes:
-
- The ``--runall`` option has changed. There are two different
behaviors people might want:
@@ -151,7 +147,7 @@ The following are BitBake changes:
Python and Python 3 Changes
---------------------------
-The following are auto-packaging changes to Python and Python 3:
+Here are auto-packaging changes to Python and Python 3:
The script-managed ``python-*-manifest.inc`` files that were previously
used to generate Python and Python 3 packages have been replaced with a
@@ -185,8 +181,6 @@ change please see :yocto_git:`this commit
Miscellaneous Changes
---------------------
-The following are additional changes:
-
- The :ref:`kernel <ref-classes-kernel>` class supports building packages for multiple kernels.
If your kernel recipe or ``.bbappend`` file mentions packaging at
all, you should replace references to the kernel in package names
@@ -264,7 +258,7 @@ The following are additional changes:
will trigger a warning during ``do_rootfs``.
For more information, see the
- ":ref:`dev-manual/common-tasks:post-installation scripts`"
+ ":ref:`dev-manual/new-recipe:post-installation scripts`"
section in the Yocto Project Development Tasks Manual.
- The ``elf`` image type has been removed. This image type was removed
diff --git a/documentation/migration-guides/migration-2.6.rst b/documentation/migration-guides/migration-2.6.rst
index 11e659de7c..ffac097a05 100644
--- a/documentation/migration-guides/migration-2.6.rst
+++ b/documentation/migration-guides/migration-2.6.rst
@@ -319,7 +319,7 @@ This section provides information about automatic testing changes:
practices now dictate that you use the
:term:`IMAGE_CLASSES` variable rather than the
:term:`INHERIT` variable when you inherit the
- :ref:`testimage <ref-classes-testimage*>` and
+ :ref:`testimage <ref-classes-testimage>` and
:ref:`testsdk <ref-classes-testsdk>` classes used for automatic
testing.
@@ -368,7 +368,7 @@ Any failure of a ``pkg_postinst()`` script (including exit 1) triggers
an error during the :ref:`ref-tasks-rootfs` task.
For more information on post-installation behavior, see the
-":ref:`dev-manual/common-tasks:post-installation scripts`"
+":ref:`dev-manual/new-recipe:post-installation scripts`"
section in the Yocto Project Development Tasks Manual.
.. _migration-2.6-python-3-profile-guided-optimizations:
diff --git a/documentation/migration-guides/migration-3.0.rst b/documentation/migration-guides/migration-3.0.rst
index 50d6758e71..5a52ebaed2 100644
--- a/documentation/migration-guides/migration-3.0.rst
+++ b/documentation/migration-guides/migration-3.0.rst
@@ -148,7 +148,7 @@ XML feeds that ``cve-check-tool`` was using, supports CVSSv3 scoring,
and makes other improvements.
Additionally, the ``CVE_CHECK_CVE_WHITELIST`` variable has been replaced
-by ``CVE_CHECK_WHITELIST`` (replaced by :term:`CVE_CHECK_IGNORE` in version 3.5).
+by ``CVE_CHECK_WHITELIST`` (replaced by :term:`CVE_CHECK_IGNORE` in version 4.0).
.. _migration-3.0-bitbake-changes:
@@ -216,11 +216,11 @@ The following sanity check changes occurred.
- :term:`SRC_URI` is now checked for usage of two
problematic items:
- - "${PN}" prefix/suffix use - Warnings always appear if ${PN} is
+ - "${PN}" prefix/suffix use --- warnings always appear if ${PN} is
used. You must fix the issue regardless of whether multiconfig or
anything else that would cause prefixing/suffixing to happen.
- - Github archive tarballs - these are not guaranteed to be stable.
+ - Github archive tarballs --- these are not guaranteed to be stable.
Consequently, it is likely that the tarballs will be refreshed and
thus the SRC_URI checksums will fail to apply. It is recommended
that you fetch either an official release tarball or a specific
diff --git a/documentation/migration-guides/migration-3.1.rst b/documentation/migration-guides/migration-3.1.rst
index e3fdbbe425..2312ea6494 100644
--- a/documentation/migration-guides/migration-3.1.rst
+++ b/documentation/migration-guides/migration-3.1.rst
@@ -200,7 +200,7 @@ Packaging changes
-----------------
- ``intltool`` has been removed from ``packagegroup-core-sdk`` as it is
- rarely needed to build modern software - gettext can do most of the
+ rarely needed to build modern software --- gettext can do most of the
things it used to be needed for. ``intltool`` has also been removed
from ``packagegroup-core-self-hosted`` as it is not needed to for
standard builds.
@@ -238,7 +238,7 @@ Warnings will now be shown at ``do_package_qa`` time in the following
circumstances:
- A recipe installs ``.desktop`` files containing ``MimeType`` keys but
- does not inherit the new ``mime-xdg`` class
+ does not inherit the new :ref:`mime-xdg <ref-classes-mime-xdg>` class
- A recipe installs ``.xml`` files into ``${datadir}/mime/packages``
but does not inherit the :ref:`mime <ref-classes-mime>` class
diff --git a/documentation/migration-guides/migration-3.2.rst b/documentation/migration-guides/migration-3.2.rst
index d593effe97..cf0b00f1af 100644
--- a/documentation/migration-guides/migration-3.2.rst
+++ b/documentation/migration-guides/migration-3.2.rst
@@ -11,7 +11,7 @@ Minimum system requirements
``gcc`` version 6.0 is now required at minimum on the build host. For older
host distributions where this is not available, you can use the
-``buildtools-extended-tarball`` (easily installable using
+:term:`buildtools-extended` tarball (easily installable using
``scripts/install-buildtools``).
@@ -23,7 +23,7 @@ Removed recipes
The following recipes have been removed:
- ``bjam-native``: replaced by ``boost-build-native``
-- ``avahi-ui``: folded into the main ``avahi`` recipe - the GTK UI can be disabled using :term:`PACKAGECONFIG` for ``avahi``.
+- ``avahi-ui``: folded into the main ``avahi`` recipe --- the GTK UI can be disabled using :term:`PACKAGECONFIG` for ``avahi``.
- ``build-compare``: no longer needed with the removal of the ``packagefeed-stability`` class
- ``dhcp``: obsolete, functionally replaced by ``dhcpcd`` and ``kea``
- ``libmodulemd-v1``: replaced by ``libmodulemd``
@@ -37,7 +37,7 @@ Removed classes
The following classes (.bbclass files) have been removed:
-- ``spdx``: obsolete - the Yocto Project is a strong supporter of SPDX, but this class was old code using a dated approach and had the potential to be misleading. The ``meta-sdpxscanner`` layer is a much more modern and active approach to handling this and is recommended as a replacement.
+- ``spdx``: obsolete --- the Yocto Project is a strong supporter of SPDX, but this class was old code using a dated approach and had the potential to be misleading. The ``meta-sdpxscanner`` layer is a much more modern and active approach to handling this and is recommended as a replacement.
- ``packagefeed-stability``: this class had become obsolete with the advent of hash equivalence and reproducible builds.
@@ -46,7 +46,7 @@ pseudo path filtering and mismatch behaviour
--------------------------------------------
pseudo now operates on a filtered subset of files. This is a significant change
-to the way pseudo operates within OpenEmbedded - by default, pseudo monitors and
+to the way pseudo operates within OpenEmbedded --- by default, pseudo monitors and
logs (adds to its database) any file created or modified whilst in a ``fakeroot``
environment. However, there are large numbers of files that we simply don't care
about the permissions of whilst in that ``fakeroot`` context, for example ${:term:`S`}, ${:term:`B`}, ${:term:`T`},
@@ -68,7 +68,7 @@ structure above that subdirectory. For these types of cases in your own recipes,
extend :term:`PSEUDO_IGNORE_PATHS` to cover additional paths that pseudo should not
be monitoring.
-In addition, pseudo's behaviour on mismatches has now been changed - rather
+In addition, pseudo's behaviour on mismatches has now been changed --- rather
than doing what turns out to be a rather dangerous "fixup" if it sees a file
with a different path but the same inode as another file it has previously seen,
pseudo will throw an ``abort()`` and direct you to a :yocto_wiki:`wiki page </Pseudo_Abort>`
@@ -137,10 +137,10 @@ DHCP server/client replaced
The ``dhcp`` software package has become unmaintained and thus has been
functionally replaced by ``dhcpcd`` (client) and ``kea`` (server). You will
-need to replace references to the recipe/package names as appropriate - most
+need to replace references to the recipe/package names as appropriate --- most
commonly, at the package level ``dhcp-client`` should be replaced by
``dhcpcd`` and ``dhcp-server`` should be replaced by ``kea``. If you have any
-custom configuration files for these they will need to be adapted - refer to
+custom configuration files for these they will need to be adapted --- refer to
the upstream documentation for ``dhcpcd`` and ``kea`` for further details.
@@ -181,7 +181,7 @@ In addition, the following new checks were added and default to triggering an er
- :ref:`missing-update-alternatives <qa-check-missing-update-alternatives>`: Check if the recipe sets the :term:`ALTERNATIVE` variable for any of its packages, and does not inherit the :ref:`update-alternatives <ref-classes-update-alternatives>` class.
-- A trailing slash or duplicated slashes in the value of :term:`S` or :term:`B` will now trigger a warning so that they can be removed and path comparisons can be more reliable - remove any instances of these in your recipes if the warning is displayed.
+- A trailing slash or duplicated slashes in the value of :term:`S` or :term:`B` will now trigger a warning so that they can be removed and path comparisons can be more reliable --- remove any instances of these in your recipes if the warning is displayed.
.. _migration-3.2-src-uri-file-globbing:
@@ -209,7 +209,7 @@ deploy class now cleans ``DEPLOYDIR`` before ``do_deploy``
``do_deploy`` as implemented in the :ref:`deploy <ref-classes-deploy>` class now cleans up ${:term:`DEPLOYDIR`} before running, just as ``do_install`` cleans up ${:term:`D`} before running. This reduces the risk of :term:`DEPLOYDIR` being accidentally contaminated by files from previous runs, possibly even with different config, in case of incremental builds.
-Most recipes and classes that inherit the :ref:`deploy <ref-classes-deploy>` class or interact with ``do_deploy`` are unlikely to be affected by this unless they add ``prefuncs`` to ``do_deploy`` *which also* put files into ``${DEPLOYDIR}`` - these should be refactored to use ``do_deploy_prepend`` instead.
+Most recipes and classes that inherit the :ref:`deploy <ref-classes-deploy>` class or interact with ``do_deploy`` are unlikely to be affected by this unless they add ``prefuncs`` to ``do_deploy`` *which also* put files into ``${DEPLOYDIR}`` --- these should be refactored to use ``do_deploy_prepend`` instead.
.. _migration-3.2-nativesdk-sdk-provides-dummy:
@@ -303,7 +303,7 @@ now need to be changed to ``inherit image-artifact-names``.
Miscellaneous changes
---------------------
-- Support for the long-deprecated ``PACKAGE_GROUP`` variable has now been removed - replace any remaining instances with :term:`FEATURE_PACKAGES`.
+- Support for the long-deprecated ``PACKAGE_GROUP`` variable has now been removed --- replace any remaining instances with :term:`FEATURE_PACKAGES`.
- The ``FILESPATHPKG`` variable, having been previously deprecated, has now been removed. Replace any remaining references with appropriate use of :term:`FILESEXTRAPATHS`.
- Erroneous use of ``inherit +=`` (instead of ``INHERIT +=``) in a configuration file now triggers an error instead of silently being ignored.
- ptest support has been removed from the ``kbd`` recipe, as upstream has moved to autotest which is difficult to work with in a cross-compilation environment.
diff --git a/documentation/migration-guides/migration-3.3.rst b/documentation/migration-guides/migration-3.3.rst
index 1eb494c286..775ee3172b 100644
--- a/documentation/migration-guides/migration-3.3.rst
+++ b/documentation/migration-guides/migration-3.3.rst
@@ -12,12 +12,11 @@ Minimum system requirements
You will now need at least Python 3.6 installed on your build host. Most recent
distributions provide this, but should you be building on a distribution that
-does not have it, you can use the ``buildtools-tarball`` (easily installable
-using ``scripts/install-buildtools``) - see
-:ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`
+does not have it, you can use the :term:`buildtools` tarball (easily installable
+using ``scripts/install-buildtools``) --- see
+:ref:`ref-manual/system-requirements:required git, tar, python, make and gcc versions`
for details.
-
.. _migration-3.3-removed-recipes:
Removed recipes
diff --git a/documentation/migration-guides/migration-3.4.rst b/documentation/migration-guides/migration-3.4.rst
index d57c955eb4..cbb7efb0f4 100644
--- a/documentation/migration-guides/migration-3.4.rst
+++ b/documentation/migration-guides/migration-3.4.rst
@@ -93,7 +93,7 @@ The ``lz4c``, ``pzstd`` and ``zstd`` commands are now required to be
installed on the build host to support LZ4 and Zstandard compression
functionality. These are typically provided by ``lz4`` and ``zstd``
packages in most Linux distributions. Alternatively they are available
-as part of ``buildtools-tarball`` if your distribution does not provide
+as part of :term:`buildtools` tarball if your distribution does not provide
them. For more information see
:ref:`ref-manual/system-requirements:required packages for the build host`.
@@ -146,7 +146,7 @@ Virtual runtime provides
~~~~~~~~~~~~~~~~~~~~~~~~
Recipes shouldn't use the ``virtual/`` string in :term:`RPROVIDES` and
-:term:`RDEPENDS` - it is confusing because ``virtual/`` has no special
+:term:`RDEPENDS` --- it is confusing because ``virtual/`` has no special
meaning in :term:`RPROVIDES` and :term:`RDEPENDS` (unlike in the
corresponding build-time :term:`PROVIDES` and :term:`DEPENDS`).
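+
+A hedged example of the kind of rename this implies (the provided name is
+purely illustrative)::
+
+   # Instead of:
+   RPROVIDES:${PN} += "virtual/login-manager"
+   # use a plain name:
+   RPROVIDES:${PN} += "login-manager"
+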
@@ -171,7 +171,7 @@ Extensible SDK host extension
For a normal SDK, some layers append to :term:`TOOLCHAIN_HOST_TASK`
unconditionally which is fine, until the eSDK tries to override the
variable to its own values. Instead of installing packages specified
-in this variable it uses native recipes instead - a very different
+in this variable it uses native recipes --- a very different
approach. This has led to confusing errors when binaries are added
to the SDK but not relocated.
@@ -252,8 +252,8 @@ Miscellaneous
- The previously deprecated ``COMPRESS_CMD`` and
``CVE_CHECK_CVE_WHITELIST`` variables have been removed. Use
- ``CONVERSION_CMD`` and ``CVE_CHECK_WHITELIST`` (replaced by
- :term:`CVE_CHECK_IGNORE` in version 3.5) respectively
+ :term:`CONVERSION_CMD` and ``CVE_CHECK_WHITELIST`` (replaced by
+ :term:`CVE_CHECK_IGNORE` in version 4.0) respectively
instead.
- The obsolete ``oe_machinstall`` function previously provided in the
diff --git a/documentation/migration-guides/migration-4.0.rst b/documentation/migration-guides/migration-4.0.rst
index a8e6b4c331..5e240457c0 100644
--- a/documentation/migration-guides/migration-4.0.rst
+++ b/documentation/migration-guides/migration-4.0.rst
@@ -66,7 +66,7 @@ changes and you need to review them before committing. An example warning
looks like::
poky/scripts/lib/devtool/upgrade.py needs further work at line 275 since it contains abort
-
+
Fetching changes
~~~~~~~~~~~~~~~~
@@ -109,7 +109,7 @@ License changes
If they do not, by default a warning will be shown. A
:oe_git:`convert-spdx-licenses.py </openembedded-core/tree/scripts/contrib/convert-spdx-licenses.py>`
script can be used to update your recipes.
-
+
- :term:`INCOMPATIBLE_LICENSE` should now use `SPDX identifiers <https://spdx.org/licenses/>`__.
Additionally, wildcarding is now limited to specifically supported values -
see the :term:`INCOMPATIBLE_LICENSE` documentation for further information.
@@ -119,7 +119,7 @@ License changes
which can cause signature issues for users. In addition the ``available_licenses()``
function has been removed from the :ref:`license <ref-classes-license>` class as
it is no longer needed.
-
+
Removed recipes
~~~~~~~~~~~~~~~
@@ -134,13 +134,13 @@ The following recipes have been removed in this release:
Python changes
~~~~~~~~~~~~~~
-
+
- ``distutils`` has been deprecated upstream in Python 3.10 and thus the ``distutils*``
classes have been moved to ``meta-python``. Recipes that inherit the ``distutils*``
classes should be updated to inherit ``setuptools*`` equivalents instead.
-
+
- The Python package build process is now based on `wheels <https://pythonwheels.com/>`__.
- Here are the new Python packaging classes that should be used:
+ The new Python packaging classes that should be used are
:ref:`python_flit_core <ref-classes-python_flit_core>`,
:ref:`python_setuptools_build_meta <ref-classes-python_setuptools_build_meta>`
and :ref:`python_poetry_core <ref-classes-python_poetry_core>`.
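+
+  As a sketch, a recipe whose upstream project builds with a standard
+  setuptools ``pyproject.toml`` backend would now typically just declare::
+
+     inherit python_setuptools_build_meta
+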
@@ -158,7 +158,7 @@ Prelink removed
Prelink has been dropped by ``glibc`` upstream in 2.36. It already caused issues with
binary corruption, has a number of open bugs and is of questionable benefit
without disabling load address randomization and PIE executables.
-
+
We disabled prelinking by default in the honister (3.4) release, but left it able
to be enabled if desired. However, without glibc support it cannot be maintained
any further, so all of the prelinking functionality has been removed in this release.
@@ -169,7 +169,7 @@ reference(s).
Reproducible as standard
~~~~~~~~~~~~~~~~~~~~~~~~
-Reproducibility is now considered as standard functionality, thus the
+Reproducibility is now considered standard functionality, thus the
``reproducible`` class has been removed and its previous contents merged into the
:ref:`base <ref-classes-base>` class. If you have references in your configuration to
``reproducible`` in :term:`INHERIT`, :term:`USER_CLASSES` etc. then they should be
@@ -189,7 +189,7 @@ Supported host distribution changes
- ``gcc`` version 7.5 is now required at minimum on the build host. For older
host distributions where this is not available, you can use the
- ``buildtools-extended-tarball`` (easily installable using
+ :term:`buildtools-extended` tarball (easily installable using
``scripts/install-buildtools``).
:append/:prepend in combination with other operators
@@ -211,7 +211,7 @@ and :ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:removal (over
Miscellaneous changes
~~~~~~~~~~~~~~~~~~~~~
-
+
- ``blacklist.bbclass`` is removed and the functionality moved to the
:ref:`base <ref-classes-base>` class with a more descriptive
``varflag`` variable named :term:`SKIP_RECIPE` which will use the `bb.parse.SkipRecipe()`
@@ -251,7 +251,7 @@ Miscellaneous changes
- The ``cortexa72-crc`` and ``cortexa72-crc-crypto`` tunes have been removed since
the crc extension is now enabled by default for cortexa72. Replace any references to
these with ``cortexa72`` and ``cortexa72-crypto`` respectively.
-
+
- The Python development shell (previously known as ``devpyshell``) feature has been
renamed to ``pydevshell``. To start it you should now run::
@@ -260,8 +260,11 @@ Miscellaneous changes
- The ``packagegroups-core-full-cmdline-libs`` packagegroup is no longer produced, as
libraries should normally be brought in via dependencies. If you have any references
to this then remove them.
-
+
- The :term:`TOPDIR` variable and the current working directory are no longer modified
when parsing recipes. Any code depending on the previous behaviour will no longer
work - change any such code to explicitly use appropriate path variables instead.
+- In order to exclude the kernel image from the image rootfs,
+ :term:`RRECOMMENDS`\ ``:${KERNEL_PACKAGE_NAME}-base`` should be set instead of
+ :term:`RDEPENDS`\ ``:${KERNEL_PACKAGE_NAME}-base``.
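+
+  A hedged sketch for configurations that previously excluded the kernel image
+  from the root filesystem this way::
+
+     # before
+     RDEPENDS:${KERNEL_PACKAGE_NAME}-base = ""
+     # after
+     RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base = ""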
diff --git a/documentation/migration-guides/release-3.4.rst b/documentation/migration-guides/release-3.4.rst
index 81476c4adb..66023108c7 100644
--- a/documentation/migration-guides/release-3.4.rst
+++ b/documentation/migration-guides/release-3.4.rst
@@ -7,4 +7,6 @@ Release 3.4 (honister)
release-notes-3.4
release-notes-3.4.1
release-notes-3.4.2
+ release-notes-3.4.3
+ release-notes-3.4.4
diff --git a/documentation/migration-guides/release-4.0.rst b/documentation/migration-guides/release-4.0.rst
index 7062f9d241..685799e268 100644
--- a/documentation/migration-guides/release-4.0.rst
+++ b/documentation/migration-guides/release-4.0.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
Release 4.0 (kirkstone)
=======================
@@ -5,3 +7,20 @@ Release 4.0 (kirkstone)
migration-4.0
release-notes-4.0
+ release-notes-4.0.1
+ release-notes-4.0.2
+ release-notes-4.0.3
+ release-notes-4.0.4
+ release-notes-4.0.5
+ release-notes-4.0.6
+ release-notes-4.0.7
+ release-notes-4.0.8
+ release-notes-4.0.9
+ release-notes-4.0.10
+ release-notes-4.0.11
+ release-notes-4.0.12
+ release-notes-4.0.13
+ release-notes-4.0.14
+ release-notes-4.0.15
+ release-notes-4.0.16
+ release-notes-4.0.17
diff --git a/documentation/migration-guides/release-notes-3.4.3.rst b/documentation/migration-guides/release-notes-3.4.3.rst
new file mode 100644
index 0000000000..5e118d9b02
--- /dev/null
+++ b/documentation/migration-guides/release-notes-3.4.3.rst
@@ -0,0 +1,197 @@
+Release notes for 3.4.3 (honister)
+----------------------------------
+
+Security Fixes in 3.4.3
+~~~~~~~~~~~~~~~~~~~~~~~
+
+- ghostscript: fix :cve:`2021-3781`
+- ghostscript: fix :cve:`2021-45949`
+- tiff: Add backports for two CVEs from upstream (:cve:`2022-0561` & :cve:`2022-0562`)
+- gcc: Fix :cve:`2021-46195`
+- virglrenderer: fix `CVE-2022-0135 <https://security-tracker.debian.org/tracker/CVE-2022-0135>`__ and `CVE-2022-0175 <https://security-tracker.debian.org/tracker/CVE-2022-0175>`__
+- binutils: Add fix for :cve:`2021-45078`
+
+
+Fixes in 3.4.3
+~~~~~~~~~~~~~~
+
+- Revert "cve-check: add lockfile to task"
+- asciidoc: update git repository
+- bitbake: build: Tweak exception handling for setscene tasks
+- bitbake: contrib: Fix hash server Dockerfile dependencies
+- bitbake: cooker: Improve parsing failure from handled exception usability
+- bitbake: data_smart: Fix overrides file/line message additions
+- bitbake: fetch2: ssh: username and password are optional
+- bitbake: tests/fetch: Handle upstream master -> main branch change
+- bitbake: utils: Ensure shell function failure in python logging is correct
+- build-appliance-image: Update to honister head revision
+- build-appliance-image: Update to honister head revision
+- coreutils: remove obsolete ignored CVE list
+- crate-fetch: fix setscene failures
+- cups: Add --with-dbusdir to EXTRA_OECONF for deterministic build
+- cve-check: create directory of CVE_CHECK_MANIFEST before copy
+- cve-check: get_cve_info should open the database read-only
+- default-distrovars.inc: Switch connectivity check to a yoctoproject.org page
+- depmodwrapper-cross: add config directory option
+- devtool: deploy-target: Remove stripped binaries in pseudo context
+- devtool: explicitly set main or master branches in upgrades when available
+- docs: fix hardcoded link warning messages
+- documentation: conf.py: update for 3.4.2
+- documentation: prepare for 3.4.3 release
+- expat: Upgrade to 2.4.7
+- gcc-target: fix glob to remove gcc-<version> binary
+- gcsections: add nativesdk-cairo to exclude list
+- go: update to 1.16.15
+- gst-devtools: 1.18.5 -> 1.18.6
+- gst-examples: 1.18.5 -> 1.18.6
+- gstreamer1.0-libav: 1.18.5 -> 1.18.6
+- gstreamer1.0-omx: 1.18.5 -> 1.18.6
+- gstreamer1.0-plugins-bad: 1.18.5 -> 1.18.6
+- gstreamer1.0-plugins-base: 1.18.5 -> 1.18.6
+- gstreamer1.0-plugins-good: 1.18.5 -> 1.18.6
+- gstreamer1.0-plugins-ugly: 1.18.5 -> 1.18.6
+- gstreamer1.0-python: 1.18.5 -> 1.18.6
+- gstreamer1.0-rtsp-server: 1.18.5 -> 1.18.6
+- gstreamer1.0-vaapi: 1.18.5 -> 1.18.6
+- gstreamer1.0: 1.18.5 -> 1.18.6
+- harfbuzz: upgrade 2.9.0 -> 2.9.1
+- initramfs-framework: unmount automounts before switch_root
+- kernel-devsrc: do not copy Module.symvers file during install
+- libarchive : update to 3.5.3
+- libpcap: Disable DPDK explicitly
+- libxml-parser-perl: Add missing RDEPENDS
+- linux-firmware: upgrade 20211216 -> 20220209
+- linux-yocto/5.10: Fix ramoops/ftrace
+- linux-yocto/5.10: features/zram: remove CONFIG_ZRAM_DEF_COMP
+- linux-yocto/5.10: fix dssall build error with binutils 2.3.8
+- linux-yocto/5.10: ppc/riscv: fix build with binutils 2.3.8
+- linux-yocto/5.10: update genericx86* machines to v5.10.99
+- linux-yocto/5.10: update to v5.10.103
+- mc: fix build if ncurses have been configured without wide characters
+- oeqa/buildtools: Switch to our webserver instead of example.com
+- patch.py: Prevent git repo reinitialization
+- perl: Improve and update module RPDEPENDS
+- poky.conf: bump version for 3.4.3 honister release
+- qemuboot: Fix build error if UNINATIVE_LOADER is unset
+- quilt: Disable external sendmail for deterministic build
+- recipetool: Fix circular reference in SRC_URI
+- releases: update to include 3.3.5
+- releases: update to include 3.4.2
+- rootfs-postcommands: amend systemd_create_users add user to group check
+- ruby: update 3.0.2 -> 3.0.3
+- scripts/runqemu-ifdown: Don't treat the last iptables command as special
+- sdk: fix search for dynamic loader
+- selftest: recipetool: Correct the URI for socat
+- sstate: inside the threadedpool don't write to the shared localdata
+- uninative: Upgrade to 3.5
+- util-linux: upgrade to 2.37.4
+- vim: Update to 8.2.4524 for further CVE fixes
+- wic: Use custom kernel path if provided
+- wireless-regdb: upgrade 2021.08.28 -> 2022.02.18
+- zip: modify when match.S is built
+
+Contributors to 3.4.3
+~~~~~~~~~~~~~~~~~~~~~
+
+- Alexander Kanavin
+- Anuj Mittal
+- Bill Pittman
+- Bruce Ashfield
+- Chee Yang Lee
+- Christian Eggers
+- Daniel Gomez
+- Daniel Müller
+- Daniel Wagenknecht
+- Florian Amstutz
+- Joe Slater
+- Jose Quaresma
+- Justin Bronder
+- Lee Chee Yang
+- Michael Halstead
+- Michael Opdenacker
+- Oleksandr Ocheretnyi
+- Oleksandr Suvorov
+- Pavel Zhukov
+- Peter Kjellerstedt
+- Richard Purdie
+- Robert Yang
+- Ross Burton
+- Sakib Sajal
+- Saul Wold
+- Sean Anderson
+- Stefan Herbrechtsmeier
+- Tamizharasan Kumar
+- Tean Cunningham
+- Zoltán Böszörményi
+- pgowda
+- wangmy
+
+Repositories / Downloads for 3.4.3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: https://git.yoctoproject.org/poky/
+- Branch: :yocto_git:`honister </poky/log/?h=honister>`
+- Tag: `yocto-3.4.3 <https://git.yoctoproject.org/poky/tag/?h=yocto-3.4.3>`__
+- Git Revision: :yocto_git:`ee68ae307fd951b9de6b31dc6713ea29186b7749 </poky/commit/?id=ee68ae307fd951b9de6b31dc6713ea29186b7749>`
+- Release Artefact: poky-ee68ae307fd951b9de6b31dc6713ea29186b7749
+- sha: 92c3d73c3e74f0e1d5c2ab2836ce3a3accbe47772cea70df3755845e0db1379b
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.3/poky-ee68ae307fd951b9de6b31dc6713ea29186b7749.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.3/poky-ee68ae307fd951b9de6b31dc6713ea29186b7749.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`honister </openembedded-core/log/?h=honister>`
+- Tag: :oe_git:`yocto-3.4.3 </openembedded-core/tag/?h=yocto-3.4.3>`
+- Git Revision: :oe_git:`ebca8f3ac9372b7ebb3d39e8f7f930b63b481448 </openembedded-core/commit/?id=ebca8f3ac9372b7ebb3d39e8f7f930b63b481448>`
+- Release Artefact: oecore-ebca8f3ac9372b7ebb3d39e8f7f930b63b481448
+- sha: f28e503f6f6c0bcd9192dbd528f8e3c7bcea504c089117e0094d9a4f315f4b9f
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.3/oecore-ebca8f3ac9372b7ebb3d39e8f7f930b63b481448.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.3/oecore-ebca8f3ac9372b7ebb3d39e8f7f930b63b481448.tar.bz2
+
+meta-mingw
+
+- Repository Location: https://git.yoctoproject.org/meta-mingw
+- Branch: :yocto_git:`honister </meta-mingw/log/?h=honister>`
+- Tag: :yocto_git:`yocto-3.4.3 </meta-mingw/tag/?h=yocto-3.4.3>`
+- Git Revision: :yocto_git:`f5d761cbd5c957e4405c5d40b0c236d263c916a8 </meta-mingw/commit/?id=f5d761cbd5c957e4405c5d40b0c236d263c916a8>`
+- Release Artefact: meta-mingw-f5d761cbd5c957e4405c5d40b0c236d263c916a8
+- sha: d4305d638ef80948584526c8ca386a8cf77933dffb8a3b8da98d26a5c40fcc11
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.3/meta-mingw-f5d761cbd5c957e4405c5d40b0c236d263c916a8.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.3/meta-mingw-f5d761cbd5c957e4405c5d40b0c236d263c916a8.tar.bz2
+
+meta-gplv2
+
+- Repository Location: https://git.yoctoproject.org/meta-gplv2
+- Branch: :yocto_git:`honister </meta-gplv2/log/?h=honister>`
+- Tag: :yocto_git:`yocto-3.4.3 </meta-gplv2/tag/?h=yocto-3.4.3>`
+- Git Revision: :yocto_git:`f04e4369bf9dd3385165281b9fa2ed1043b0e400 </meta-gplv2/commit/?id=f04e4369bf9dd3385165281b9fa2ed1043b0e400>`
+- Release Artefact: meta-gplv2-f04e4369bf9dd3385165281b9fa2ed1043b0e400
+- sha: ef8e2b1ec1fb43dbee4ff6990ac736315c7bc2d8c8e79249e1d337558657d3fe
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.3/meta-gplv2-f04e4369bf9dd3385165281b9fa2ed1043b0e400.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.3/meta-gplv2-f04e4369bf9dd3385165281b9fa2ed1043b0e400.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`1.52 </bitbake/log/?h=1.52>`
+- Tag: :oe_git:`yocto-3.4.3 </bitbake/tag/?h=yocto-3.4.3>`
+- Git Revision: :oe_git:`43dcb2b2a2b95a5c959be57bca94fb7190ea6257 </bitbake/commit/?id=43dcb2b2a2b95a5c959be57bca94fb7190ea6257>`
+- Release Artefact: bitbake-43dcb2b2a2b95a5c959be57bca94fb7190ea6257
+- sha: 92497ff97fed81dcc6d3e202969fb63ca983a8f5d9d91cafc6aee88312f79cf9
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.3/bitbake-43dcb2b2a2b95a5c959be57bca94fb7190ea6257.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.3/bitbake-43dcb2b2a2b95a5c959be57bca94fb7190ea6257.tar.bz2
+
+yocto-docs
+
+- Repository Location: https://git.yoctoproject.org/yocto-docs
+- Branch: :yocto_git:`honister </yocto-docs/log/?h=honister>`
+- Tag: :yocto_git:`yocto-3.4.3 </yocto-docs/tag/?h=yocto-3.4.3>`
+- Git Revision: :yocto_git:`15f46f97d9cad558c19fc1dc19cfbe3720271d04 </yocto-docs/commit/?id=15f46f97d9cad558c19fc1dc19cfbe3720271d04>`
diff --git a/documentation/migration-guides/release-notes-3.4.4.rst b/documentation/migration-guides/release-notes-3.4.4.rst
new file mode 100644
index 0000000000..91beba0062
--- /dev/null
+++ b/documentation/migration-guides/release-notes-3.4.4.rst
@@ -0,0 +1,155 @@
+Release notes for 3.4.4 (honister)
+----------------------------------
+
+Security Fixes in 3.4.4
+~~~~~~~~~~~~~~~~~~~~~~~
+
+- tiff: fix :cve:`2022-0865`, :cve:`2022-0891`, :cve:`2022-0907`, :cve:`2022-0908`, :cve:`2022-0909` and :cve:`2022-0924`
+- xz: fix `CVE-2022-1271 <https://security-tracker.debian.org/tracker/CVE-2022-1271>`__
+- unzip: fix `CVE-2021-4217 <https://security-tracker.debian.org/tracker/CVE-2021-4217>`__
+- zlib: fix :cve:`2018-25032`
+- grub: ignore :cve:`2021-46705`
+
+Fixes in 3.4.4
+~~~~~~~~~~~~~~
+
+- alsa-tools: Ensure we install correctly
+- bitbake.conf: mark all directories as safe for git to read
+- bitbake: knotty: display active tasks when printing keepAlive() message
+- bitbake: knotty: reduce keep-alive timeout from 5000s (83 minutes) to 10 minutes
+- bitbake: server/process: Disable gc around critical section
+- bitbake: server/xmlrpcserver: Add missing xmlrpcclient import
+- bitbake: toaster: Fix IMAGE_INSTALL issues with _append vs :append
+- bitbake: toaster: fixtures replace gatesgarth
+- build-appliance-image: Update to honister head revision
+- conf.py/poky.yaml: Move version information to poky.yaml and read in conf.py
+- conf/machine: fix QEMU x86 sound options
+- devupstream: fix handling of SRC_URI
+- documentation: update for 3.4.4 release
+- externalsrc/devtool: Fix to work with fixed export funcition flags handling
+- gmp: add missing COPYINGv3
+- gnu-config: update SRC_URI
+- libxml2: fix CVE-2022-23308 regression
+- libxml2: move to gitlab.gnome.org
+- libxml2: update to 2.9.13
+- libxshmfence: Correct LICENSE to HPND
+- license_image.bbclass: close package.manifest file
+- linux-firmware: correct license for ar3k firmware
+- linux-firmware: upgrade 20220310 -> 20220411
+- linux-yocto-rt/5.10: update to -rt61
+- linux-yocto/5.10: cfg/debug: add configs for kcsan
+- linux-yocto/5.10: split vtpm for more granular inclusion
+- linux-yocto/5.10: update to v5.10.109
+- linux-yocto: nohz_full boot arg fix
+- oe-pkgdata-util: Adapt to the new variable override syntax
+- oeqa/selftest/devtool: ensure Git username is set before upgrade tests
+- poky.conf: bump version for 3.4.4 release
+- pseudo: Add patch to workaround paths with crazy lengths
+- pseudo: Fix handling of absolute links
+- sanity: Add warning for local hasheqiv server with remote sstate mirrors
+- scripts/runqemu: Fix memory limits for qemux86-64
+- shadow-native: Simplify and fix syslog disable patch
+- tiff: Add marker for CVE-2022-1056 being fixed
+- toaster: Fix broken overrides usage
+- u-boot: Inherit pkgconfig
+- uninative: Upgrade to 3.6 with gcc 12 support
+- vim: Upgrade 8.2.4524 -> 8.2.4681
+- virglrenderer: update SRC_URI
+- webkitgtk: update to 2.32.4
+- wireless-regdb: upgrade 2022.02.18 -> 2022.04.08
+
+Known Issues
+~~~~~~~~~~~~
+
+There were a couple of known intermittent autobuilder bugs that occurred during release testing, but these are not regressions in the release.
+
+Contributors to 3.4.4
+~~~~~~~~~~~~~~~~~~~~~
+
+- Alexandre Belloni
+- Anuj Mittal
+- Bruce Ashfield
+- Chee Yang Lee
+- Dmitry Baryshkov
+- Joe Slater
+- Konrad Weihmann
+- Martin Jansa
+- Michael Opdenacker
+- Minjae Kim
+- Peter Kjellerstedt
+- Ralph Siemsen
+- Richard Purdie
+- Ross Burton
+- Tim Orling
+- wangmy
+- zhengruoqin
+
+Repositories / Downloads for 3.4.4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: https://git.yoctoproject.org/poky/
+- Branch: :yocto_git:`honister </poky/log/?h=honister>`
+- Tag: `yocto-3.4.4 <https://git.yoctoproject.org/poky/tag/?h=yocto-3.4.4>`__
+- Git Revision: :yocto_git:`780eeec8851950ee6ac07a2a398ba937206bd2e4 </poky/commit/?id=780eeec8851950ee6ac07a2a398ba937206bd2e4>`
+- Release Artefact: poky-780eeec8851950ee6ac07a2a398ba937206bd2e4
+- sha: 09558927064454ec2492da376156b716d9fd14aae57196435d742db7bfdb4b95
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.4/poky-780eeec8851950ee6ac07a2a398ba937206bd2e4.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.4/poky-780eeec8851950ee6ac07a2a398ba937206bd2e4.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`honister </openembedded-core/log/?h=honister>`
+- Tag: :oe_git:`yocto-3.4.4 </openembedded-core/tag/?h=yocto-3.4.4>`
+- Git Revision: :oe_git:`1a6f5e27249afb6fb4d47c523b62b5dd2482a69d </openembedded-core/commit/?id=1a6f5e27249afb6fb4d47c523b62b5dd2482a69d>`
+- Release Artefact: oecore-1a6f5e27249afb6fb4d47c523b62b5dd2482a69d
+- sha: b8354ca457756384139a579b9e51f1ba854013c99add90c0c4c6ef68421fede5
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.4/oecore-1a6f5e27249afb6fb4d47c523b62b5dd2482a69d.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.4/oecore-1a6f5e27249afb6fb4d47c523b62b5dd2482a69d.tar.bz2
+
+meta-mingw
+
+- Repository Location: https://git.yoctoproject.org/meta-mingw
+- Branch: :yocto_git:`honister </meta-mingw/log/?h=honister>`
+- Tag: :yocto_git:`yocto-3.4.4 </meta-mingw/tag/?h=yocto-3.4.4>`
+- Git Revision: :yocto_git:`f5d761cbd5c957e4405c5d40b0c236d263c916a8 </meta-mingw/commit/?id=f5d761cbd5c957e4405c5d40b0c236d263c916a8>`
+- Release Artefact: meta-mingw-f5d761cbd5c957e4405c5d40b0c236d263c916a8
+- sha: d4305d638ef80948584526c8ca386a8cf77933dffb8a3b8da98d26a5c40fcc11
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.4/meta-mingw-f5d761cbd5c957e4405c5d40b0c236d263c916a8.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.4/meta-mingw-f5d761cbd5c957e4405c5d40b0c236d263c916a8.tar.bz2
+
+meta-gplv2
+
+- Repository Location: https://git.yoctoproject.org/meta-gplv2
+- Branch: :yocto_git:`honister </meta-gplv2/log/?h=honister>`
+- Tag: :yocto_git:`yocto-3.4.4 </meta-gplv2/tag/?h=yocto-3.4.4>`
+- Git Revision: :yocto_git:`f04e4369bf9dd3385165281b9fa2ed1043b0e400 </meta-gplv2/commit/?id=f04e4369bf9dd3385165281b9fa2ed1043b0e400>`
+- Release Artefact: meta-gplv2-f04e4369bf9dd3385165281b9fa2ed1043b0e400
+- sha: ef8e2b1ec1fb43dbee4ff6990ac736315c7bc2d8c8e79249e1d337558657d3fe
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.4/meta-gplv2-f04e4369bf9dd3385165281b9fa2ed1043b0e400.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.4/meta-gplv2-f04e4369bf9dd3385165281b9fa2ed1043b0e400.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`1.52 </bitbake/log/?h=1.52>`
+- Tag: :oe_git:`yocto-3.4.4 </bitbake/tag/?h=yocto-3.4.4>`
+- Git Revision: :oe_git:`c2d8f9b2137bd4a98eb0f51519493131773e7517 </bitbake/commit/?id=c2d8f9b2137bd4a98eb0f51519493131773e7517>`
+- Release Artefact: bitbake-c2d8f9b2137bd4a98eb0f51519493131773e7517
+- sha: a8b6217f2d63975bbf49f430e11046608023ee2827faa893b15d9a0d702cf833
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-3.4.4/bitbake-c2d8f9b2137bd4a98eb0f51519493131773e7517.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-3.4.4/bitbake-c2d8f9b2137bd4a98eb0f51519493131773e7517.tar.bz2
+
+yocto-docs
+
+- Repository Location: https://git.yoctoproject.org/yocto-docs
+- Branch: :yocto_git:`honister </yocto-docs/log/?h=honister>`
+- Tag: :yocto_git:`yocto-3.4.4 </yocto-docs/tag/?h=yocto-3.4.4>`
+- Git Revision: :yocto_git:`5ead7d39aaf9044078dff27f462e29a8e31d89e4 </yocto-docs/commit/?id=5ead7d39aaf9044078dff27f462e29a8e31d89e4>`
diff --git a/documentation/migration-guides/release-notes-3.4.rst b/documentation/migration-guides/release-notes-3.4.rst
index 5a8fb4b5a9..323e4df7ae 100644
--- a/documentation/migration-guides/release-notes-3.4.rst
+++ b/documentation/migration-guides/release-notes-3.4.rst
@@ -5,7 +5,7 @@ New Features / Enhancements in 3.4
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Linux kernel 5.14, glibc 2.34 and ~280 other recipe upgrades
-- Switched override character to ':' (replacing '_') for more robust parsing and improved performance - see the above migration guide for help
+- Switched override character to ':' (replacing '_') for more robust parsing and improved performance --- see the above migration guide for help
- Rust integrated into core, providing rust support for cross-compilation and SDK
- New create-spdx class for creating SPDX SBoM documents
- New recipes: cargo, core-image-ptest-all, core-image-ptest-fast, core-image-weston-sdk, erofs-utils, gcompat, gi-docgen, libmicrohttpd, libseccomp, libstd-rs, perlcross, python3-markdown, python3-pyyaml, python3-smartypants, python3-typogrify, rust, rust-cross, rust-cross-canadian, rust-hello-world, rust-llvm, rust-tools-cross-canadian, rustfmt, xwayland
diff --git a/documentation/migration-guides/release-notes-4.0.1.rst b/documentation/migration-guides/release-notes-4.0.1.rst
new file mode 100644
index 0000000000..81da6e5f2d
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.1.rst
@@ -0,0 +1,248 @@
+Release notes for 4.0.1 (kirkstone)
+-----------------------------------
+
+Security Fixes in 4.0.1
+~~~~~~~~~~~~~~~~~~~~~~~
+
+- linux-yocto/5.15: fix :cve:`2022-28796`
+- python3: ignore :cve:`2015-20107`
+- e2fsprogs: fix :cve:`2022-1304`
+- lua: fix :cve:`2022-28805`
+- busybox: fix :cve:`2022-28391`
+
+Fixes in 4.0.1
+~~~~~~~~~~~~~~
+
+- abi_version/sstate: Bump hashequiv and sstate versions due to git changes
+- apt: add apt selftest to test signed package feeds
+- apt: upgrade 2.4.4 -> 2.4.5
+- arch-armv8-2a.inc: fix a typo in TUNEVALID variable
+- babeltrace: Disable warnings as errors
+- base: Avoid circular references to our own scripts
+- base: Drop git intercept
+- build-appliance-image: Update to kirkstone head revision
+- build-appliance: Switch to kirkstone branch
+- buildtools-tarball: Only add cert envvars if certs are included
+- busybox: Use base_bindir instead of hardcoding /bin path
+- cases/buildepoxy.py: fix typo
+- create-spdx: delete virtual/kernel dependency to fix FreeRTOS build
+- create-spdx: fix error when symlink cannot be created
+- cve-check: add JSON format to summary output
+- cve-check: fix symlinks where link and output path are equal
+- cve-check: no need to depend on the fetch task
+- cve-update-db-native: let the user to drive the update interval
+- cve-update-db-native: update the CVE database once a day only
+- cve_check: skip remote patches that haven't been fetched when searching for CVE tags
+- dev-manual: add command used to add the signed-off-by line.
+- devshell.bbclass: Allow devshell & pydevshell to use the network
+- docs: conf.py: fix cve extlinks caption for sphinx <4.0
+- docs: migration-guides: migration-3.4: mention that hardcoded password are supported if hashed
+- docs: migration-guides: release-notes-4.0: fix risc-v typo
+- docs: migration-guides: release-notes-4.0: replace kernel placeholder with correct recipe name
+- docs: ref-manual: variables: add hashed password example in EXTRA_USERS_PARAMS
+- docs: set_versions.py: add information about obsolescence of a release
+- docs: set_versions.py: fix latest release of a branch being shown twice in switchers.js
+- docs: set_versions.py: fix latest version of an active release shown as obsolete
+- docs: set_versions.py: mark as obsolete only branches and old tags from obsolete releases
+- docs: sphinx-static: switchers.js.in: do not mark branches as outdated
+- docs: sphinx-static: switchers.js.in: fix broken switcher for branches
+- docs: sphinx-static: switchers.js.in: improve obsolete version detection
+- docs: sphinx-static: switchers.js.in: remove duplicate for outdated versions
+- docs: sphinx-static: switchers.js.in: rename all_versions to switcher_versions
+- docs: update Bitbake objects.inv location for master branch
+- documentation/brief-yoctoprojectqs: add directory for local.conf
+- gcompat: Fix build when usrmerge distro feature is enabled
+- git: correct license
+- git: upgrade 2.35.2 -> 2.35.3
+- glib: upgrade 2.72.0 -> 2.72.1
+- glibc: ptest: Fix glibc-tests package issue
+- gnupg: Disable FORTIFY_SOURCES on mips
+- go.bbclass: disable the use of the default configuration file
+- gstreamer1.0-plugins-bad: drop patch
+- gstreamer1.0-plugins-good: Fix libsoup dependency
+- gstreamer1.0: Minor documentation addition
+- install/devshell: Introduce git intercept script due to fakeroot issues
+- kernel-yocto.bbclass: Fixup do_kernel_configcheck usage of KMETA
+- libc-glibc: Use libxcrypt to provide virtual/crypt
+- libgit2: upgrade 1.4.2 -> 1.4.3
+- libsoup: upgrade 3.0.5 -> 3.0.6
+- libusb1: upgrade 1.0.25 -> 1.0.26
+- linux-firmware: correct license for ar3k firmware
+- linux-firmware: upgrade 20220310 -> 20220411
+- linux-yocto/5.10: base: enable kernel crypto userspace API
+- linux-yocto/5.10: update to v5.10.112
+- linux-yocto/5.15: arm: poky-tiny cleanup and fixes
+- linux-yocto/5.15: base: enable kernel crypto userspace API
+- linux-yocto/5.15: fix -standard kernel build issue
+- linux-yocto/5.15: fix ppc boot
+- linux-yocto/5.15: fix qemuarm graphical boot
+- linux-yocto/5.15: kasan: fix BUG: sleeping function called from invalid context
+- linux-yocto/5.15: netfilter: conntrack: avoid useless indirection during conntrack destruction
+- linux-yocto/5.15: update to v5.15.36
+- linux-yocto: enable powerpc-debug fragment
+- mdadm: Drop clang specific cflags
+- migration-3.4: add missing entry on EXTRA_USERS_PARAMS
+- migration-guides: add release notes for 4.0
+- migration-guides: complete migration guide for 4.0
+- migration-guides: release-notes-4.0: mention LTS release
+- migration-guides: release-notes-4.0: update 'Repositories / Downloads' section
+- migration-guides: stop including documents with ".. include"
+- musl: Fix build when usrmerge distro feature is enabled
+- ncurses: use COPYING file
+- neard: Switch SRC_URI to git repo
+- oeqa/selftest: add test for git working correctly inside pseudo
+- openssl: minor security upgrade 3.0.2 -> 3.0.3
+- package.bbclass: Prevent perform_packagecopy from removing /sysroot-only
+- package: Ensure we track whether PRSERV was active or not
+- package_manager: fix missing dependency on gnupg when signing deb package feeds
+- poky-tiny: enable qemuarmv5/qemuarm64 and cleanups
+- poky.conf: bump version for 4.0.1 release
+- qemu.bbclass: Extend ppc/ppc64 extra options
+- qemuarm64: use virtio pci interfaces
+- qemuarmv5: use arm-versatile-926ejs KMACHINE
+- ref-manual: Add XZ_THREADS and XZ_MEMLIMIT
+- ref-manual: add KERNEL_DEBUG_TIMESTAMPS
+- ref-manual: add ZSTD_THREADS
+- ref-manual: add a note about hard-coded passwords
+- ref-manual: add empty-dirs QA check and QA_EMPTY_DIRS*
+- ref-manual: add mention of vendor filtering to CVE_PRODUCT
+- ref-manual: mention wildcarding support in INCOMPATIBLE_LICENSE
+- releases: update for yocto 4.0
+- rootfs-postcommands: fix symlinks where link and output path are equal
+- ruby: upgrade 3.1.1 -> 3.1.2
+- sanity: skip make 4.2.1 warning for debian
+- scripts/git: Ensure we don't have circular references
+- scripts: Make git intercept global
+- seatd: Disable overflow warning as error on ppc64/musl
+- selftest/lic_checksum: Add test for filename containing space
+- set_versions: update for 4.0 release
+- staging: Ensure we filter out ourselves
+- strace: fix ptest failure in landlock
+- subversion: upgrade to 1.14.2
+- systemd-boot: remove outdated EFI_LD comment
+- systemtap: Fix build with gcc-12
+- terminal.py: Restore error output from Terminal
+- u-boot: Correct the SRC_URI
+- u-boot: Inherit pkgconfig
+- update_udev_hwdb: fix multilib issue with systemd
+- util-linux: Create u-a symlink for findfs utility
+- virgl: skip headless test on alma 8.6
+- webkitgtk: adjust patch status
+- wic: do not use PARTLABEL for msdos partition tables
+- wireless-regdb: upgrade 2022.02.18 -> 2022.04.08
+- xserver-xorg: Fix build with gcc12
+- yocto-bsps: update to v5.15.36
+
+Contributors to 4.0.1
+~~~~~~~~~~~~~~~~~~~~~
+
+- Abongwa Amahnui Bonalais
+- Alexander Kanavin
+- Bruce Ashfield
+- Carlos Rafael Giani
+- Chen Qi
+- Davide Gardenal
+- Dmitry Baryshkov
+- Ferry Toth
+- Henning Schild
+- Jon Mason
+- Justin Bronder
+- Kai Kang
+- Khem Raj
+- Konrad Weihmann
+- Lee Chee Yang
+- Marta Rybczynska
+- Martin Jansa
+- Matt Madison
+- Michael Halstead
+- Michael Opdenacker
+- Naveen Saini
+- Nicolas Dechesne
+- Paul Eggleton
+- Paul Gortmaker
+- Paulo Neves
+- Peter Kjellerstedt
+- Peter Marko
+- Pgowda
+- Portia
+- Quentin Schulz
+- Rahul Kumar
+- Richard Purdie
+- Robert Joslyn
+- Robert Yang
+- Roland Hieber
+- Ross Burton
+- Russ Dill
+- Steve Sakoman
+- wangmy
+- zhengruoqin
+
+Repositories / Downloads for 4.0.1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: https://git.yoctoproject.org/git/poky
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.1 </poky/tag/?h=yocto-4.0.1>`
+- Git Revision: :yocto_git:`8c489602f218bcf21de0d3c9f8cf620ea5f06430 </poky/commit/?id=8c489602f218bcf21de0d3c9f8cf620ea5f06430>`
+- Release Artefact: poky-8c489602f218bcf21de0d3c9f8cf620ea5f06430
+- sha: 65c545a316bd8efb13ae1358eeccc8953543be908008103b51f7f90aed960d00
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.1/poky-8c489602f218bcf21de0d3c9f8cf620ea5f06430.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.1/poky-8c489602f218bcf21de0d3c9f8cf620ea5f06430.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.1 </openembedded-core/tag/?h=yocto-4.0.1>`
+- Git Revision: :oe_git:`cb8647c08959abb1d6b7c2b3a34b4b415f66d7ee </openembedded-core/commit/?id=cb8647c08959abb1d6b7c2b3a34b4b415f66d7ee>`
+- Release Artefact: oecore-cb8647c08959abb1d6b7c2b3a34b4b415f66d7ee
+- sha: 43981b8fad82f601618a133dffbec839524f0d0a055efc3d8f808cbfd811ab17
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.1/oecore-cb8647c08959abb1d6b7c2b3a34b4b415f66d7ee.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.1/oecore-cb8647c08959abb1d6b7c2b3a34b4b415f66d7ee.tar.bz2
+
+meta-mingw
+
+- Repository Location: https://git.yoctoproject.org/git/meta-mingw
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.1 </meta-mingw/tag/?h=yocto-4.0.1>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.1/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.1/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: https://git.yoctoproject.org/git/meta-gplv2
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.1 </meta-gplv2/tag/?h=yocto-4.0.1>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.1/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.1/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0 </bitbake/tag/?h=yocto-4.0>`
+- Git Revision: :oe_git:`59c16ae6c55c607c56efd2287537a1b97ba2bf52 </bitbake/commit/?id=59c16ae6c55c607c56efd2287537a1b97ba2bf52>`
+- Release Artefact: bitbake-59c16ae6c55c607c56efd2287537a1b97ba2bf52
+- sha: 3ae466c31f738fc45c3d7c6f665952d59f01697f2667ea42f0544d4298dd6ef0
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.1/bitbake-59c16ae6c55c607c56efd2287537a1b97ba2bf52.tar.bz2,
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.1/bitbake-59c16ae6c55c607c56efd2287537a1b97ba2bf52.tar.bz2
+
+yocto-docs
+
+- Repository Location: https://git.yoctoproject.org/git/yocto-docs
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.1 </yocto-docs/tag/?h=yocto-4.0.1>`
+- Git Revision: :yocto_git:`4ec9df3336a425719a9a35532504731ce56984ca </yocto-docs/commit/?id=4ec9df3336a425719a9a35532504731ce56984ca>`
diff --git a/documentation/migration-guides/release-notes-4.0.10.rst b/documentation/migration-guides/release-notes-4.0.10.rst
new file mode 100644
index 0000000000..f37c3471ea
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.10.rst
@@ -0,0 +1,180 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.10 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.10
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- binutils: Fix :cve:`2023-1579`, :cve:`2023-1972`, :cve_mitre:`2023-25584`, :cve_mitre:`2023-25585` and :cve_mitre:`2023-25588`
+- cargo: Ignore :cve:`2022-46176`
+- connman: Fix :cve:`2023-28488`
+- curl: Fix :cve:`2023-27533`, :cve:`2023-27534`, :cve:`2023-27535`, :cve:`2023-27536` and :cve:`2023-27538`
+- ffmpeg: Fix :cve:`2022-48434`
+- freetype: Fix :cve:`2023-2004`
+- ghostscript: Fix :cve_mitre:`2023-29979`
+- git: Fix :cve:`2023-25652` and :cve:`2023-29007`
+- go: Fix :cve:`2022-41722`, :cve:`2022-41724`, :cve:`2022-41725`, :cve:`2023-24534`, :cve:`2023-24537` and :cve:`2023-24538`
+- go: Ignore :cve:`2022-41716`
+- libxml2: Fix :cve:`2023-28484` and :cve:`2023-29469`
+- libxpm: Fix :cve:`2022-44617`, :cve:`2022-46285` and :cve:`2022-4883`
+- linux-yocto: Ignore :cve:`2021-3759`, :cve:`2021-4135`, :cve:`2021-4155`, :cve:`2022-0168`, :cve:`2022-0171`, :cve:`2022-1016`, :cve:`2022-1184`, :cve:`2022-1198`, :cve:`2022-1199`, :cve:`2022-1462`, :cve:`2022-1734`, :cve:`2022-1852`, :cve:`2022-1882`, :cve:`2022-1998`, :cve:`2022-2078`, :cve:`2022-2196`, :cve:`2022-2318`, :cve:`2022-2380`, :cve:`2022-2503`, :cve:`2022-26365`, :cve:`2022-2663`, :cve:`2022-2873`, :cve:`2022-2905`, :cve:`2022-2959`, :cve:`2022-3028`, :cve:`2022-3078`, :cve:`2022-3104`, :cve:`2022-3105`, :cve:`2022-3106`, :cve:`2022-3107`, :cve:`2022-3111`, :cve:`2022-3112`, :cve:`2022-3113`, :cve:`2022-3115`, :cve:`2022-3202`, :cve:`2022-32250`, :cve:`2022-32296`, :cve:`2022-32981`, :cve:`2022-3303`, :cve:`2022-33740`, :cve:`2022-33741`, :cve:`2022-33742`, :cve:`2022-33743`, :cve:`2022-33744`, :cve:`2022-33981`, :cve:`2022-3424`, :cve:`2022-3435`, :cve:`2022-34918`, :cve:`2022-3521`, :cve:`2022-3545`, :cve:`2022-3564`, :cve:`2022-3586`, :cve:`2022-3594`, :cve:`2022-36123`, :cve:`2022-3621`, :cve:`2022-3623`, :cve:`2022-3629`, :cve:`2022-3633`, :cve:`2022-3635`, :cve:`2022-3646`, :cve:`2022-3649`, :cve:`2022-36879`, :cve:`2022-36946`, :cve:`2022-3707`, :cve:`2022-39188`, :cve:`2022-39190`, :cve:`2022-39842`, :cve:`2022-40307`, :cve:`2022-40768`, :cve:`2022-4095`, :cve:`2022-41218`, :cve:`2022-4139`, :cve:`2022-41849`, :cve:`2022-41850`, :cve:`2022-41858`, :cve:`2022-42328`, :cve:`2022-42329`, :cve:`2022-42703`, :cve:`2022-42721`, :cve:`2022-42722`, :cve:`2022-42895`, :cve:`2022-4382`, :cve:`2022-4662`, :cve:`2022-47518`, :cve:`2022-47519`, :cve:`2022-47520`, :cve:`2022-47929`, :cve:`2023-0179`, :cve:`2023-0394`, :cve:`2023-0461`, :cve:`2023-0590`, :cve:`2023-1073`, :cve:`2023-1074`, :cve:`2023-1077`, :cve:`2023-1078`, :cve:`2023-1079`, :cve:`2023-1095`, :cve:`2023-1118`, :cve:`2023-1249`, :cve:`2023-1252`, :cve:`2023-1281`, :cve:`2023-1382`, :cve:`2023-1513`, :cve:`2023-1829`, :cve:`2023-1838`, :cve:`2023-1998`, :cve:`2023-2006`, :cve:`2023-2008`, :cve:`2023-2162`, :cve:`2023-2166`, :cve:`2023-2177`, :cve:`2023-22999`, :cve:`2023-23002`, :cve:`2023-23004`, :cve:`2023-23454`, :cve:`2023-23455`, :cve:`2023-23559`, :cve:`2023-25012`, :cve:`2023-26545`, :cve:`2023-28327` and :cve:`2023-28328`
+- nasm: Fix :cve:`2022-44370`
+- python3-cryptography: Fix :cve:`2023-23931`
+- qemu: Ignore :cve:`2023-0664`
+- ruby: Fix :cve:`2023-28755` and :cve:`2023-28756`
+- screen: Fix :cve:`2023-24626`
+- shadow: Fix :cve:`2023-29383`
+- tiff: Fix :cve:`2022-4645`
+- webkitgtk: Fix :cve:`2022-32888` and :cve:`2022-32923`
+- xserver-xorg: Fix :cve:`2023-1393`
+
+
+Fixes in Yocto-4.0.10
+~~~~~~~~~~~~~~~~~~~~~
+
+- bitbake: bin/utils: Ensure locale en_US.UTF-8 is available on the system
+- build-appliance-image: Update to kirkstone head revision
+- cmake: add CMAKE_SYSROOT to generated toolchain file
+- glibc: stable 2.35 branch updates.
+- kernel-devsrc: depend on python3-core instead of python3
+- kernel: improve initramfs bundle processing time
+- libarchive: Enable acls, xattr for native as well as target
+- libbsd: Add correct license for all packages
+- libpam: Fix the xtests/tst-pam_motd[1|3] failures
+- libxpm: upgrade to 3.5.15
+- linux-firmware: upgrade to 20230404
+- linux-yocto/5.15: upgrade to v5.15.108
+- migration-guides: add release-notes for 4.0.9
+- oeqa/utils/metadata.py: Fix running oe-selftest running with no distro set
+- openssl: Move microblaze to linux-latomic config
+- package.bbclass: correct check for /build in copydebugsources()
+- poky.conf: bump version for 4.0.10
+- populate_sdk_base: add zip options
+- populate_sdk_ext.bbclass: set :term:`METADATA_REVISION` with a :term:`DISTRO` override
+- run-postinsts: Set dependency for ldconfig to avoid boot issues
+- update-alternatives.bbclass: fix old override syntax
+- wic/bootimg-efi: if fixed-size is set then use that for mkdosfs
+- wpebackend-fdo: upgrade to 1.14.2
+- xorg-lib-common: Add variable to set tarball type
+- xserver-xorg: upgrade to 21.1.8
+
+
+Known Issues in Yocto-4.0.10
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.10
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Archana Polampalli
+- Arturo Buzarra
+- Bruce Ashfield
+- Christoph Lauer
+- Deepthi Hemraj
+- Dmitry Baryshkov
+- Frank de Brabander
+- Hitendra Prajapati
+- Joe Slater
+- Kai Kang
+- Kyle Russell
+- Lee Chee Yang
+- Mark Hatle
+- Martin Jansa
+- Mingli Yu
+- Narpat Mali
+- Pascal Bach
+- Pawan Badganchi
+- Peter Bergin
+- Peter Marko
+- Piotr Łobacz
+- Randolph Sapp
+- Ranjitsinh Rathod
+- Ross Burton
+- Shubham Kulkarni
+- Siddharth Doshi
+- Steve Sakoman
+- Sundeep KOKKONDA
+- Thomas Roos
+- Virendra Thakur
+- Vivek Kumbhar
+- Wang Mingyu
+- Xiangyu Chen
+- Yash Shinde
+- Yoann Congal
+- Yogita Urade
+- Zhixiong Chi
+
+
+Repositories / Downloads for Yocto-4.0.10
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.10 </poky/log/?h=yocto-4.0.10>`
+- Git Revision: :yocto_git:`f53ab3a2ff206a130cdc843839dd0ea5ec4ad02f </poky/commit/?id=f53ab3a2ff206a130cdc843839dd0ea5ec4ad02f>`
+- Release Artefact: poky-f53ab3a2ff206a130cdc843839dd0ea5ec4ad02f
+- sha: 8820aeac857ce6bbd1c7ef26cadbb86eca02be93deded253b4a5f07ddd69255d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.10/poky-f53ab3a2ff206a130cdc843839dd0ea5ec4ad02f.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.10/poky-f53ab3a2ff206a130cdc843839dd0ea5ec4ad02f.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.10 </openembedded-core/log/?h=yocto-4.0.10>`
+- Git Revision: :oe_git:`d2713785f9cd2d58731df877bc8b7bcc71b6c8e6 </openembedded-core/commit/?id=d2713785f9cd2d58731df877bc8b7bcc71b6c8e6>`
+- Release Artefact: oecore-d2713785f9cd2d58731df877bc8b7bcc71b6c8e6
+- sha: 78e084a1aceaaa6ec022702f29f80eaffade3159e9c42b6b8985c1b7ddd2fbab
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.10/oecore-d2713785f9cd2d58731df877bc8b7bcc71b6c8e6.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.10/oecore-d2713785f9cd2d58731df877bc8b7bcc71b6c8e6.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.10 </meta-mingw/log/?h=yocto-4.0.10>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.10/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.10/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.10 </meta-gplv2/log/?h=yocto-4.0.10>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.10/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.10/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.10 </bitbake/log/?h=yocto-4.0.10>`
+- Git Revision: :oe_git:`0c6f86b60cfba67c20733516957c0a654eb2b44c </bitbake/commit/?id=0c6f86b60cfba67c20733516957c0a654eb2b44c>`
+- Release Artefact: bitbake-0c6f86b60cfba67c20733516957c0a654eb2b44c
+- sha: 4caa94ee4d644017b0cc51b702e330191677f7d179018cbcec8b1793949ebc74
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.10/bitbake-0c6f86b60cfba67c20733516957c0a654eb2b44c.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.10/bitbake-0c6f86b60cfba67c20733516957c0a654eb2b44c.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.10 </yocto-docs/log/?h=yocto-4.0.10>`
+- Git Revision: :yocto_git:`8388be749806bd0bf4fccf1005dae8f643aa4ef4 </yocto-docs/commit/?id=8388be749806bd0bf4fccf1005dae8f643aa4ef4>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.11.rst b/documentation/migration-guides/release-notes-4.0.11.rst
new file mode 100644
index 0000000000..8a15884908
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.11.rst
@@ -0,0 +1,214 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.11 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.11
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- cups: Fix :cve:`2023-32324`
+- curl: Fix :cve:`2023-28319`, :cve:`2023-28320`, :cve:`2023-28321` and :cve:`2023-28322`
+- git: Ignore :cve:`2023-25815`
+- go: Fix :cve:`2023-24539` and :cve:`2023-24540`
+- nasm: Fix :cve:`2022-46457`
+- openssh: Fix :cve:`2023-28531`
+- openssl: Fix :cve:`2023-1255` and :cve:`2023-2650`
+- perl: Fix :cve:`2023-31484`
+- python3-requests: Fix for :cve:`2023-32681`
+- sysstat: Fix :cve:`2023-33204`
+- vim: Fix :cve:`2023-2426`
+- webkitgtk: fix :cve:`2022-42867`, :cve:`2022-46691`, :cve:`2022-46699` and :cve:`2022-46700`
+
+
+Fixes in Yocto-4.0.11
+~~~~~~~~~~~~~~~~~~~~~
+
+- Revert "docs: conf.py: fix cve extlinks caption for sphinx <4.0"
+- Revert "ipk: Decode byte data to string in manifest handling"
+- avahi: fix D-Bus introspection
+- build-appliance-image: Update to kirkstone head revision
+- conf.py: add macro for Mitre CVE links
+- conf: add nice level to the hash config ignred variables
+- cpio: Fix wrong CRC with ASCII CRC for large files
+- cve-update-nvd2-native: added the missing http import
+- cve-update-nvd2-native: new CVE database fetcher
+- dhcpcd: use git instead of tarballs
+- e2fsprogs: fix ptest bug for second running
+- gcc-runtime: Use static dummy libstdc++
+- glibc: stable 2.35 branch updates (cbceb903c4d7)
+- go.bbclass: don't use test to check output from ls
+- gstreamer1.0: Upgrade to 1.20.6
+- iso-codes: Upgrade to 4.15.0
+- kernel-devicetree: allow specification of dtb directory
+- kernel-devicetree: make shell scripts posix compliant
+- kernel-devicetree: recursively search for dtbs
+- kernel: don't force PAHOLE=false
+- kmscube: Correct :term:`DEPENDS` to avoid overwrite
+- lib/terminal.py: Add urxvt terminal
+- license.bbclass: Include :term:`LICENSE` in the output when it fails to parse
+- linux-yocto/5.10: Upgrade to v5.10.180
+- linux-yocto/5.15: Upgrade to v5.15.113
+- llvm: backport a fix for build with gcc-13
+- maintainers.inc: Fix email address typo
+- maintainers.inc: Move repo to unassigned
+- migration-guides: add release notes for 4.0.10
+- migration-guides: use new cve_mitre macro
+- nghttp2: Deleted the entries for -client and -server, and removed a dependency on them from the main package.
+- oeqa/selftest/cases/devtool.py: skip all tests require folder a git repo
+- openssh: Remove BSD-4-clause contents completely from codebase
+- openssl: Upgrade to 3.0.9
+- overview-manual: concepts.rst: Fix a typo
+- p11-kit: add native to :term:`BBCLASSEXTEND`
+- package: enable recursion on file globs
+- package_manager/ipk: fix config path generation in _create_custom_config()
+- piglit: Add :term:`PACKAGECONFIG` for glx and opencl
+- piglit: Add missing glslang dependencies
+- piglit: Fix build time dependency
+- poky.conf: bump version for 4.0.11
+- profile-manual: fix blktrace remote usage instructions
+- quilt: Fix merge.test race condition
+- ref-manual: add clarification for :term:`SRCREV`
+- selftest/reproducible: Allow native/cross reuse in test
+- staging.bbclass: do not add extend_recipe_sysroot to prefuncs of prepare_recipe_sysroot
+- systemd-networkd: backport fix for rm unmanaged wifi
+- systemd-systemctl: fix instance template WantedBy symlink construction
+- systemd-systemctl: support instance expansion in WantedBy
+- uninative: Upgrade to 3.10 to support gcc 13
+- uninative: Upgrade to 4.0 to include latest gcc 13.1.1
+- vim: Upgrade to 9.0.1527
+- waffle: Upgrade to 1.7.2
+- weston: add xwayland to :term:`DEPENDS` for :term:`PACKAGECONFIG` xwayland
+
+
+Known Issues in Yocto-4.0.11
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.11
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alexander Kanavin
+- Andrew Jeffery
+- Archana Polampalli
+- Bhabu Bindu
+- Bruce Ashfield
+- C. Andy Martin
+- Chen Qi
+- Daniel Ammann
+- Deepthi Hemraj
+- Ed Beroset
+- Eero Aaltonen
+- Enrico Jörns
+- Hannu Lounento
+- Hitendra Prajapati
+- Ian Ray
+- Jan Luebbe
+- Jan Vermaete
+- Khem Raj
+- Lee Chee Yang
+- Lei Maohui
+- Lorenzo Arena
+- Marek Vasut
+- Marta Rybczynska
+- Martin Jansa
+- Martin Siegumfeldt
+- Michael Halstead
+- Michael Opdenacker
+- Ming Liu
+- Narpat Mali
+- Omkar Patil
+- Pablo Saavedra
+- Pavel Zhukov
+- Peter Kjellerstedt
+- Peter Marko
+- Qiu Tingting
+- Quentin Schulz
+- Randolph Sapp
+- Randy MacLeod
+- Ranjitsinh Rathod
+- Richard Purdie
+- Riyaz Khan
+- Sakib Sajal
+- Sanjay Chitroda
+- Soumya Sambu
+- Steve Sakoman
+- Thomas Roos
+- Tom Hochstein
+- Vivek Kumbhar
+- Wang Mingyu
+- Yogita Urade
+- Zoltan Boszormenyi
+
+
+Repositories / Downloads for Yocto-4.0.11
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.11 </poky/log/?h=yocto-4.0.11>`
+- Git Revision: :yocto_git:`fc697fe87412b9b179ae3a68d266ace85bb1fcc6 </poky/commit/?id=fc697fe87412b9b179ae3a68d266ace85bb1fcc6>`
+- Release Artefact: poky-fc697fe87412b9b179ae3a68d266ace85bb1fcc6
+- sha: d42ab1b76b9d8ab164d86dc0882c908658f6b5be0742b13a71531068f6a5ee98
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.11/poky-fc697fe87412b9b179ae3a68d266ace85bb1fcc6.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.11/poky-fc697fe87412b9b179ae3a68d266ace85bb1fcc6.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.11 </openembedded-core/log/?h=yocto-4.0.11>`
+- Git Revision: :oe_git:`7949e786cf8e50f716ff1f1c4797136637205e0c </openembedded-core/commit/?id=7949e786cf8e50f716ff1f1c4797136637205e0c>`
+- Release Artefact: oecore-7949e786cf8e50f716ff1f1c4797136637205e0c
+- sha: 3bda3f7d15961bad5490faf3194709528591a97564b5eae3da7345b63be20334
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.11/oecore-7949e786cf8e50f716ff1f1c4797136637205e0c.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.11/oecore-7949e786cf8e50f716ff1f1c4797136637205e0c.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.11 </meta-mingw/log/?h=yocto-4.0.11>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.11/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.11/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.11 </meta-gplv2/log/?h=yocto-4.0.11>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.11/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.11/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.11 </bitbake/log/?h=yocto-4.0.11>`
+- Git Revision: :oe_git:`0c6f86b60cfba67c20733516957c0a654eb2b44c </bitbake/commit/?id=0c6f86b60cfba67c20733516957c0a654eb2b44c>`
+- Release Artefact: bitbake-0c6f86b60cfba67c20733516957c0a654eb2b44c
+- sha: 4caa94ee4d644017b0cc51b702e330191677f7d179018cbcec8b1793949ebc74
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.11/bitbake-0c6f86b60cfba67c20733516957c0a654eb2b44c.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.11/bitbake-0c6f86b60cfba67c20733516957c0a654eb2b44c.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.11 </yocto-docs/log/?h=yocto-4.0.11>`
+- Git Revision: :yocto_git:`6d16d2bde0aa32276a035ee49703e6eea7c7b29a </yocto-docs/commit/?id=6d16d2bde0aa32276a035ee49703e6eea7c7b29a>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.12.rst b/documentation/migration-guides/release-notes-4.0.12.rst
new file mode 100644
index 0000000000..0ea92a453d
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.12.rst
@@ -0,0 +1,277 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.12 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.12
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- bind: Fix :cve:`2023-2828` and :cve:`2023-2911`
+- cups: Fix :cve:`2023-34241`
+- curl: Add follow-up patch for :cve:`2023-28320`
+- dbus: Fix :cve:`2023-34969`
+- dmidecode: fix :cve:`2023-30630`
+- ghostscript: fix :cve:`2023-36664`
+- go: fix :cve_mitre:`2023-24531`, :cve:`2023-24536`, :cve:`2023-29400`, :cve:`2023-29402`, :cve:`2023-29404`, :cve:`2023-29405` and :cve:`2023-29406`
+- libarchive: Ignore :cve:`2023-30571`
+- libcap: Fix :cve:`2023-2602` and :cve:`2023-2603`
+- libjpeg-turbo: Fix :cve:`2023-2804`
+- libpcre2: Fix :cve:`2022-41409`
+- libtiff: fix :cve:`2023-26965`
+- libwebp: Fix :cve:`2023-1999`
+- libx11: Fix :cve:`2023-3138`
+- libxpm: Fix :cve:`2022-44617`
+- ninja: Ignore :cve:`2021-4336`
+- openssh: Fix :cve:`2023-38408`
+- openssl: Fix :cve:`2023-2975`, :cve:`2023-3446` and :cve:`2023-3817`
+- perl: Fix :cve:`2023-31486`
+- python3: Ignore :cve:`2023-36632`
+- qemu: Fix :cve:`2023-0330`, :cve_mitre:`2023-2861`, :cve_mitre:`2023-3255` and :cve_mitre:`2023-3301`
+- sqlite3: Fix :cve:`2023-36191`
+- tiff: Fix :cve:`2023-0795`, :cve:`2023-0796`, :cve:`2023-0797`, :cve:`2023-0798`, :cve:`2023-0799`, :cve:`2023-25433`, :cve:`2023-25434` and :cve:`2023-25435`
+- vim: Fix :cve:`2023-2609` and :cve:`2023-2610`
+
+
+Fixes in Yocto-4.0.12
+~~~~~~~~~~~~~~~~~~~~~
+
+- babeltrace2: Always use BFD linker when building tests with ld-is-lld distro feature
+- babeltrace2: upgrade to 2.0.5
+- bitbake.conf: add unzstd in :term:`HOSTTOOLS`
+- bitbake: bitbake-layers: initialize tinfoil before registering command line arguments
+- bitbake: runqueue: Fix deferred task/multiconfig race issue
+- blktrace: ask for python3 specifically
+- build-appliance-image: Update to kirkstone head revision
+- cmake: Fix CMAKE_SYSTEM_PROCESSOR setting for SDK
+- connman: fix warning by specifying runstatedir at configure time
+- cpio: Replace the "fix wrong CRC with ASCII CRC for large files" patch with an upstream backport
+- cve-update-nvd2-native: actually use API keys
+- cve-update-nvd2-native: always pass str for json.loads()
+- cve-update-nvd2-native: fix cvssV3 metrics
+- cve-update-nvd2-native: handle all configuration nodes, not just first
+- cve-update-nvd2-native: increase retry count
+- cve-update-nvd2-native: log a little more
+- cve-update-nvd2-native: retry all errors and sleep between retries
+- cve-update-nvd2-native: use exact times, don't truncate
+- dbus: upgrade to 1.14.8
+- devtool: Fix the wrong variable in srcuri_entry
+- diffutils: upgrade to 3.10
+- docs: ref-manual: terms: fix typos in :term:`SPDX` term
+- fribidi: upgrade to 1.0.13
+- gcc: upgrade to v11.4
+- gcc-testsuite: Fix ppc cpu specification
+- gcc: don't pass --enable-standard-branch-protection
+- gcc: fix runpath errors in cc1 binary
+- grub: submit determinism.patch upstream
+- image_types: Fix reproducible builds for initramfs and UKI images
+- kernel: add missing path to search for debug files
+- kmod: remove unused ptest.patch
+- layer.conf: Add missing dependency exclusion
+- libassuan: upgrade to 2.5.6
+- libksba: upgrade to 1.6.4
+- libpng: Add ptest for libpng
+- libxcrypt: fix build with perl-5.38 and use master branch
+- libxcrypt: fix hard-coded ".so" extension
+- libxpm: upgrade to 3.5.16
+- linux-firmware: upgrade to 20230515
+- linux-yocto/5.10: cfg: fix DECNET configuration warning
+- linux-yocto/5.10: update to v5.10.185
+- linux-yocto/5.15: cfg: fix DECNET configuration warning
+- linux-yocto/5.15: update to v5.15.120
+- logrotate: Do not create logrotate.status file
+- lttng-ust: upgrade to 2.13.6
+- machine/arch-arm64: add -mbranch-protection=standard
+- maintainers.inc: correct Carlos Rafael Giani's email address
+- maintainers.inc: correct unassigned entries
+- maintainers.inc: unassign Adrian Bunk from wireless-regdb
+- maintainers.inc: unassign Alistair Francis from opensbi
+- maintainers.inc: unassign Andreas Müller from itstool entry
+- maintainers.inc: unassign Pascal Bach from cmake entry
+- maintainers.inc: unassign Ricardo Neri from ovmf
+- maintainers.inc: unassign Richard Weinberger from erofs-utils entry
+- mdadm: fix 07revert-inplace ptest
+- mdadm: fix segfaults when running ptests
+- mdadm: fix util-linux ptest dependency
+- mdadm: skip running known broken ptests
+- meson.bbclass: Point to llvm-config from native sysroot
+- meta: lib: oe: npm_registry: Add more safe characters
+- migration-guides: add release notes for 4.0.11
+- minicom: remove unused patch files
+- mobile-broadband-provider-info: upgrade to 20230416
+- oe-depends-dot: Handle new format for task-depends.dot
+- oeqa/runtime/cases/rpm: fix wait_for_no_process_for_user failure case
+- oeqa/selftest/bbtests: add non-existent prefile/postfile tests
+- oeqa/selftest/devtool: add unit test for "devtool add -b"
+- openssl: Upgrade to 3.0.10
+- openssl: add PERLEXTERNAL path to test its existence
+- openssl: use a glob on the PERLEXTERNAL to track updates on the path
+- package.bbclass: move field data processing before variable processing in process_pkgconfig
+- pm-utils: fix multilib conflicts
+- poky.conf: bump version for 4.0.12
+- psmisc: Set :term:`ALTERNATIVE` for pstree to resolve conflict with busybox
+- pybootchartgui: show elapsed time for each task
+- python3: fix missing comma in get_module_deps3.py
+- python3: upgrade to 3.10.12
+- recipetool: Fix inherit in created -native* recipes
+- ref-manual: add LTS and Mixin terms
+- ref-manual: document image-specific variant of :term:`INCOMPATIBLE_LICENSE`
+- ref-manual: release-process: update for LTS releases
+- rust-llvm: backport a fix for build with gcc-13
+- scripts/runqemu: allocate unfsd ports in a way that doesn't race or clash with unrelated processes
+- scripts/runqemu: split lock dir creation into a reusable function
+- sdk.py: error out when moving file fails
+- sdk.py: fix moving dnf contents
+- selftest/reproducible.py: support different build targets
+- selftest/license: Exclude from world
+- selftest/reproducible: Allow choosing the package manager
+- serf: upgrade to 1.3.10
+- strace: Disable failing test
+- strace: Merge two similar patches
+- strace: Update patches/tests with upstream fixes
+- sysfsutils: fetch a supported fork from github
+- systemd-systemctl: fix errors in instance name expansion
+- systemd: Backport "nspawn: make sure host root can write to the uidmapped mounts we prepare for the container payload"
+- tzdata: upgrade to 2023c
+- uboot-extlinux-config.bbclass: fix old override syntax in comment
+- unzip: fix configure check for cross compilation
+- useradd-staticids.bbclass: improve error message
+- util-linux: add alternative links for ipcs,ipcrm
+- v86d: Improve kernel dependency
+- vim: upgrade to 9.0.1592
+- wget: upgrade to 1.21.4
+- wic: Add dependencies for erofs-utils
+- wireless-regdb: upgrade to 2023.05.03
+- xdpyinfo: upgrade to 1.3.4
+- zip: fix configure check by using _Static_assert
+
+
+Known Issues in Yocto-4.0.12
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.12
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alberto Planas
+- Alexander Kanavin
+- Alexander Sverdlin
+- Andrej Valek
+- Archana Polampalli
+- BELOUARGA Mohamed
+- Benjamin Bouvier
+- Bruce Ashfield
+- Charlie Wu
+- Chen Qi
+- Etienne Cordonnier
+- Fabien Mahot
+- Frieder Paape
+- Frieder Schrempf
+- Heiko Thole
+- Hitendra Prajapati
+- Jermain Horsman
+- Jose Quaresma
+- Kai Kang
+- Khem Raj
+- Lee Chee Yang
+- Marc Ferland
+- Marek Vasut
+- Martin Jansa
+- Mauro Queiros
+- Michael Opdenacker
+- Mikko Rapeli
+- Nikhil R
+- Ovidiu Panait
+- Peter Marko
+- Poonam Jadhav
+- Quentin Schulz
+- Richard Purdie
+- Ross Burton
+- Rusty Howell
+- Sakib Sajal
+- Soumya Sambu
+- Steve Sakoman
+- Sundeep KOKKONDA
+- Tim Orling
+- Tom Hochstein
+- Trevor Gamblin
+- Vijay Anusuri
+- Vivek Kumbhar
+- Wang Mingyu
+- Xiangyu Chen
+- Yoann Congal
+- Yogita Urade
+- Yuta Hayama
+
+
+Repositories / Downloads for Yocto-4.0.12
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.12 </poky/log/?h=yocto-4.0.12>`
+- Git Revision: :yocto_git:`d6b8790370500b99ca11f0d8a05c39b661ab2ba6 </poky/commit/?id=d6b8790370500b99ca11f0d8a05c39b661ab2ba6>`
+- Release Artefact: poky-d6b8790370500b99ca11f0d8a05c39b661ab2ba6
+- sha: 35f0390e0c5a12f403ed471c0b1254c13cbb9d7c7b46e5a3538e63e36c1ac280
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.12/poky-d6b8790370500b99ca11f0d8a05c39b661ab2ba6.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.12/poky-d6b8790370500b99ca11f0d8a05c39b661ab2ba6.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.12 </openembedded-core/log/?h=yocto-4.0.12>`
+- Git Revision: :oe_git:`e1a604db8d2cf8782038b4016cc2e2052467333b </openembedded-core/commit/?id=e1a604db8d2cf8782038b4016cc2e2052467333b>`
+- Release Artefact: oecore-e1a604db8d2cf8782038b4016cc2e2052467333b
+- sha: 8b302eb3f3ffe5643f88bc6e4ae8f9a5cda63544d67e04637ecc4197e9750a1d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.12/oecore-e1a604db8d2cf8782038b4016cc2e2052467333b.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.12/oecore-e1a604db8d2cf8782038b4016cc2e2052467333b.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.12 </meta-mingw/log/?h=yocto-4.0.12>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.12/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.12/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.12 </meta-gplv2/log/?h=yocto-4.0.12>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.12/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.12/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.12 </bitbake/log/?h=yocto-4.0.12>`
+- Git Revision: :oe_git:`41b6684489d0261753344956042be2cc4adb0159 </bitbake/commit/?id=41b6684489d0261753344956042be2cc4adb0159>`
+- Release Artefact: bitbake-41b6684489d0261753344956042be2cc4adb0159
+- sha: efa2b1c4d0be115ed3960750d1e4ed958771b2db6d7baee2d13ad386589376e8
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.12/bitbake-41b6684489d0261753344956042be2cc4adb0159.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.12/bitbake-41b6684489d0261753344956042be2cc4adb0159.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.12 </yocto-docs/log/?h=yocto-4.0.12>`
+- Git Revision: :yocto_git:`4dfef81ac6164764c6541e39a9fef81d49227096 </yocto-docs/commit/?id=4dfef81ac6164764c6541e39a9fef81d49227096>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.13.rst b/documentation/migration-guides/release-notes-4.0.13.rst
new file mode 100644
index 0000000000..3c096c356f
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.13.rst
@@ -0,0 +1,271 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.13 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.13
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- bind: Fix :cve:`2023-2829`
+- binutils: Fix :cve:`2022-48065`
+- busybox: Fix :cve:`2022-48174`
+- cups: Fix :cve:`2023-32360`
+- curl: Fix :cve:`2023-32001`
+- dmidecode: Fix :cve:`2023-30630`
+- dropbear: Fix :cve:`2023-36328`
+- ffmpeg: Ignore :cve:`2023-39018`
+- file: Fix :cve:`2022-48554`
+- flac: Fix :cve:`2020-22219`
+- gcc: Fix :cve:`2023-4039`
+- gdb: Fix :cve:`2023-39128`
+- ghostscript: Fix :cve:`2023-38559`
+- glib-2.0: Fix :cve:`2023-29499`, :cve:`2023-32611`, :cve:`2023-32636`, :cve:`2023-32643` and :cve:`2023-32665`
+- go: Fix :cve:`2023-29409` and :cve:`2023-39319`
+- gstreamer1.0-plugins-bad: Fix :cve_mitre:`2023-37329`
+- gstreamer1.0-plugins-base: Fix :cve_mitre:`2023-37328`
+- gstreamer1.0-plugins-good: Fix :cve_mitre:`2023-37327`
+- inetutils: Fix :cve:`2023-40303`
+- json-c: Fix :cve:`2021-32292`
+- librsvg: Fix :cve:`2023-38633`
+- libssh2: Fix :cve:`2020-22218`
+- libtiff: Fix :cve:`2023-26966`
+- libxml2: Fix :cve:`2023-39615`
+- linux-yocto/5.15: Ignore :cve:`2003-1604`, :cve:`2004-0230`, :cve:`2006-3635`, :cve:`2006-5331`, :cve:`2006-6128`, :cve:`2007-4774`, :cve:`2007-6761`, :cve:`2007-6762`, :cve:`2008-7316`, :cve:`2009-2692`, :cve:`2010-0008`, :cve:`2010-3432`, :cve:`2010-4648`, :cve:`2010-5313`, :cve:`2010-5328`, :cve:`2010-5329`, :cve:`2010-5331`, :cve:`2010-5332`, :cve:`2011-4098`, :cve:`2011-4131`, :cve:`2011-4915`, :cve:`2011-5321`, :cve:`2011-5327`, :cve:`2012-0957`, :cve:`2012-2119`, :cve:`2012-2136`, :cve:`2012-2137`, :cve:`2012-2313`, :cve:`2012-2319`, :cve:`2012-2372`, :cve:`2012-2375`, :cve:`2012-2390`, :cve:`2012-2669`, :cve:`2012-2744`, :cve:`2012-2745`, :cve:`2012-3364`, :cve:`2012-3375`, :cve:`2012-3400`, :cve:`2012-3412`, :cve:`2012-3430`, :cve:`2012-3510`, :cve:`2012-3511`, :cve:`2012-3520`, :cve:`2012-3552`, :cve:`2012-4398`, :cve:`2012-4444`, :cve:`2012-4461`, :cve:`2012-4467`, :cve:`2012-4508`, :cve:`2012-4530`, :cve:`2012-4565`, :cve:`2012-5374`, :cve:`2012-5375`, :cve:`2012-5517`, :cve:`2012-6536`, :cve:`2012-6537`, :cve:`2012-6538`, :cve:`2012-6539`, :cve:`2012-6540`, :cve:`2012-6541`, :cve:`2012-6542`, :cve:`2012-6543`, :cve:`2012-6544`, :cve:`2012-6545`, :cve:`2012-6546`, :cve:`2012-6547`, :cve:`2012-6548`, :cve:`2012-6549`, :cve:`2012-6638`, :cve:`2012-6647`, :cve:`2012-6657`, :cve:`2012-6689`, :cve:`2012-6701`, :cve:`2012-6703`, :cve:`2012-6704`, :cve:`2012-6712`, :cve:`2013-0160`, :cve:`2013-0190`, :cve:`2013-0216`, :cve:`2013-0217`, :cve:`2013-0228`, :cve:`2013-0231`, :cve:`2013-0268`, :cve:`2013-0290`, :cve:`2013-0309`, :cve:`2013-0310`, :cve:`2013-0311`, :cve:`2013-0313`, :cve:`2013-0343`, :cve:`2013-0349`, :cve:`2013-0871`, :cve:`2013-0913`, :cve:`2013-0914`, :cve:`2013-1059`, :cve:`2013-1763`, :cve:`2013-1767`, :cve:`2013-1772`, :cve:`2013-1773`, :cve:`2013-1774`, :cve:`2013-1792`, :cve:`2013-1796`, :cve:`2013-1797`, :cve:`2013-1798`, :cve:`2013-1819`, :cve:`2013-1826`, :cve:`2013-1827`, :cve:`2013-1828`, :cve:`2013-1848`, :cve:`2013-1858`, :cve:`2013-1860`, :cve:`2013-1928`, :cve:`2013-1929`, :cve:`2013-1943`, :cve:`2013-1956`, :cve:`2013-1957`, :cve:`2013-1958`, :cve:`2013-1959`, :cve:`2013-1979`, :cve:`2013-2015`, :cve:`2013-2017`, :cve:`2013-2058`, :cve:`2013-2094`, :cve:`2013-2128`, :cve:`2013-2140`, :cve:`2013-2141`, :cve:`2013-2146`, :cve:`2013-2147`, :cve:`2013-2148`, :cve:`2013-2164`, :cve:`2013-2206`, :cve:`2013-2232`, :cve:`2013-2234`, :cve:`2013-2237`, :cve:`2013-2546`, :cve:`2013-2547`, :cve:`2013-2548`, :cve:`2013-2596`, :cve:`2013-2634`, :cve:`2013-2635`, :cve:`2013-2636`, :cve:`2013-2850`, :cve:`2013-2851`, :cve:`2013-2852`, :cve:`2013-2888`, :cve:`2013-2889`, :cve:`2013-2890`, :cve:`2013-2891`, :cve:`2013-2892`, :cve:`2013-2893`, :cve:`2013-2894`, :cve:`2013-2895`, :cve:`2013-2896`, :cve:`2013-2897`, :cve:`2013-2898`, :cve:`2013-2899`, :cve:`2013-2929`, :cve:`2013-2930`, :cve:`2013-3076`, :cve:`2013-3222`, :cve:`2013-3223`, :cve:`2013-3224`, :cve:`2013-3225`, :cve:`2013-3226`, :cve:`2013-3227`, :cve:`2013-3228`, :cve:`2013-3229`, :cve:`2013-3230`, :cve:`2013-3231`, :cve:`2013-3232`, :cve:`2013-3233`, :cve:`2013-3234`, :cve:`2013-3235`, :cve:`2013-3236`, :cve:`2013-3237`, :cve:`2013-3301`, :cve:`2013-3302`, :cve:`2013-4125`, :cve:`2013-4127`, :cve:`2013-4129`, :cve:`2013-4162`, :cve:`2013-4163`, :cve:`2013-4205`, :cve:`2013-4220`, :cve:`2013-4247`, :cve:`2013-4254`, :cve:`2013-4270`, :cve:`2013-4299`, :cve:`2013-4300`, :cve:`2013-4312`, :cve:`2013-4343`, :cve:`2013-4345`, :cve:`2013-4348`, :cve:`2013-4350`, :cve:`2013-4387`, :cve:`2013-4470`, 
:cve:`2013-4483`, :cve:`2013-4511`, :cve:`2013-4512`, :cve:`2013-4513`, :cve:`2013-4514`, :cve:`2013-4515`, :cve:`2013-4516`, :cve:`2013-4563`, :cve:`2013-4579`, :cve:`2013-4587`, :cve:`2013-4588`, :cve:`2013-4591`, :cve:`2013-4592`, :cve:`2013-5634`, :cve:`2013-6282`, :cve:`2013-6367`, :cve:`2013-6368`, :cve:`2013-6376`, :cve:`2013-6378`, :cve:`2013-6380`, :cve:`2013-6381`, :cve:`2013-6382`, :cve:`2013-6383`, :cve:`2013-6431`, :cve:`2013-6432`, :cve:`2013-6885`, :cve:`2013-7026`, :cve:`2013-7027`, :cve:`2013-7263`, :cve:`2013-7264`, :cve:`2013-7265`, :cve:`2013-7266`, :cve:`2013-7267`, :cve:`2013-7268`, :cve:`2013-7269`, :cve:`2013-7270`, :cve:`2013-7271`, :cve:`2013-7281`, :cve:`2013-7339`, :cve:`2013-7348`, :cve:`2013-7421`, :cve:`2013-7446`, :cve:`2013-7470`, :cve:`2014-0038`, :cve:`2014-0049`, :cve:`2014-0055`, :cve:`2014-0069`, :cve:`2014-0077`, :cve:`2014-0100`, :cve:`2014-0101`, :cve:`2014-0102`, :cve:`2014-0131`, :cve:`2014-0155`, :cve:`2014-0181`, :cve:`2014-0196`, :cve:`2014-0203`, :cve:`2014-0205`, :cve:`2014-0206`, :cve:`2014-1438`, :cve:`2014-1444`, :cve:`2014-1445`, :cve:`2014-1446`, :cve:`2014-1690`, :cve:`2014-1737`, :cve:`2014-1738`, :cve:`2014-1739`, :cve:`2014-1874`, :cve:`2014-2038`, :cve:`2014-2039`, :cve:`2014-2309`, :cve:`2014-2523`, :cve:`2014-2568`, :cve:`2014-2580`, :cve:`2014-2672`, :cve:`2014-2673`, :cve:`2014-2678`, :cve:`2014-2706`, :cve:`2014-2739`, :cve:`2014-2851`, :cve:`2014-2889`, :cve:`2014-3122`, :cve:`2014-3144`, :cve:`2014-3145`, :cve:`2014-3153`, :cve:`2014-3180`, :cve:`2014-3181`, :cve:`2014-3182`, :cve:`2014-3183`, :cve:`2014-3184`, :cve:`2014-3185`, :cve:`2014-3186`, :cve:`2014-3534`, :cve:`2014-3535`, :cve:`2014-3601`, :cve:`2014-3610`, :cve:`2014-3611`, :cve:`2014-3631`, :cve:`2014-3645`, :cve:`2014-3646`, :cve:`2014-3647`, :cve:`2014-3673`, :cve:`2014-3687`, :cve:`2014-3688`, :cve:`2014-3690`, :cve:`2014-3917`, :cve:`2014-3940`, :cve:`2014-4014`, :cve:`2014-4027`, :cve:`2014-4157`, :cve:`2014-4171`, :cve:`2014-4508`, :cve:`2014-4608`, :cve:`2014-4611`, :cve:`2014-4652`, :cve:`2014-4653`, :cve:`2014-4654`, :cve:`2014-4655`, :cve:`2014-4656`, :cve:`2014-4667`, :cve:`2014-4699`, :cve:`2014-4943`, :cve:`2014-5045`, :cve:`2014-5077`, :cve:`2014-5206`, :cve:`2014-5207`, :cve:`2014-5471`, :cve:`2014-5472`, :cve:`2014-6410`, :cve:`2014-6416`, :cve:`2014-6417`, :cve:`2014-6418`, :cve:`2014-7145`, :cve:`2014-7283`, :cve:`2014-7284`, :cve:`2014-7822`, :cve:`2014-7825`, :cve:`2014-7826`, :cve:`2014-7841`, :cve:`2014-7842`, :cve:`2014-7843`, :cve:`2014-7970`, :cve:`2014-7975`, :cve:`2014-8086`, :cve:`2014-8133`, :cve:`2014-8134`, :cve:`2014-8159`, :cve:`2014-8160`, :cve:`2014-8171`, :cve:`2014-8172`, :cve:`2014-8173`, :cve:`2014-8369`, :cve:`2014-8480`, :cve:`2014-8481`, :cve:`2014-8559`, :cve:`2014-8709`, :cve:`2014-8884`, :cve:`2014-8989`, :cve:`2014-9090`, :cve:`2014-9322`, :cve:`2014-9419`, :cve:`2014-9420`, :cve:`2014-9428`, :cve:`2014-9529`, :cve:`2014-9584`, :cve:`2014-9585`, :cve:`2014-9644`, :cve:`2014-9683`, :cve:`2014-9710`, :cve:`2014-9715`, :cve:`2014-9717`, :cve:`2014-9728`, :cve:`2014-9729`, :cve:`2014-9730`, :cve:`2014-9731`, :cve:`2014-9803`, :cve:`2014-9870`, :cve:`2014-9888`, :cve:`2014-9895`, :cve:`2014-9903`, :cve:`2014-9904`, :cve:`2014-9914`, :cve:`2014-9922`, :cve:`2014-9940`, :cve:`2015-0239`, :cve:`2015-0274`, :cve:`2015-0275`, :cve:`2015-1333`, :cve:`2015-1339`, :cve:`2015-1350`, :cve:`2015-1420`, :cve:`2015-1421`, :cve:`2015-1465`, :cve:`2015-1573`, :cve:`2015-1593`, :cve:`2015-1805`, :cve:`2015-2041`, :cve:`2015-2042`, 
:cve:`2015-2150`, :cve:`2015-2666`, :cve:`2015-2672`, :cve:`2015-2686`, :cve:`2015-2830`, :cve:`2015-2922`, :cve:`2015-2925`, :cve:`2015-3212`, :cve:`2015-3214`, :cve:`2015-3288`, :cve:`2015-3290`, :cve:`2015-3291`, :cve:`2015-3331`, :cve:`2015-3339`, :cve:`2015-3636`, :cve:`2015-4001`, :cve:`2015-4002`, :cve:`2015-4003`, :cve:`2015-4004`, :cve:`2015-4036`, :cve:`2015-4167`, :cve:`2015-4170`, :cve:`2015-4176`, :cve:`2015-4177`, :cve:`2015-4178`, :cve:`2015-4692`, :cve:`2015-4700`, :cve:`2015-5156`, :cve:`2015-5157`, :cve:`2015-5257`, :cve:`2015-5283`, :cve:`2015-5307`, :cve:`2015-5327`, :cve:`2015-5364`, :cve:`2015-5366`, :cve:`2015-5697`, :cve:`2015-5706`, :cve:`2015-5707`, :cve:`2015-6252`, :cve:`2015-6526`, :cve:`2015-6937`, :cve:`2015-7509`, :cve:`2015-7513`, :cve:`2015-7515`, :cve:`2015-7550`, :cve:`2015-7566`, :cve:`2015-7613`, :cve:`2015-7799`, :cve:`2015-7833`, :cve:`2015-7872`, :cve:`2015-7884`, :cve:`2015-7885`, :cve:`2015-7990`, :cve:`2015-8104`, :cve:`2015-8215`, :cve:`2015-8324`, :cve:`2015-8374`, :cve:`2015-8539`, :cve:`2015-8543`, :cve:`2015-8550`, :cve:`2015-8551`, :cve:`2015-8552`, :cve:`2015-8553`, :cve:`2015-8569`, :cve:`2015-8575`, :cve:`2015-8660`, :cve:`2015-8709`, :cve:`2015-8746`, :cve:`2015-8767`, :cve:`2015-8785`, :cve:`2015-8787`, :cve:`2015-8812`, :cve:`2015-8816`, :cve:`2015-8830`, :cve:`2015-8839`, :cve:`2015-8844`, :cve:`2015-8845`, :cve:`2015-8950`, :cve:`2015-8952`, :cve:`2015-8953`, :cve:`2015-8955`, :cve:`2015-8956`, :cve:`2015-8961`, :cve:`2015-8962`, :cve:`2015-8963`, :cve:`2015-8964`, :cve:`2015-8966`, :cve:`2015-8967`, :cve:`2015-8970`, :cve:`2015-9004`, :cve:`2015-9016`, :cve:`2015-9289`, :cve:`2016-0617`, :cve:`2016-0723`, :cve:`2016-0728`, :cve:`2016-0758`, :cve:`2016-0821`, :cve:`2016-0823`, :cve:`2016-10044`, :cve:`2016-10088`, :cve:`2016-10147`, :cve:`2016-10150`, :cve:`2016-10153`, :cve:`2016-10154`, :cve:`2016-10200`, :cve:`2016-10208`, :cve:`2016-10229`, :cve:`2016-10318`, :cve:`2016-10723`, :cve:`2016-10741`, :cve:`2016-10764`, :cve:`2016-10905`, :cve:`2016-10906`, :cve:`2016-10907`, :cve:`2016-1237`, :cve:`2016-1575`, :cve:`2016-1576`, :cve:`2016-1583`, :cve:`2016-2053`, :cve:`2016-2069`, :cve:`2016-2070`, :cve:`2016-2085`, :cve:`2016-2117`, :cve:`2016-2143`, :cve:`2016-2184`, :cve:`2016-2185`, :cve:`2016-2186`, :cve:`2016-2187`, :cve:`2016-2188`, :cve:`2016-2383`, :cve:`2016-2384`, :cve:`2016-2543`, :cve:`2016-2544`, :cve:`2016-2545`, :cve:`2016-2546`, :cve:`2016-2547`, :cve:`2016-2548`, :cve:`2016-2549`, :cve:`2016-2550`, :cve:`2016-2782`, :cve:`2016-2847`, :cve:`2016-3044`, :cve:`2016-3070`, :cve:`2016-3134`, :cve:`2016-3135`, :cve:`2016-3136`, :cve:`2016-3137`, :cve:`2016-3138`, :cve:`2016-3139`, :cve:`2016-3140`, :cve:`2016-3156`, :cve:`2016-3157`, :cve:`2016-3672`, :cve:`2016-3689`, :cve:`2016-3713`, :cve:`2016-3841`, :cve:`2016-3857`, :cve:`2016-3951`, :cve:`2016-3955`, :cve:`2016-3961`, :cve:`2016-4440`, :cve:`2016-4470`, :cve:`2016-4482`, :cve:`2016-4485`, :cve:`2016-4486`, :cve:`2016-4557`, :cve:`2016-4558`, :cve:`2016-4565`, :cve:`2016-4568`, :cve:`2016-4569`, :cve:`2016-4578`, :cve:`2016-4580`, :cve:`2016-4581`, :cve:`2016-4794`, :cve:`2016-4805`, :cve:`2016-4913`, :cve:`2016-4951`, :cve:`2016-4997`, :cve:`2016-4998`, :cve:`2016-5195`, :cve:`2016-5243`, :cve:`2016-5244`, :cve:`2016-5400`, :cve:`2016-5412`, :cve:`2016-5696`, :cve:`2016-5728`, :cve:`2016-5828`, :cve:`2016-5829`, :cve:`2016-6130`, :cve:`2016-6136`, :cve:`2016-6156`, :cve:`2016-6162`, :cve:`2016-6187`, :cve:`2016-6197`, :cve:`2016-6198`, :cve:`2016-6213`, 
:cve:`2016-6327`, :cve:`2016-6480`, :cve:`2016-6516`, :cve:`2016-6786`, :cve:`2016-6787`, :cve:`2016-6828`, :cve:`2016-7039`, :cve:`2016-7042`, :cve:`2016-7097`, :cve:`2016-7117`, :cve:`2016-7425`, :cve:`2016-7910`, :cve:`2016-7911`, :cve:`2016-7912`, :cve:`2016-7913`, :cve:`2016-7914`, :cve:`2016-7915`, :cve:`2016-7916`, :cve:`2016-7917`, :cve:`2016-8399`, :cve:`2016-8405`, :cve:`2016-8630`, :cve:`2016-8632`, :cve:`2016-8633`, :cve:`2016-8636`, :cve:`2016-8645`, :cve:`2016-8646`, :cve:`2016-8650`, :cve:`2016-8655`, :cve:`2016-8658`, :cve:`2016-8666`, :cve:`2016-9083`, :cve:`2016-9084`, :cve:`2016-9120`, :cve:`2016-9178`, :cve:`2016-9191`, :cve:`2016-9313`, :cve:`2016-9555`, :cve:`2016-9576`, :cve:`2016-9588`, :cve:`2016-9604`, :cve:`2016-9685`, :cve:`2016-9754`, :cve:`2016-9755`, :cve:`2016-9756`, :cve:`2016-9777`, :cve:`2016-9793`, :cve:`2016-9794`, :cve:`2016-9806`, :cve:`2016-9919`, :cve:`2017-0605`, :cve:`2017-0627`, :cve:`2017-0750`, :cve:`2017-0786`, :cve:`2017-0861`, :cve:`2017-1000`, :cve:`2017-1000111`, :cve:`2017-1000112`, :cve:`2017-1000251`, :cve:`2017-1000252`, :cve:`2017-1000253`, :cve:`2017-1000255`, :cve:`2017-1000363`, :cve:`2017-1000364`, :cve:`2017-1000365`, :cve:`2017-1000370`, :cve:`2017-1000371`, :cve:`2017-1000379`, :cve:`2017-1000380`, :cve:`2017-1000405`, :cve:`2017-1000407`, :cve:`2017-1000410`, :cve:`2017-10661`, :cve:`2017-10662`, :cve:`2017-10663`, :cve:`2017-10810`, :cve:`2017-10911`, :cve:`2017-11089`, :cve:`2017-11176`, :cve:`2017-11472`, :cve:`2017-11473`, :cve:`2017-11600`, :cve:`2017-12134`, :cve:`2017-12146`, :cve:`2017-12153`, :cve:`2017-12154`, :cve:`2017-12168`, :cve:`2017-12188`, :cve:`2017-12190`, :cve:`2017-12192`, :cve:`2017-12193`, :cve:`2017-12762`, :cve:`2017-13080`, :cve:`2017-13166`, :cve:`2017-13167`, :cve:`2017-13168`, :cve:`2017-13215`, :cve:`2017-13216`, :cve:`2017-13220`, :cve:`2017-13305`, :cve:`2017-13686`, :cve:`2017-13695`, :cve:`2017-13715`, :cve:`2017-14051`, :cve:`2017-14106`, :cve:`2017-14140`, :cve:`2017-14156`, :cve:`2017-14340`, :cve:`2017-14489`, :cve:`2017-14497`, :cve:`2017-14954`, :cve:`2017-14991`, :cve:`2017-15102`, :cve:`2017-15115`, :cve:`2017-15116`, :cve:`2017-15121`, :cve:`2017-15126`, :cve:`2017-15127`, :cve:`2017-15128`, :cve:`2017-15129`, :cve:`2017-15265`, :cve:`2017-15274`, :cve:`2017-15299`, :cve:`2017-15306`, :cve:`2017-15537`, :cve:`2017-15649`, :cve:`2017-15868`, :cve:`2017-15951`, :cve:`2017-16525`, :cve:`2017-16526`, :cve:`2017-16527`, :cve:`2017-16528`, :cve:`2017-16529`, :cve:`2017-16530`, :cve:`2017-16531`, :cve:`2017-16532`, :cve:`2017-16533`, :cve:`2017-16534`, :cve:`2017-16535`, :cve:`2017-16536`, :cve:`2017-16537`, :cve:`2017-16538`, :cve:`2017-16643`, :cve:`2017-16644`, :cve:`2017-16645`, :cve:`2017-16646`, :cve:`2017-16647`, :cve:`2017-16648`, :cve:`2017-16649`, :cve:`2017-16650`, :cve:`2017-16911`, :cve:`2017-16912`, :cve:`2017-16913`, :cve:`2017-16914`, :cve:`2017-16939`, :cve:`2017-16994`, :cve:`2017-16995`, :cve:`2017-16996`, :cve:`2017-17052`, :cve:`2017-17053`, :cve:`2017-17448`, :cve:`2017-17449`, :cve:`2017-17450`, :cve:`2017-17558`, :cve:`2017-17712`, :cve:`2017-17741`, :cve:`2017-17805`, :cve:`2017-17806`, :cve:`2017-17807`, :cve:`2017-17852`, :cve:`2017-17853`, :cve:`2017-17854`, :cve:`2017-17855`, :cve:`2017-17856`, :cve:`2017-17857`, :cve:`2017-17862`, :cve:`2017-17863`, :cve:`2017-17864`, :cve:`2017-17975`, :cve:`2017-18017`, :cve:`2017-18075`, :cve:`2017-18079`, :cve:`2017-18174`, :cve:`2017-18193`, :cve:`2017-18200`, :cve:`2017-18202`, :cve:`2017-18203`, :cve:`2017-18204`, 
:cve:`2017-18208`, :cve:`2017-18216`, :cve:`2017-18218`, :cve:`2017-18221`, :cve:`2017-18222`, :cve:`2017-18224`, :cve:`2017-18232`, :cve:`2017-18241`, :cve:`2017-18249`, :cve:`2017-18255`, :cve:`2017-18257`, :cve:`2017-18261`, :cve:`2017-18270`, :cve:`2017-18344`, :cve:`2017-18360`, :cve:`2017-18379`, :cve:`2017-18509`, :cve:`2017-18549`, :cve:`2017-18550`, :cve:`2017-18551`, :cve:`2017-18552`, :cve:`2017-18595`, :cve:`2017-2583`, :cve:`2017-2584`, :cve:`2017-2596`, :cve:`2017-2618`, :cve:`2017-2634`, :cve:`2017-2636`, :cve:`2017-2647`, :cve:`2017-2671`, :cve:`2017-5123`, :cve:`2017-5546`, :cve:`2017-5547`, :cve:`2017-5548`, :cve:`2017-5549`, :cve:`2017-5550`, :cve:`2017-5551`, :cve:`2017-5576`, :cve:`2017-5577`, :cve:`2017-5669`, :cve:`2017-5715`, :cve:`2017-5753`, :cve:`2017-5754`, :cve:`2017-5897`, :cve:`2017-5967`, :cve:`2017-5970`, :cve:`2017-5972`, :cve:`2017-5986`, :cve:`2017-6001`, :cve:`2017-6074`, :cve:`2017-6214`, :cve:`2017-6345`, :cve:`2017-6346`, :cve:`2017-6347`, :cve:`2017-6348`, :cve:`2017-6353`, :cve:`2017-6874`, :cve:`2017-6951`, :cve:`2017-7184`, :cve:`2017-7187`, :cve:`2017-7261`, :cve:`2017-7273`, :cve:`2017-7277`, :cve:`2017-7294`, :cve:`2017-7308`, :cve:`2017-7346`, :cve:`2017-7374`, :cve:`2017-7472`, :cve:`2017-7477`, :cve:`2017-7482`, :cve:`2017-7487`, :cve:`2017-7495`, :cve:`2017-7518`, :cve:`2017-7533`, :cve:`2017-7541`, :cve:`2017-7542`, :cve:`2017-7558`, :cve:`2017-7616`, :cve:`2017-7618`, :cve:`2017-7645`, :cve:`2017-7889`, :cve:`2017-7895`, :cve:`2017-7979`, :cve:`2017-8061`, :cve:`2017-8062`, :cve:`2017-8063`, :cve:`2017-8064`, :cve:`2017-8065`, :cve:`2017-8066`, :cve:`2017-8067`, :cve:`2017-8068`, :cve:`2017-8069`, :cve:`2017-8070`, :cve:`2017-8071`, :cve:`2017-8072`, :cve:`2017-8106`, :cve:`2017-8240`, :cve:`2017-8797`, :cve:`2017-8824`, :cve:`2017-8831`, :cve:`2017-8890`, :cve:`2017-8924`, :cve:`2017-8925`, :cve:`2017-9059`, :cve:`2017-9074`, :cve:`2017-9075`, :cve:`2017-9076`, :cve:`2017-9077`, :cve:`2017-9150`, :cve:`2017-9211`, :cve:`2017-9242`, :cve:`2017-9605`, :cve:`2017-9725`, :cve:`2017-9984`, :cve:`2017-9985`, :cve:`2017-9986`, :cve:`2018-1000004`, :cve:`2018-1000026`, :cve:`2018-1000028`, :cve:`2018-1000199`, :cve:`2018-1000200`, :cve:`2018-1000204`, :cve:`2018-10021`, :cve:`2018-10074`, :cve:`2018-10087`, :cve:`2018-10124`, :cve:`2018-10322`, :cve:`2018-10323`, :cve:`2018-1065`, :cve:`2018-1066`, :cve:`2018-10675`, :cve:`2018-1068`, :cve:`2018-10840`, :cve:`2018-10853`, :cve:`2018-1087`, :cve:`2018-10876`, :cve:`2018-10877`, :cve:`2018-10878`, :cve:`2018-10879`, :cve:`2018-10880`, :cve:`2018-10881`, :cve:`2018-10882`, :cve:`2018-10883`, :cve:`2018-10901`, :cve:`2018-10902`, :cve:`2018-1091`, :cve:`2018-1092`, :cve:`2018-1093`, :cve:`2018-10938`, :cve:`2018-1094`, :cve:`2018-10940`, :cve:`2018-1095`, :cve:`2018-1108`, :cve:`2018-1118`, :cve:`2018-1120`, :cve:`2018-11232`, :cve:`2018-1128`, :cve:`2018-1129`, :cve:`2018-1130`, :cve:`2018-11412`, :cve:`2018-11506`, :cve:`2018-11508`, :cve:`2018-12126`, :cve:`2018-12127`, :cve:`2018-12130`, :cve:`2018-12207`, :cve:`2018-12232`, :cve:`2018-12233`, :cve:`2018-12633`, :cve:`2018-12714`, :cve:`2018-12896`, :cve:`2018-12904`, :cve:`2018-13053`, :cve:`2018-13093`, :cve:`2018-13094`, :cve:`2018-13095`, :cve:`2018-13096`, :cve:`2018-13097`, :cve:`2018-13098`, :cve:`2018-13099`, :cve:`2018-13100`, :cve:`2018-13405`, :cve:`2018-13406`, :cve:`2018-14609`, :cve:`2018-14610`, :cve:`2018-14611`, :cve:`2018-14612`, :cve:`2018-14613`, :cve:`2018-14614`, :cve:`2018-14615`, :cve:`2018-14616`, :cve:`2018-14617`, 
:cve:`2018-14619`, :cve:`2018-14625`, :cve:`2018-14633`, :cve:`2018-14634`, :cve:`2018-14641`, :cve:`2018-14646`, :cve:`2018-14656`, :cve:`2018-14678`, :cve:`2018-14734`, :cve:`2018-15471`, :cve:`2018-15572`, :cve:`2018-15594`, :cve:`2018-16276`, :cve:`2018-16597`, :cve:`2018-16658`, :cve:`2018-16862`, :cve:`2018-16871`, :cve:`2018-16880`, :cve:`2018-16882`, :cve:`2018-16884`, :cve:`2018-17182`, :cve:`2018-17972`, :cve:`2018-18021`, :cve:`2018-18281`, :cve:`2018-18386`, :cve:`2018-18397`, :cve:`2018-18445`, :cve:`2018-18559`, :cve:`2018-18690`, :cve:`2018-18710`, :cve:`2018-18955`, :cve:`2018-19406`, :cve:`2018-19407`, :cve:`2018-19824`, :cve:`2018-19854`, :cve:`2018-19985`, :cve:`2018-20169`, :cve:`2018-20449`, :cve:`2018-20509`, :cve:`2018-20510`, :cve:`2018-20511`, :cve:`2018-20669`, :cve:`2018-20784`, :cve:`2018-20836`, :cve:`2018-20854`, :cve:`2018-20855`, :cve:`2018-20856`, :cve:`2018-20961`, :cve:`2018-20976`, :cve:`2018-21008`, :cve:`2018-25015`, :cve:`2018-25020`, :cve:`2018-3620`, :cve:`2018-3639`, :cve:`2018-3646`, :cve:`2018-3665`, :cve:`2018-3693`, :cve:`2018-5332`, :cve:`2018-5333`, :cve:`2018-5344`, :cve:`2018-5390`, :cve:`2018-5391`, :cve:`2018-5703`, :cve:`2018-5750`, :cve:`2018-5803`, :cve:`2018-5814`, :cve:`2018-5848`, :cve:`2018-5873`, :cve:`2018-5953`, :cve:`2018-5995`, :cve:`2018-6412`, :cve:`2018-6554`, :cve:`2018-6555`, :cve:`2018-6927`, :cve:`2018-7191`, :cve:`2018-7273`, :cve:`2018-7480`, :cve:`2018-7492`, :cve:`2018-7566`, :cve:`2018-7740`, :cve:`2018-7754`, :cve:`2018-7755`, :cve:`2018-7757`, :cve:`2018-7995`, :cve:`2018-8043`, :cve:`2018-8087`, :cve:`2018-8781`, :cve:`2018-8822`, :cve:`2018-8897`, :cve:`2018-9363`, :cve:`2018-9385`, :cve:`2018-9415`, :cve:`2018-9422`, :cve:`2018-9465`, :cve:`2018-9516`, :cve:`2018-9517`, :cve:`2018-9518`, :cve:`2018-9568`, :cve:`2019-0136`, :cve:`2019-0145`, :cve:`2019-0146`, :cve:`2019-0147`, :cve:`2019-0148`, :cve:`2019-0149`, :cve:`2019-0154`, :cve:`2019-0155`, :cve:`2019-10124`, :cve:`2019-10125`, :cve:`2019-10126`, :cve:`2019-10142`, :cve:`2019-10207`, :cve:`2019-10220`, :cve:`2019-10638`, :cve:`2019-10639`, :cve:`2019-11085`, :cve:`2019-11091`, :cve:`2019-11135`, :cve:`2019-11190`, :cve:`2019-11191`, :cve:`2019-1125`, :cve:`2019-11477`, :cve:`2019-11478`, :cve:`2019-11479`, :cve:`2019-11486`, :cve:`2019-11487`, :cve:`2019-11599`, :cve:`2019-11683`, :cve:`2019-11810`, :cve:`2019-11811`, :cve:`2019-11815`, :cve:`2019-11833`, :cve:`2019-11884`, :cve:`2019-12378`, :cve:`2019-12379`, :cve:`2019-12380`, :cve:`2019-12381`, :cve:`2019-12382`, :cve:`2019-12454`, :cve:`2019-12455`, :cve:`2019-12614`, :cve:`2019-12615`, :cve:`2019-12817`, :cve:`2019-12818`, :cve:`2019-12819`, :cve:`2019-12881`, :cve:`2019-12984`, :cve:`2019-13233`, :cve:`2019-13272`, :cve:`2019-13631`, :cve:`2019-13648`, :cve:`2019-14283`, :cve:`2019-14284`, :cve:`2019-14615`, :cve:`2019-14763`, :cve:`2019-14814`, :cve:`2019-14815`, :cve:`2019-14816`, :cve:`2019-14821`, :cve:`2019-14835`, :cve:`2019-14895`, :cve:`2019-14896`, :cve:`2019-14897`, :cve:`2019-14901`, :cve:`2019-15030`, :cve:`2019-15031`, :cve:`2019-15090`, :cve:`2019-15098`, :cve:`2019-15099`, :cve:`2019-15117`, :cve:`2019-15118`, :cve:`2019-15211`, :cve:`2019-15212`, :cve:`2019-15213`, :cve:`2019-15214`, :cve:`2019-15215`, :cve:`2019-15216`, :cve:`2019-15217`, :cve:`2019-15218`, :cve:`2019-15219`, :cve:`2019-15220`, :cve:`2019-15221`, :cve:`2019-15222`, :cve:`2019-15223`, :cve:`2019-15291`, :cve:`2019-15292`, :cve:`2019-15504`, :cve:`2019-15505`, :cve:`2019-15538`, :cve:`2019-15666`, :cve:`2019-15794`, 
:cve:`2019-15807`, :cve:`2019-15916`, :cve:`2019-15917`, :cve:`2019-15918`, :cve:`2019-15919`, :cve:`2019-15920`, :cve:`2019-15921`, :cve:`2019-15922`, :cve:`2019-15923`, :cve:`2019-15924`, :cve:`2019-15925`, :cve:`2019-15926`, :cve:`2019-15927`, :cve:`2019-16229`, :cve:`2019-16230`, :cve:`2019-16231`, :cve:`2019-16232`, :cve:`2019-16233`, :cve:`2019-16234`, :cve:`2019-16413`, :cve:`2019-16714`, :cve:`2019-16746`, :cve:`2019-16921`, :cve:`2019-16994`, :cve:`2019-16995`, :cve:`2019-17052`, :cve:`2019-17053`, :cve:`2019-17054`, :cve:`2019-17055`, :cve:`2019-17056`, :cve:`2019-17075`, :cve:`2019-17133`, :cve:`2019-17351`, :cve:`2019-17666`, :cve:`2019-18198`, :cve:`2019-18282`, :cve:`2019-18660`, :cve:`2019-18675`, :cve:`2019-18683`, :cve:`2019-18786`, :cve:`2019-18805`, :cve:`2019-18806`, :cve:`2019-18807`, :cve:`2019-18808`, :cve:`2019-18809`, :cve:`2019-18810`, :cve:`2019-18811`, :cve:`2019-18812`, :cve:`2019-18813`, :cve:`2019-18814`, :cve:`2019-18885`, :cve:`2019-19036`, :cve:`2019-19037`, :cve:`2019-19039`, :cve:`2019-19043`, :cve:`2019-19044`, :cve:`2019-19045`, :cve:`2019-19046`, :cve:`2019-19047`, :cve:`2019-19048`, :cve:`2019-19049`, :cve:`2019-19050`, :cve:`2019-19051`, :cve:`2019-19052`, :cve:`2019-19053`, :cve:`2019-19054`, :cve:`2019-19055`, :cve:`2019-19056`, :cve:`2019-19057`, :cve:`2019-19058`, :cve:`2019-19059`, :cve:`2019-19060`, :cve:`2019-19061`, :cve:`2019-19062`, :cve:`2019-19063`, :cve:`2019-19064`, :cve:`2019-19065`, :cve:`2019-19066`, :cve:`2019-19067`, :cve:`2019-19068`, :cve:`2019-19069`, :cve:`2019-19070`, :cve:`2019-19071`, :cve:`2019-19072`, :cve:`2019-19073`, :cve:`2019-19074`, :cve:`2019-19075`, :cve:`2019-19076`, :cve:`2019-19077`, :cve:`2019-19078`, :cve:`2019-19079`, :cve:`2019-19080`, :cve:`2019-19081`, :cve:`2019-19082`, :cve:`2019-19083`, :cve:`2019-19227`, :cve:`2019-19241`, :cve:`2019-19252`, :cve:`2019-19318`, :cve:`2019-19319`, :cve:`2019-19332`, :cve:`2019-19338`, :cve:`2019-19377`, :cve:`2019-19447`, :cve:`2019-19448`, :cve:`2019-19449`, :cve:`2019-19462`, :cve:`2019-19523`, :cve:`2019-19524`, :cve:`2019-19525`, :cve:`2019-19526`, :cve:`2019-19527`, :cve:`2019-19528`, :cve:`2019-19529`, :cve:`2019-19530`, :cve:`2019-19531`, :cve:`2019-19532`, :cve:`2019-19533`, :cve:`2019-19534`, :cve:`2019-19535`, :cve:`2019-19536`, :cve:`2019-19537`, :cve:`2019-19543`, :cve:`2019-19602`, :cve:`2019-19767`, :cve:`2019-19768`, :cve:`2019-19769`, :cve:`2019-19770`, :cve:`2019-19807`, :cve:`2019-19813`, :cve:`2019-19815`, :cve:`2019-19816`, :cve:`2019-19922`, :cve:`2019-19927`, :cve:`2019-19947`, :cve:`2019-19965` and :cve:`2019-1999`
+- nasm: Fix :cve:`2020-21528`
+- ncurses: Fix :cve:`2023-29491`
+- nghttp2: Fix :cve:`2023-35945`
+- procps: Fix :cve:`2023-4016`
+- python3-certifi: Fix :cve:`2023-37920`
+- python3-git: Fix :cve:`2022-24439` and :cve:`2023-40267`
+- python3-pygments: Fix :cve:`2022-40896`
+- python3: Fix :cve:`2023-40217`
+- qemu: Fix :cve:`2020-14394`, :cve:`2021-3638`, :cve_mitre:`2023-2861`, :cve:`2023-3180` and :cve:`2023-3354`
+- tiff: fix :cve:`2023-2908`, :cve:`2023-3316` and :cve:`2023-3618`
+- vim: Fix :cve:`2023-3896`, :cve:`2023-4733`, :cve:`2023-4734`, :cve:`2023-4735`, :cve:`2023-4736`, :cve:`2023-4738`, :cve:`2023-4750` and :cve:`2023-4752`
+- webkitgtk: fix :cve:`2022-48503` and :cve:`2023-23529`
+
+
+
+Fixes in Yocto-4.0.13
+~~~~~~~~~~~~~~~~~~~~~
+
+- acl/attr: ptest fixes and improvements
+- automake: fix buildtest patch
+- bind: Upgrade to 9.18.17
+- binutils: stable 2.38 branch updates
+- build-appliance-image: Update to kirkstone head revision
+- build-sysroots: Add :term:`SUMMARY` field
+- cargo.bbclass: set up cargo environment in common do_compile
+- contributor-guide: recipe-style-guide: add Upstream-Status
+- dbus: Specify runstatedir configure option
+- dev-manual: common-tasks: mention faster "find" command to trim sstate cache
+- dev-manual: disk-space: improve wording for obsolete sstate cache files
+- dev-manual: licenses: mention :term:`SPDX` for license compliance
+- dev-manual: licenses: update license manifest location
+- dev-manual: new-recipe.rst: fix inconsistency with contributor guide
+- dev-manual: split common-tasks.rst
+- dev-manual: wic.rst: Update native tools build command
+- documentation/README: align with master
+- efivar: backport 5 patches to fix build with gold
+- externalsrc: fix dependency chain issues
+- glibc-locale: use stricter matching for metapackages' runtime dependencies
+- glibc/check-test-wrapper: don't emit warnings from ssh
+- glibc: stable 2.35 branch updates
+- gst-devtools: Upgrade to 1.20.7
+- gstreamer1.0-libav: Upgrade to 1.20.7
+- gstreamer1.0-omx: Upgrade to 1.20.7
+- gstreamer1.0-plugins-bad: Upgrade to 1.20.7
+- gstreamer1.0-plugins-base: Upgrade to 1.20.7
+- gstreamer1.0-plugins-good: Upgrade to 1.20.7
+- gstreamer1.0-plugins-ugly: Upgrade to 1.20.7
+- gstreamer1.0-python: Upgrade to 1.20.7
+- gstreamer1.0-rtsp-server: Upgrade to 1.20.7
+- gstreamer1.0-vaapi: Upgrade to 1.20.7
+- gstreamer1.0: Upgrade to 1.20.7
+- kernel: Fix path comparison in kernel staging dir symlinking
+- lib/package_manager: Improve repo artefact filtering
+- libdnf: resolve cstdint inclusion for newer gcc versions
+- libnss-nis: Upgrade to 3.2
+- librsvg: Upgrade to 2.52.10
+- libxcrypt: update :term:`PV` to match :term:`SRCREV`
+- linux-firmware: Add firmware for the RTL8822 series
+- linux-firmware: Fix mediatek mt7601u firmware path
+- linux-firmware: package firmware for Dragonboard 410c
+- linux-firmware: split platform-specific Adreno shaders to separate packages
+- linux-firmware: Upgrade to 20230625
+- linux-yocto/5.10: update to v5.10.188
+- linux-yocto/5.15: update to v5.15.124
+- linux-yocto: add script to generate kernel :term:`CVE_CHECK_IGNORE` entries
+- linux/cve-exclusion: add generated CVE_CHECK_IGNORES
+- linux/cve-exclusion: remove obsolete manual entries
+- manuals: add new contributor guide
+- manuals: document "mime-xdg" class and :term:`MIME_XDG_PACKAGES`
+- manuals: update former references to dev-manual/common-tasks
+- mdadm: add util-linux-blockdev ptest dependency
+- migration-guides: add release notes for 4.0.12
+- npm.bbclass: avoid DeprecationWarning with new python
+- oeqa/runtime/ltp: Increase ltp test output timeout
+- oeqa/ssh: Further improve process exit handling
+- oeqa/target/ssh: Ensure EAGAIN doesn't truncate output
+- oeqa/utils/nfs: allow requesting non-udp ports
+- pixman: Remove duplicate MIT license
+- poky.conf: bump version for 4.0.13
+- poky.conf: update :term:`SANITY_TESTED_DISTROS` to match autobuilder
+- pseudo: Fix to work with glibc 2.38
+- python3-git: Upgrade to 3.1.32
+- python3: upgrade to 3.10.13
+- ref-manual: add Initramfs term
+- ref-manual: add meson class and variables
+- ref-manual: add new variables
+- ref-manual: qa-checks: align with master
+- ref-manual: system-requirements: update supported distros
+- resulttool/report: Avoid divide by zero
+- resulttool/resultutils: allow index generation despite corrupt json
+- rootfs: Add debugfs package db file copy and cleanup
+- rpm2cpio.sh: update to the last 4.x version
+- rpm: Pick debugfs package db files/dirs explicitly
+- scripts/create-pull-request: update URLs to git repositories
+- scripts/rpm2cpio.sh: Use bzip2 instead of bunzip2
+- sdk-manual: extensible.rst: align with master branch
+- selftest/cases/glibc.py: fix the override syntax
+- selftest/cases/glibc.py: increase the memory for testing
+- selftest/cases/glibc.py: switch to using NFS over TCP
+- shadow-sysroot: add license information
+- sysklogd: fix integration with systemd-journald
+- tar: Upgrade to 1.35
+- target/ssh: Ensure exit code set for commands
+- tcl: prevent installing another copy of tzdata
+- template: fix typo in section header
+- vim: Upgrade to 9.0.1894
+- vim: update obsolete comment
+- wic: fix wrong attempt to create file system in unpartitioned regions
+- yocto-uninative: Update to 4.2 for glibc 2.38
+- yocto-uninative: Update to 4.3
+
+
+Known Issues in Yocto-4.0.13
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.13
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Abe Kohandel
+- Adrian Freihofer
+- Alberto Planas
+- Alex Kiernan
+- Alexander Kanavin
+- Alexis Lothoré
+- Anuj Mittal
+- Archana Polampalli
+- Ashish Sharma
+- BELOUARGA Mohamed
+- Bruce Ashfield
+- Changqing Li
+- Dmitry Baryshkov
+- Enrico Scholz
+- Etienne Cordonnier
+- Hitendra Prajapati
+- Julien Stephan
+- Kai Kang
+- Khem Raj
+- Lee Chee Yang
+- Marek Vasut
+- Markus Niebel
+- Martin Jansa
+- Meenali Gupta
+- Michael Halstead
+- Michael Opdenacker
+- Narpat Mali
+- Ovidiu Panait
+- Pavel Zhukov
+- Peter Marko
+- Peter Suti
+- Poonam Jadhav
+- Richard Purdie
+- Roland Hieber
+- Ross Burton
+- Sanjana
+- Siddharth Doshi
+- Soumya Sambu
+- Staffan Rydén
+- Steve Sakoman
+- Trevor Gamblin
+- Vijay Anusuri
+- Vivek Kumbhar
+- Wang Mingyu
+- Yogita Urade
+
+
+Repositories / Downloads for Yocto-4.0.13
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.13 </poky/log/?h=yocto-4.0.13>`
+- Git Revision: :yocto_git:`e51bf557f596c4da38789a948a3228ba11455e3c </poky/commit/?id=e51bf557f596c4da38789a948a3228ba11455e3c>`
+- Release Artefact: poky-e51bf557f596c4da38789a948a3228ba11455e3c
+- sha: afddadb367a90154751f04993077bceffdc1413f9ba9b8c03acb487d0437286e
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.13/poky-e51bf557f596c4da38789a948a3228ba11455e3c.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.13/poky-e51bf557f596c4da38789a948a3228ba11455e3c.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.13 </openembedded-core/log/?h=yocto-4.0.13>`
+- Git Revision: :oe_git:`d90e4d5e3cca9cffe8f60841afc63667a9ac39fa </openembedded-core/commit/?id=d90e4d5e3cca9cffe8f60841afc63667a9ac39fa>`
+- Release Artefact: oecore-d90e4d5e3cca9cffe8f60841afc63667a9ac39fa
+- sha: 56e3bdac81b3628e74dfef2132a54be4db7d87373139a00ed64f5c9a354d716a
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.13/oecore-d90e4d5e3cca9cffe8f60841afc63667a9ac39fa.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.13/oecore-d90e4d5e3cca9cffe8f60841afc63667a9ac39fa.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.13 </meta-mingw/log/?h=yocto-4.0.13>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.13/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.13/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.13 </meta-gplv2/log/?h=yocto-4.0.13>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.13/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.13/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.13 </bitbake/log/?h=yocto-4.0.13>`
+- Git Revision: :oe_git:`41b6684489d0261753344956042be2cc4adb0159 </bitbake/commit/?id=41b6684489d0261753344956042be2cc4adb0159>`
+- Release Artefact: bitbake-41b6684489d0261753344956042be2cc4adb0159
+- sha: efa2b1c4d0be115ed3960750d1e4ed958771b2db6d7baee2d13ad386589376e8
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.13/bitbake-41b6684489d0261753344956042be2cc4adb0159.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.13/bitbake-41b6684489d0261753344956042be2cc4adb0159.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.13 </yocto-docs/log/?h=yocto-4.0.13>`
+- Git Revision: :yocto_git:`8f02741de867125f11a37822b2d206be180d4ee3 </yocto-docs/commit/?id=8f02741de867125f11a37822b2d206be180d4ee3>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.14.rst b/documentation/migration-guides/release-notes-4.0.14.rst
new file mode 100644
index 0000000000..02253f33f7
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.14.rst
@@ -0,0 +1,227 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.14 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.14
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- bind: Fix :cve:`2023-3341` and :cve:`2023-4236`
+- binutils: Fix :cve:`2022-44840`, :cve:`2022-45703`, :cve:`2022-47008`, :cve:`2022-47011`, :cve:`2022-47673`, :cve:`2022-47695`, :cve:`2022-47696` and :cve:`2022-48063`
+- cups: Fix :cve:`2023-4504`
+- curl: Fix :cve:`2023-38545` and :cve:`2023-38546`
+- gawk: Fix :cve:`2023-4156`
+- ghostscript: Fix :cve:`2023-43115`
+- glibc: Fix :cve:`2023-4806`, :cve:`2023-4813`, :cve:`2023-4911` and :cve:`2023-5156`
+- glibc: Ignore :cve:`2023-4527`
+- go: Fix :cve:`2023-24538` and :cve:`2023-39318`
+- gstreamer1.0-plugins-bad: fix :cve_mitre:`2023-40474`, :cve_mitre:`2023-40475` and :cve_mitre:`2023-40476`
+- libtiff: Fix :cve:`2022-40090` and :cve:`2023-1916`
+- libwebp: Fix :cve:`2023-5129`
+- libx11: Fix :cve:`2023-43785`, :cve:`2023-43786` and :cve:`2023-43787`
+- libxml2: Fix :cve:`2023-45322`
+- libxpm: Fix :cve:`2023-43788` and :cve:`2023-43789`
+- linux-firmware: Fix :cve:`2022-40982`, :cve:`2023-20569` and :cve:`2023-20593`
+- linux-yocto: update CVE exclusions
+- linux-yocto/5.10: Ignore :cve:`2003-1604`, :cve:`2004-0230`, :cve:`2006-3635`, :cve:`2006-5331`, :cve:`2006-6128`, :cve:`2007-4774`, :cve:`2007-6761`, :cve:`2007-6762`, :cve:`2008-7316`, :cve:`2009-2692`, :cve:`2010-0008`, :cve:`2010-3432`, :cve:`2010-4648`, :cve:`2010-5313`, :cve:`2010-5328`, :cve:`2010-5329`, :cve:`2010-5331`, :cve:`2010-5332`, :cve:`2011-4098`, :cve:`2011-4131`, :cve:`2011-4915`, :cve:`2011-5321`, :cve:`2011-5327`, :cve:`2012-0957`, :cve:`2012-2119`, :cve:`2012-2136`, :cve:`2012-2137`, :cve:`2012-2313`, :cve:`2012-2319`, :cve:`2012-2372`, :cve:`2012-2375`, :cve:`2012-2390`, :cve:`2012-2669`, :cve:`2012-2744`, :cve:`2012-2745`, :cve:`2012-3364`, :cve:`2012-3375`, :cve:`2012-3400`, :cve:`2012-3412`, :cve:`2012-3430`, :cve:`2012-3510`, :cve:`2012-3511`, :cve:`2012-3520`, :cve:`2012-3552`, :cve:`2012-4398`, :cve:`2012-4444`, :cve:`2012-4461`, :cve:`2012-4467`, :cve:`2012-4508`, :cve:`2012-4530`, :cve:`2012-4565`, :cve:`2012-5374`, :cve:`2012-5375`, :cve:`2012-5517`, :cve:`2012-6536`, :cve:`2012-6537`, :cve:`2012-6538`, :cve:`2012-6539`, :cve:`2012-6540`, :cve:`2012-6541`, :cve:`2012-6542`, :cve:`2012-6543`, :cve:`2012-6544`, :cve:`2012-6545`, :cve:`2012-6546`, :cve:`2012-6547`, :cve:`2012-6548`, :cve:`2012-6549`, :cve:`2012-6638`, :cve:`2012-6647`, :cve:`2012-6657`, :cve:`2012-6689`, :cve:`2012-6701`, :cve:`2012-6703`, :cve:`2012-6704`, :cve:`2012-6712`, :cve:`2013-0160`, :cve:`2013-0190`, :cve:`2013-0216`, :cve:`2013-0217`, :cve:`2013-0228`, :cve:`2013-0231`, :cve:`2013-0268`, :cve:`2013-0290`, :cve:`2013-0309`, :cve:`2013-0310`, :cve:`2013-0311`, :cve:`2013-0313`, :cve:`2013-0343`, :cve:`2013-0349`, :cve:`2013-0871`, :cve:`2013-0913`, :cve:`2013-0914`, :cve:`2013-1059`, :cve:`2013-1763`, :cve:`2013-1767`, :cve:`2013-1772`, :cve:`2013-1773`, :cve:`2013-1774`, :cve:`2013-1792`, :cve:`2013-1796`, :cve:`2013-1797`, :cve:`2013-1798`, :cve:`2013-1819`, :cve:`2013-1826`, :cve:`2013-1827`, :cve:`2013-1828`, :cve:`2013-1848`, :cve:`2013-1858`, :cve:`2013-1860`, :cve:`2013-1928`, :cve:`2013-1929`, :cve:`2013-1943`, :cve:`2013-1956`, :cve:`2013-1957`, :cve:`2013-1958`, :cve:`2013-1959`, :cve:`2013-1979`, :cve:`2013-2015`, :cve:`2013-2017`, :cve:`2013-2058`, :cve:`2013-2094`, :cve:`2013-2128`, :cve:`2013-2140`, :cve:`2013-2141`, :cve:`2013-2146`, :cve:`2013-2147`, :cve:`2013-2148`, :cve:`2013-2164`, :cve:`2013-2206`, :cve:`2013-2232`, :cve:`2013-2234`, :cve:`2013-2237`, :cve:`2013-2546`, :cve:`2013-2547`, :cve:`2013-2548`, :cve:`2013-2596`, :cve:`2013-2634`, :cve:`2013-2635`, :cve:`2013-2636`, :cve:`2013-2850`, :cve:`2013-2851`, :cve:`2013-2852`, :cve:`2013-2888`, :cve:`2013-2889`, :cve:`2013-2890`, :cve:`2013-2891`, :cve:`2013-2892`, :cve:`2013-2893`, :cve:`2013-2894`, :cve:`2013-2895`, :cve:`2013-2896`, :cve:`2013-2897`, :cve:`2013-2898`, :cve:`2013-2899`, :cve:`2013-2929`, :cve:`2013-2930`, :cve:`2013-3076`, :cve:`2013-3222`, :cve:`2013-3223`, :cve:`2013-3224`, :cve:`2013-3225`, :cve:`2013-3226`, :cve:`2013-3227`, :cve:`2013-3228`, :cve:`2013-3229`, :cve:`2013-3230`, :cve:`2013-3231`, :cve:`2013-3232`, :cve:`2013-3233`, :cve:`2013-3234`, :cve:`2013-3235`, :cve:`2013-3236`, :cve:`2013-3237`, :cve:`2013-3301`, :cve:`2013-3302`, :cve:`2013-4125`, :cve:`2013-4127`, :cve:`2013-4129`, :cve:`2013-4162`, :cve:`2013-4163`, :cve:`2013-4205`, :cve:`2013-4220`, :cve:`2013-4247`, :cve:`2013-4254`, :cve:`2013-4270`, :cve:`2013-4299`, :cve:`2013-4300`, :cve:`2013-4312`, :cve:`2013-4343`, :cve:`2013-4345`, :cve:`2013-4348`, :cve:`2013-4350`, :cve:`2013-4387`, :cve:`2013-4470`, 
:cve:`2013-4483`, :cve:`2013-4511`, :cve:`2013-4512`, :cve:`2013-4513`, :cve:`2013-4514`, :cve:`2013-4515`, :cve:`2013-4516`, :cve:`2013-4563`, :cve:`2013-4579`, :cve:`2013-4587`, :cve:`2013-4588`, :cve:`2013-4591`, :cve:`2013-4592`, :cve:`2013-5634`, :cve:`2013-6282`, :cve:`2013-6367`, :cve:`2013-6368`, :cve:`2013-6376`, :cve:`2013-6378`, :cve:`2013-6380`, :cve:`2013-6381`, :cve:`2013-6382`, :cve:`2013-6383`, :cve:`2013-6431`, :cve:`2013-6432`, :cve:`2013-6885`, :cve:`2013-7026`, :cve:`2013-7027`, :cve:`2013-7263`, :cve:`2013-7264`, :cve:`2013-7265`, :cve:`2013-7266`, :cve:`2013-7267`, :cve:`2013-7268`, :cve:`2013-7269`, :cve:`2013-7270`, :cve:`2013-7271`, :cve:`2013-7281`, :cve:`2013-7339`, :cve:`2013-7348`, :cve:`2013-7421`, :cve:`2013-7446`, :cve:`2013-7470`, :cve:`2014-0038`, :cve:`2014-0049`, :cve:`2014-0055`, :cve:`2014-0069`, :cve:`2014-0077`, :cve:`2014-0100`, :cve:`2014-0101`, :cve:`2014-0102`, :cve:`2014-0131`, :cve:`2014-0155`, :cve:`2014-0181`, :cve:`2014-0196`, :cve:`2014-0203`, :cve:`2014-0205`, :cve:`2014-0206`, :cve:`2014-1438`, :cve:`2014-1444`, :cve:`2014-1445`, :cve:`2014-1446`, :cve:`2014-1690`, :cve:`2014-1737`, :cve:`2014-1738`, :cve:`2014-1739`, :cve:`2014-1874`, :cve:`2014-2038`, :cve:`2014-2039`, :cve:`2014-2309`, :cve:`2014-2523`, :cve:`2014-2568`, :cve:`2014-2580`, :cve:`2014-2672`, :cve:`2014-2673`, :cve:`2014-2678`, :cve:`2014-2706`, :cve:`2014-2739`, :cve:`2014-2851`, :cve:`2014-2889`, :cve:`2014-3122`, :cve:`2014-3144`, :cve:`2014-3145`, :cve:`2014-3153`, :cve:`2014-3180`, :cve:`2014-3181`, :cve:`2014-3182`, :cve:`2014-3183`, :cve:`2014-3184`, :cve:`2014-3185`, :cve:`2014-3186`, :cve:`2014-3534`, :cve:`2014-3535`, :cve:`2014-3601`, :cve:`2014-3610`, :cve:`2014-3611`, :cve:`2014-3631`, :cve:`2014-3645`, :cve:`2014-3646`, :cve:`2014-3647`, :cve:`2014-3673`, :cve:`2014-3687`, :cve:`2014-3688`, :cve:`2014-3690`, :cve:`2014-3917`, :cve:`2014-3940`, :cve:`2014-4014`, :cve:`2014-4027`, :cve:`2014-4157`, :cve:`2014-4171`, :cve:`2014-4508`, :cve:`2014-4608`, :cve:`2014-4611`, :cve:`2014-4652`, :cve:`2014-4653`, :cve:`2014-4654`, :cve:`2014-4655`, :cve:`2014-4656`, :cve:`2014-4667`, :cve:`2014-4699`, :cve:`2014-4943`, :cve:`2014-5045`, :cve:`2014-5077`, :cve:`2014-5206`, :cve:`2014-5207`, :cve:`2014-5471`, :cve:`2014-5472`, :cve:`2014-6410`, :cve:`2014-6416`, :cve:`2014-6417`, :cve:`2014-6418`, :cve:`2014-7145`, :cve:`2014-7283`, :cve:`2014-7284`, :cve:`2014-7822`, :cve:`2014-7825`, :cve:`2014-7826`, :cve:`2014-7841`, :cve:`2014-7842`, :cve:`2014-7843`, :cve:`2014-7970`, :cve:`2014-7975`, :cve:`2014-8086`, :cve:`2014-8133`, :cve:`2014-8134`, :cve:`2014-8159`, :cve:`2014-8160`, :cve:`2014-8171`, :cve:`2014-8172`, :cve:`2014-8173`, :cve:`2014-8369`, :cve:`2014-8480`, :cve:`2014-8481`, :cve:`2014-8559`, :cve:`2014-8709`, :cve:`2014-8884`, :cve:`2014-8989`, :cve:`2014-9090`, :cve:`2014-9322`, :cve:`2014-9419`, :cve:`2014-9420`, :cve:`2014-9428`, :cve:`2014-9529`, :cve:`2014-9584`, :cve:`2014-9585`, :cve:`2014-9644`, :cve:`2014-9683`, :cve:`2014-9710`, :cve:`2014-9715`, :cve:`2014-9717`, :cve:`2014-9728`, :cve:`2014-9729`, :cve:`2014-9730`, :cve:`2014-9731`, :cve:`2014-9803`, :cve:`2014-9870`, :cve:`2014-9888`, :cve:`2014-9895`, :cve:`2014-9903`, :cve:`2014-9904`, :cve:`2014-9914`, :cve:`2014-9922`, :cve:`2014-9940`, :cve:`2015-0239`, :cve:`2015-0274`, :cve:`2015-0275`, :cve:`2015-1333`, :cve:`2015-1339`, :cve:`2015-1350`, :cve:`2015-1420`, :cve:`2015-1421`, :cve:`2015-1465`, :cve:`2015-1573`, :cve:`2015-1593`, :cve:`2015-1805`, :cve:`2015-2041`, :cve:`2015-2042`, 
:cve:`2015-2150`, :cve:`2015-2666`, :cve:`2015-2672`, :cve:`2015-2686`, :cve:`2015-2830`, :cve:`2015-2922`, :cve:`2015-2925`, :cve:`2015-3212`, :cve:`2015-3214`, :cve:`2015-3288`, :cve:`2015-3290`, :cve:`2015-3291`, :cve:`2015-3331`, :cve:`2015-3339`, :cve:`2015-3636`, :cve:`2015-4001`, :cve:`2015-4002`, :cve:`2015-4003`, :cve:`2015-4004`, :cve:`2015-4036`, :cve:`2015-4167`, :cve:`2015-4170`, :cve:`2015-4176`, :cve:`2015-4177`, :cve:`2015-4178`, :cve:`2015-4692`, :cve:`2015-4700`, :cve:`2015-5156`, :cve:`2015-5157`, :cve:`2015-5257`, :cve:`2015-5283`, :cve:`2015-5307`, :cve:`2015-5327`, :cve:`2015-5364`, :cve:`2015-5366`, :cve:`2015-5697`, :cve:`2015-5706`, :cve:`2015-5707`, :cve:`2015-6252`, :cve:`2015-6526`, :cve:`2015-6937`, :cve:`2015-7509`, :cve:`2015-7513`, :cve:`2015-7515`, :cve:`2015-7550`, :cve:`2015-7566`, :cve:`2015-7613`, :cve:`2015-7799`, :cve:`2015-7833`, :cve:`2015-7872`, :cve:`2015-7884`, :cve:`2015-7885`, :cve:`2015-7990`, :cve:`2015-8104`, :cve:`2015-8215`, :cve:`2015-8324`, :cve:`2015-8374`, :cve:`2015-8539`, :cve:`2015-8543`, :cve:`2015-8550`, :cve:`2015-8551`, :cve:`2015-8552`, :cve:`2015-8553`, :cve:`2015-8569`, :cve:`2015-8575`, :cve:`2015-8660`, :cve:`2015-8709`, :cve:`2015-8746`, :cve:`2015-8767`, :cve:`2015-8785`, :cve:`2015-8787`, :cve:`2015-8812`, :cve:`2015-8816`, :cve:`2015-8830`, :cve:`2015-8839`, :cve:`2015-8844`, :cve:`2015-8845`, :cve:`2015-8950`, :cve:`2015-8952`, :cve:`2015-8953`, :cve:`2015-8955`, :cve:`2015-8956`, :cve:`2015-8961`, :cve:`2015-8962`, :cve:`2015-8963`, :cve:`2015-8964`, :cve:`2015-8966`, :cve:`2015-8967`, :cve:`2015-8970`, :cve:`2015-9004`, :cve:`2015-9016`, :cve:`2015-9289`, :cve:`2016-0617`, :cve:`2016-0723`, :cve:`2016-0728`, :cve:`2016-0758`, :cve:`2016-0821`, :cve:`2016-0823`, :cve:`2016-10044`, :cve:`2016-10088`, :cve:`2016-10147`, :cve:`2016-10150`, :cve:`2016-10153`, :cve:`2016-10154`, :cve:`2016-10200`, :cve:`2016-10208`, :cve:`2016-10229`, :cve:`2016-10318`, :cve:`2016-10723`, :cve:`2016-10741`, :cve:`2016-10764`, :cve:`2016-10905`, :cve:`2016-10906`, :cve:`2016-10907`, :cve:`2016-1237`, :cve:`2016-1575`, :cve:`2016-1576`, :cve:`2016-1583`, :cve:`2016-2053`, :cve:`2016-2069`, :cve:`2016-2070`, :cve:`2016-2085`, :cve:`2016-2117`, :cve:`2016-2143`, :cve:`2016-2184`, :cve:`2016-2185`, :cve:`2016-2186`, :cve:`2016-2187`, :cve:`2016-2188`, :cve:`2016-2383`, :cve:`2016-2384`, :cve:`2016-2543`, :cve:`2016-2544`, :cve:`2016-2545`, :cve:`2016-2546`, :cve:`2016-2547`, :cve:`2016-2548`, :cve:`2016-2549`, :cve:`2016-2550`, :cve:`2016-2782`, :cve:`2016-2847`, :cve:`2016-3044`, :cve:`2016-3070`, :cve:`2016-3134`, :cve:`2016-3135`, :cve:`2016-3136`, :cve:`2016-3137`, :cve:`2016-3138`, :cve:`2016-3139`, :cve:`2016-3140`, :cve:`2016-3156`, :cve:`2016-3157`, :cve:`2016-3672`, :cve:`2016-3689`, :cve:`2016-3713`, :cve:`2016-3841`, :cve:`2016-3857`, :cve:`2016-3951`, :cve:`2016-3955`, :cve:`2016-3961`, :cve:`2016-4440`, :cve:`2016-4470`, :cve:`2016-4482`, :cve:`2016-4485`, :cve:`2016-4486`, :cve:`2016-4557`, :cve:`2016-4558`, :cve:`2016-4565`, :cve:`2016-4568`, :cve:`2016-4569`, :cve:`2016-4578`, :cve:`2016-4580`, :cve:`2016-4581`, :cve:`2016-4794`, :cve:`2016-4805`, :cve:`2016-4913`, :cve:`2016-4951`, :cve:`2016-4997`, :cve:`2016-4998`, :cve:`2016-5195`, :cve:`2016-5243`, :cve:`2016-5244`, :cve:`2016-5400`, :cve:`2016-5412`, :cve:`2016-5696`, :cve:`2016-5728`, :cve:`2016-5828`, :cve:`2016-5829`, :cve:`2016-6130`, :cve:`2016-6136`, :cve:`2016-6156`, :cve:`2016-6162`, :cve:`2016-6187`, :cve:`2016-6197`, :cve:`2016-6198`, :cve:`2016-6213`, 
:cve:`2016-6327`, :cve:`2016-6480`, :cve:`2016-6516`, :cve:`2016-6786`, :cve:`2016-6787`, :cve:`2016-6828`, :cve:`2016-7039`, :cve:`2016-7042`, :cve:`2016-7097`, :cve:`2016-7117`, :cve:`2016-7425`, :cve:`2016-7910`, :cve:`2016-7911`, :cve:`2016-7912`, :cve:`2016-7913`, :cve:`2016-7914`, :cve:`2016-7915`, :cve:`2016-7916`, :cve:`2016-7917`, :cve:`2016-8399`, :cve:`2016-8405`, :cve:`2016-8630`, :cve:`2016-8632`, :cve:`2016-8633`, :cve:`2016-8636`, :cve:`2016-8645`, :cve:`2016-8646`, :cve:`2016-8650`, :cve:`2016-8655`, :cve:`2016-8658`, :cve:`2016-8666`, :cve:`2016-9083`, :cve:`2016-9084`, :cve:`2016-9120`, :cve:`2016-9178`, :cve:`2016-9191`, :cve:`2016-9313`, :cve:`2016-9555`, :cve:`2016-9576`, :cve:`2016-9588`, :cve:`2016-9604`, :cve:`2016-9685`, :cve:`2016-9754`, :cve:`2016-9755`, :cve:`2016-9756`, :cve:`2016-9777`, :cve:`2016-9793`, :cve:`2016-9794`, :cve:`2016-9806`, :cve:`2016-9919`, :cve:`2017-0605`, :cve:`2017-0627`, :cve:`2017-0750`, :cve:`2017-0786`, :cve:`2017-0861`, :cve:`2017-1000`, :cve:`2017-1000111`, :cve:`2017-1000112`, :cve:`2017-1000251`, :cve:`2017-1000252`, :cve:`2017-1000253`, :cve:`2017-1000255`, :cve:`2017-1000363`, :cve:`2017-1000364`, :cve:`2017-1000365`, :cve:`2017-1000370`, :cve:`2017-1000371`, :cve:`2017-1000379`, :cve:`2017-1000380`, :cve:`2017-1000405`, :cve:`2017-1000407`, :cve:`2017-1000410`, :cve:`2017-10661`, :cve:`2017-10662`, :cve:`2017-10663`, :cve:`2017-10810`, :cve:`2017-10911`, :cve:`2017-11089`, :cve:`2017-11176`, :cve:`2017-11472`, :cve:`2017-11473`, :cve:`2017-11600`, :cve:`2017-12134`, :cve:`2017-12146`, :cve:`2017-12153`, :cve:`2017-12154`, :cve:`2017-12168`, :cve:`2017-12188`, :cve:`2017-12190`, :cve:`2017-12192`, :cve:`2017-12193`, :cve:`2017-12762`, :cve:`2017-13080`, :cve:`2017-13166`, :cve:`2017-13167`, :cve:`2017-13168`, :cve:`2017-13215`, :cve:`2017-13216`, :cve:`2017-13220`, :cve:`2017-13305`, :cve:`2017-13686`, :cve:`2017-13695`, :cve:`2017-13715`, :cve:`2017-14051`, :cve:`2017-14106`, :cve:`2017-14140`, :cve:`2017-14156`, :cve:`2017-14340`, :cve:`2017-14489`, :cve:`2017-14497`, :cve:`2017-14954`, :cve:`2017-14991`, :cve:`2017-15102`, :cve:`2017-15115`, :cve:`2017-15116`, :cve:`2017-15121`, :cve:`2017-15126`, :cve:`2017-15127`, :cve:`2017-15128`, :cve:`2017-15129`, :cve:`2017-15265`, :cve:`2017-15274`, :cve:`2017-15299`, :cve:`2017-15306`, :cve:`2017-15537`, :cve:`2017-15649`, :cve:`2017-15868`, :cve:`2017-15951`, :cve:`2017-16525`, :cve:`2017-16526`, :cve:`2017-16527`, :cve:`2017-16528`, :cve:`2017-16529`, :cve:`2017-16530`, :cve:`2017-16531`, :cve:`2017-16532`, :cve:`2017-16533`, :cve:`2017-16534`, :cve:`2017-16535`, :cve:`2017-16536`, :cve:`2017-16537`, :cve:`2017-16538`, :cve:`2017-16643`, :cve:`2017-16644`, :cve:`2017-16645`, :cve:`2017-16646`, :cve:`2017-16647`, :cve:`2017-16648`, :cve:`2017-16649`, :cve:`2017-16650`, :cve:`2017-16911`, :cve:`2017-16912`, :cve:`2017-16913`, :cve:`2017-16914`, :cve:`2017-16939`, :cve:`2017-16994`, :cve:`2017-16995`, :cve:`2017-16996`, :cve:`2017-17052`, :cve:`2017-17053`, :cve:`2017-17448`, :cve:`2017-17449`, :cve:`2017-17450`, :cve:`2017-17558`, :cve:`2017-17712`, :cve:`2017-17741`, :cve:`2017-17805`, :cve:`2017-17806`, :cve:`2017-17807`, :cve:`2017-17852`, :cve:`2017-17853`, :cve:`2017-17854`, :cve:`2017-17855`, :cve:`2017-17856`, :cve:`2017-17857`, :cve:`2017-17862`, :cve:`2017-17863`, :cve:`2017-17864`, :cve:`2017-17975`, :cve:`2017-18017`, :cve:`2017-18075`, :cve:`2017-18079`, :cve:`2017-18174`, :cve:`2017-18193`, :cve:`2017-18200`, :cve:`2017-18202`, :cve:`2017-18203`, :cve:`2017-18204`, 
:cve:`2017-18208`, :cve:`2017-18216`, :cve:`2017-18218`, :cve:`2017-18221`, :cve:`2017-18222`, :cve:`2017-18224`, :cve:`2017-18232`, :cve:`2017-18241`, :cve:`2017-18249`, :cve:`2017-18255`, :cve:`2017-18257`, :cve:`2017-18261`, :cve:`2017-18270`, :cve:`2017-18344`, :cve:`2017-18360`, :cve:`2017-18379`, :cve:`2017-18509`, :cve:`2017-18549`, :cve:`2017-18550`, :cve:`2017-18551`, :cve:`2017-18552`, :cve:`2017-18595`, :cve:`2017-2583`, :cve:`2017-2584`, :cve:`2017-2596`, :cve:`2017-2618`, :cve:`2017-2634`, :cve:`2017-2636`, :cve:`2017-2647`, :cve:`2017-2671`, :cve:`2017-5123`, :cve:`2017-5546`, :cve:`2017-5547`, :cve:`2017-5548`, :cve:`2017-5549`, :cve:`2017-5550`, :cve:`2017-5551`, :cve:`2017-5576`, :cve:`2017-5577`, :cve:`2017-5669`, :cve:`2017-5715`, :cve:`2017-5753`, :cve:`2017-5754`, :cve:`2017-5897`, :cve:`2017-5967`, :cve:`2017-5970`, :cve:`2017-5972`, :cve:`2017-5986`, :cve:`2017-6001`, :cve:`2017-6074`, :cve:`2017-6214`, :cve:`2017-6345`, :cve:`2017-6346`, :cve:`2017-6347`, :cve:`2017-6348`, :cve:`2017-6353`, :cve:`2017-6874`, :cve:`2017-6951`, :cve:`2017-7184`, :cve:`2017-7187`, :cve:`2017-7261`, :cve:`2017-7273`, :cve:`2017-7277`, :cve:`2017-7294`, :cve:`2017-7308`, :cve:`2017-7346`, :cve:`2017-7374`, :cve:`2017-7472`, :cve:`2017-7477`, :cve:`2017-7482`, :cve:`2017-7487`, :cve:`2017-7495`, :cve:`2017-7518`, :cve:`2017-7533`, :cve:`2017-7541`, :cve:`2017-7542`, :cve:`2017-7558`, :cve:`2017-7616`, :cve:`2017-7618`, :cve:`2017-7645`, :cve:`2017-7889`, :cve:`2017-7895`, :cve:`2017-7979`, :cve:`2017-8061`, :cve:`2017-8062`, :cve:`2017-8063`, :cve:`2017-8064`, :cve:`2017-8065`, :cve:`2017-8066`, :cve:`2017-8067`, :cve:`2017-8068`, :cve:`2017-8069`, :cve:`2017-8070`, :cve:`2017-8071`, :cve:`2017-8072`, :cve:`2017-8106`, :cve:`2017-8240`, :cve:`2017-8797`, :cve:`2017-8824`, :cve:`2017-8831`, :cve:`2017-8890`, :cve:`2017-8924`, :cve:`2017-8925`, :cve:`2017-9059`, :cve:`2017-9074`, :cve:`2017-9075`, :cve:`2017-9076`, :cve:`2017-9077`, :cve:`2017-9150`, :cve:`2017-9211`, :cve:`2017-9242`, :cve:`2017-9605`, :cve:`2017-9725`, :cve:`2017-9984`, :cve:`2017-9985`, :cve:`2017-9986`, :cve:`2018-1000004`, :cve:`2018-1000026`, :cve:`2018-1000028`, :cve:`2018-1000199`, :cve:`2018-1000200`, :cve:`2018-1000204`, :cve:`2018-10021`, :cve:`2018-10074`, :cve:`2018-10087`, :cve:`2018-10124`, :cve:`2018-10322`, :cve:`2018-10323`, :cve:`2018-1065`, :cve:`2018-1066`, :cve:`2018-10675`, :cve:`2018-1068`, :cve:`2018-10840`, :cve:`2018-10853`, :cve:`2018-1087`, :cve:`2018-10876`, :cve:`2018-10877`, :cve:`2018-10878`, :cve:`2018-10879`, :cve:`2018-10880`, :cve:`2018-10881`, :cve:`2018-10882`, :cve:`2018-10883`, :cve:`2018-10901`, :cve:`2018-10902`, :cve:`2018-1091`, :cve:`2018-1092`, :cve:`2018-1093`, :cve:`2018-10938`, :cve:`2018-1094`, :cve:`2018-10940`, :cve:`2018-1095`, :cve:`2018-1108`, :cve:`2018-1118`, :cve:`2018-1120`, :cve:`2018-11232`, :cve:`2018-1128`, :cve:`2018-1129`, :cve:`2018-1130`, :cve:`2018-11412`, :cve:`2018-11506`, :cve:`2018-11508`, :cve:`2018-12126`, :cve:`2018-12127`, :cve:`2018-12130`, :cve:`2018-12207`, :cve:`2018-12232`, :cve:`2018-12233`, :cve:`2018-12633`, :cve:`2018-12714`, :cve:`2018-12896`, :cve:`2018-12904`, :cve:`2018-13053`, :cve:`2018-13093`, :cve:`2018-13094`, :cve:`2018-13095`, :cve:`2018-13096`, :cve:`2018-13097`, :cve:`2018-13098`, :cve:`2018-13099`, :cve:`2018-13100`, :cve:`2018-13405`, :cve:`2018-13406`, :cve:`2018-14609`, :cve:`2018-14610`, :cve:`2018-14611`, :cve:`2018-14612`, :cve:`2018-14613`, :cve:`2018-14614`, :cve:`2018-14615`, :cve:`2018-14616`, :cve:`2018-14617`, 
:cve:`2018-14619`, :cve:`2018-14625`, :cve:`2018-14633`, :cve:`2018-14634`, :cve:`2018-14641`, :cve:`2018-14646`, :cve:`2018-14656`, :cve:`2018-14678`, :cve:`2018-14734`, :cve:`2018-15471`, :cve:`2018-15572`, :cve:`2018-15594`, :cve:`2018-16276`, :cve:`2018-16597`, :cve:`2018-16658`, :cve:`2018-16862`, :cve:`2018-16871`, :cve:`2018-16880`, :cve:`2018-16882`, :cve:`2018-16884`, :cve:`2018-17182`, :cve:`2018-17972`, :cve:`2018-18021`, :cve:`2018-18281`, :cve:`2018-18386`, :cve:`2018-18397`, :cve:`2018-18445`, :cve:`2018-18559`, :cve:`2018-18690`, :cve:`2018-18710`, :cve:`2018-18955`, :cve:`2018-19406`, :cve:`2018-19407`, :cve:`2018-19824`, :cve:`2018-19854`, :cve:`2018-19985`, :cve:`2018-20169`, :cve:`2018-20449`, :cve:`2018-20509`, :cve:`2018-20510`, :cve:`2018-20511`, :cve:`2018-20669`, :cve:`2018-20784`, :cve:`2018-20836`, :cve:`2018-20854`, :cve:`2018-20855`, :cve:`2018-20856`, :cve:`2018-20961`, :cve:`2018-20976`, :cve:`2018-21008`, :cve:`2018-25015`, :cve:`2018-25020`, :cve:`2018-3620`, :cve:`2018-3639`, :cve:`2018-3646`, :cve:`2018-3665`, :cve:`2018-3693`, :cve:`2018-5332`, :cve:`2018-5333`, :cve:`2018-5344`, :cve:`2018-5390`, :cve:`2018-5391`, :cve:`2018-5703`, :cve:`2018-5750`, :cve:`2018-5803`, :cve:`2018-5814`, :cve:`2018-5848`, :cve:`2018-5873`, :cve:`2018-5953`, :cve:`2018-5995`, :cve:`2018-6412`, :cve:`2018-6554`, :cve:`2018-6555`, :cve:`2018-6927`, :cve:`2018-7191`, :cve:`2018-7273`, :cve:`2018-7480`, :cve:`2018-7492`, :cve:`2018-7566`, :cve:`2018-7740`, :cve:`2018-7754`, :cve:`2018-7755`, :cve:`2018-7757`, :cve:`2018-7995`, :cve:`2018-8043`, :cve:`2018-8087`, :cve:`2018-8781`, :cve:`2018-8822`, :cve:`2018-8897`, :cve:`2018-9363`, :cve:`2018-9385`, :cve:`2018-9415`, :cve:`2018-9422`, :cve:`2018-9465`, :cve:`2018-9516`, :cve:`2018-9517`, :cve:`2018-9518`, :cve:`2018-9568`, :cve:`2019-0136`, :cve:`2019-0145`, :cve:`2019-0146`, :cve:`2019-0147`, :cve:`2019-0148`, :cve:`2019-0149`, :cve:`2019-0154`, :cve:`2019-0155`, :cve:`2019-10124`, :cve:`2019-10125`, :cve:`2019-10126`, :cve:`2019-10142`, :cve:`2019-10207`, :cve:`2019-10220`, :cve:`2019-10638`, :cve:`2019-10639`, :cve:`2019-11085`, :cve:`2019-11091`, :cve:`2019-11135`, :cve:`2019-11190`, :cve:`2019-11191`, :cve:`2019-1125`, :cve:`2019-11477`, :cve:`2019-11478`, :cve:`2019-11479`, :cve:`2019-11486`, :cve:`2019-11487`, :cve:`2019-11599`, :cve:`2019-11683`, :cve:`2019-11810`, :cve:`2019-11811`, :cve:`2019-11815`, :cve:`2019-11833`, :cve:`2019-11884`, :cve:`2019-12378`, :cve:`2019-12379`, :cve:`2019-12380`, :cve:`2019-12381`, :cve:`2019-12382`, :cve:`2019-12454`, :cve:`2019-12455`, :cve:`2019-12614`, :cve:`2019-12615`, :cve:`2019-12817`, :cve:`2019-12818`, :cve:`2019-12819`, :cve:`2019-12881`, :cve:`2019-12984`, :cve:`2019-13233`, :cve:`2019-13272`, :cve:`2019-13631`, :cve:`2019-13648`, :cve:`2019-14283`, :cve:`2019-14284`, :cve:`2019-14615`, :cve:`2019-14763`, :cve:`2019-14814`, :cve:`2019-14815`, :cve:`2019-14816`, :cve:`2019-14821`, :cve:`2019-14835`, :cve:`2019-14895`, :cve:`2019-14896`, :cve:`2019-14897`, :cve:`2019-14901`, :cve:`2019-15030`, :cve:`2019-15031`, :cve:`2019-15090`, :cve:`2019-15098`, :cve:`2019-15099`, :cve:`2019-15117`, :cve:`2019-15118`, :cve:`2019-15211`, :cve:`2019-15212`, :cve:`2019-15213`, :cve:`2019-15214`, :cve:`2019-15215`, :cve:`2019-15216`, :cve:`2019-15217`, :cve:`2019-15218`, :cve:`2019-15219`, :cve:`2019-15220`, :cve:`2019-15221`, :cve:`2019-15222`, :cve:`2019-15223`, :cve:`2019-15291`, :cve:`2019-15292`, :cve:`2019-15504`, :cve:`2019-15505`, :cve:`2019-15538`, :cve:`2019-15666`, :cve:`2019-15807`, 
:cve:`2019-15916`, :cve:`2019-15917`, :cve:`2019-15918`, :cve:`2019-15919`, :cve:`2019-15920`, :cve:`2019-15921`, :cve:`2019-15922`, :cve:`2019-15923`, :cve:`2019-15924`, :cve:`2019-15925`, :cve:`2019-15926`, :cve:`2019-15927`, :cve:`2019-16229`, :cve:`2019-16230`, :cve:`2019-16231`, :cve:`2019-16232`, :cve:`2019-16233`, :cve:`2019-16234`, :cve:`2019-16413`, :cve:`2019-16714`, :cve:`2019-16746`, :cve:`2019-16921`, :cve:`2019-16994`, :cve:`2019-16995`, :cve:`2019-17052`, :cve:`2019-17053`, :cve:`2019-17054`, :cve:`2019-17055`, :cve:`2019-17056`, :cve:`2019-17075`, :cve:`2019-17133`, :cve:`2019-17351`, :cve:`2019-17666`, :cve:`2019-18198`, :cve:`2019-18282`, :cve:`2019-18660`, :cve:`2019-18675`, :cve:`2019-18683`, :cve:`2019-18786`, :cve:`2019-18805`, :cve:`2019-18806`, :cve:`2019-18807`, :cve:`2019-18808`, :cve:`2019-18809`, :cve:`2019-18810`, :cve:`2019-18811`, :cve:`2019-18812`, :cve:`2019-18813`, :cve:`2019-18814`, :cve:`2019-18885`, :cve:`2019-19036`, :cve:`2019-19037`, :cve:`2019-19039`, :cve:`2019-19043`, :cve:`2019-19044`, :cve:`2019-19045`, :cve:`2019-19046`, :cve:`2019-19047`, :cve:`2019-19048`, :cve:`2019-19049`, :cve:`2019-19050`, :cve:`2019-19051`, :cve:`2019-19052`, :cve:`2019-19053`, :cve:`2019-19054`, :cve:`2019-19055`, :cve:`2019-19056`, :cve:`2019-19057`, :cve:`2019-19058`, :cve:`2019-19059`, :cve:`2019-19060`, :cve:`2019-19061`, :cve:`2019-19062`, :cve:`2019-19063`, :cve:`2019-19064`, :cve:`2019-19065`, :cve:`2019-19066`, :cve:`2019-19067`, :cve:`2019-19068`, :cve:`2019-19069`, :cve:`2019-19070`, :cve:`2019-19071`, :cve:`2019-19072`, :cve:`2019-19073`, :cve:`2019-19074`, :cve:`2019-19075`, :cve:`2019-19076`, :cve:`2019-19077`, :cve:`2019-19078`, :cve:`2019-19079`, :cve:`2019-19080`, :cve:`2019-19081`, :cve:`2019-19082`, :cve:`2019-19083`, :cve:`2019-19227`, :cve:`2019-19241`, :cve:`2019-19252`, :cve:`2019-19318`, :cve:`2019-19319`, :cve:`2019-19332`, :cve:`2019-19338`, :cve:`2019-19377`, :cve:`2019-19447`, :cve:`2019-19448`, :cve:`2019-19449`, :cve:`2019-19462`, :cve:`2019-19523`, :cve:`2019-19524`, :cve:`2019-19525`, :cve:`2019-19526`, :cve:`2019-19527`, :cve:`2019-19528`, :cve:`2019-19529`, :cve:`2019-19530`, :cve:`2019-19531`, :cve:`2019-19532`, :cve:`2019-19533`, :cve:`2019-19534`, :cve:`2019-19535`, :cve:`2019-19536`, :cve:`2019-19537`, :cve:`2019-19543`, :cve:`2019-19602`, :cve:`2019-19767`, :cve:`2019-19768`, :cve:`2019-19769`, :cve:`2019-19770`, :cve:`2019-19807`, :cve:`2019-19813`, :cve:`2019-19815`, :cve:`2019-19816`, :cve:`2019-19922`, :cve:`2019-19927`, :cve:`2019-19947`, :cve:`2019-19965`, :cve:`2019-19966`, :cve:`2019-1999`, :cve:`2019-20054`, :cve:`2019-20095`, :cve:`2019-20096`, :cve:`2019-2024`, :cve:`2019-2025`, :cve:`2019-20422`, :cve:`2019-2054`, :cve:`2019-20636`, :cve:`2019-20806`, :cve:`2019-20810`, :cve:`2019-20811`, :cve:`2019-20812`, :cve:`2019-20908`, :cve:`2019-20934`, :cve:`2019-2101`, :cve:`2019-2181`, :cve:`2019-2182`, :cve:`2019-2213`, :cve:`2019-2214`, :cve:`2019-2215`, :cve:`2019-25044`, :cve:`2019-25045`, :cve:`2019-3016`, :cve:`2019-3459`, :cve:`2019-3460`, :cve:`2019-3701`, :cve:`2019-3819`, :cve:`2019-3837`, :cve:`2019-3846`, :cve:`2019-3874`, :cve:`2019-3882`, :cve:`2019-3887`, :cve:`2019-3892`, :cve:`2019-3896`, :cve:`2019-3900`, :cve:`2019-3901`, :cve:`2019-5108`, :cve:`2019-6133`, :cve:`2019-6974`, :cve:`2019-7221`, :cve:`2019-7222`, :cve:`2019-7308`, :cve:`2019-8912`, :cve:`2019-8956`, :cve:`2019-8980`, :cve:`2019-9003`, :cve:`2019-9162`, :cve:`2019-9213`, :cve:`2019-9245`, :cve:`2019-9444`, :cve:`2019-9445`, :cve:`2019-9453`, 
:cve:`2019-9454`, :cve:`2019-9455`, :cve:`2019-9456`, :cve:`2019-9457`, :cve:`2019-9458`, :cve:`2019-9466`, :cve:`2019-9500`, :cve:`2019-9503`, :cve:`2019-9506`, :cve:`2019-9857`, :cve:`2020-0009`, :cve:`2020-0030`, :cve:`2020-0041`, :cve:`2020-0066`, :cve:`2020-0067`, :cve:`2020-0110`, :cve:`2020-0255`, :cve:`2020-0305`, :cve:`2020-0404`, :cve:`2020-0423`, :cve:`2020-0427`, :cve:`2020-0429`, :cve:`2020-0430`, :cve:`2020-0431`, :cve:`2020-0432`, :cve:`2020-0433`, :cve:`2020-0435`, :cve:`2020-0444`, :cve:`2020-0465`, :cve:`2020-0466`, :cve:`2020-0543`, :cve:`2020-10135`, :cve:`2020-10690`, :cve:`2020-10711`, :cve:`2020-10720`, :cve:`2020-10732`, :cve:`2020-10742`, :cve:`2020-10751`, :cve:`2020-10757`, :cve:`2020-10766`, :cve:`2020-10767`, :cve:`2020-10768`, :cve:`2020-10769`, :cve:`2020-10773`, :cve:`2020-10781`, :cve:`2020-10942`, :cve:`2020-11494`, :cve:`2020-11565`, :cve:`2020-11608`, :cve:`2020-11609`, :cve:`2020-11668`, :cve:`2020-11669`, :cve:`2020-11884`, :cve:`2020-12114`, :cve:`2020-12351`, :cve:`2020-12352`, :cve:`2020-12464`, :cve:`2020-12465`, :cve:`2020-12652`, :cve:`2020-12653`, :cve:`2020-12654`, :cve:`2020-12655`, :cve:`2020-12656`, :cve:`2020-12657`, :cve:`2020-12659`, :cve:`2020-12768`, :cve:`2020-12769`, :cve:`2020-12770`, :cve:`2020-12771`, :cve:`2020-12826`, :cve:`2020-12888`, :cve:`2020-12912`, :cve:`2020-13143`, :cve:`2020-13974`, :cve:`2020-14305`, :cve:`2020-14314`, :cve:`2020-14331`, :cve:`2020-14351`, :cve:`2020-14353`, :cve:`2020-14356`, :cve:`2020-14381`, :cve:`2020-14385`, :cve:`2020-14386`, :cve:`2020-14390`, :cve:`2020-14416`, :cve:`2020-15393`, :cve:`2020-15436`, :cve:`2020-15437`, :cve:`2020-15780`, :cve:`2020-15852`, :cve:`2020-16119`, :cve:`2020-16120`, :cve:`2020-16166`, :cve:`2020-1749`, :cve:`2020-24394`, :cve:`2020-24490`, :cve:`2020-24586`, :cve:`2020-24587`, :cve:`2020-24588`, :cve:`2020-25211`, :cve:`2020-25212`, :cve:`2020-25221`, :cve:`2020-25284`, :cve:`2020-25285`, :cve:`2020-25639`, :cve:`2020-25641`, :cve:`2020-25643`, :cve:`2020-25645`, :cve:`2020-25656`, :cve:`2020-25668`, :cve:`2020-25669`, :cve:`2020-25670`, :cve:`2020-25671`, :cve:`2020-25672`, :cve:`2020-25673`, :cve:`2020-25704`, :cve:`2020-25705`, :cve:`2020-26088`, :cve:`2020-26139`, :cve:`2020-26141`, :cve:`2020-26145`, :cve:`2020-26147`, :cve:`2020-26541`, :cve:`2020-26555`, :cve:`2020-26558`, :cve:`2020-27066`, :cve:`2020-27067`, :cve:`2020-27068`, :cve:`2020-27152`, :cve:`2020-27170`, :cve:`2020-27171`, :cve:`2020-27194`, :cve:`2020-2732`, :cve:`2020-27418`, :cve:`2020-27673`, :cve:`2020-27675`, :cve:`2020-27777`, :cve:`2020-27784`, :cve:`2020-27786`, :cve:`2020-27815`, :cve:`2020-27820`, :cve:`2020-27825`, :cve:`2020-27830`, :cve:`2020-27835`, :cve:`2020-28097`, :cve:`2020-28374`, :cve:`2020-28588`, :cve:`2020-28915`, :cve:`2020-28941`, :cve:`2020-28974`, :cve:`2020-29368`, :cve:`2020-29369`, :cve:`2020-29370`, :cve:`2020-29371`, :cve:`2020-29372`, :cve:`2020-29373`, :cve:`2020-29374`, :cve:`2020-29534`, :cve:`2020-29568`, :cve:`2020-29569`, :cve:`2020-29660`, :cve:`2020-29661`, :cve:`2020-35499`, :cve:`2020-35508`, :cve:`2020-35513`, :cve:`2020-35519`, :cve:`2020-36158`, :cve:`2020-36310`, :cve:`2020-36311`, :cve:`2020-36312`, :cve:`2020-36313`, :cve:`2020-36322`, :cve:`2020-36385`, :cve:`2020-36386`, :cve:`2020-36387`, :cve:`2020-36516`, :cve:`2020-36557`, :cve:`2020-36558`, :cve:`2020-36691`, :cve:`2020-36694`, :cve:`2020-36766`, :cve:`2020-3702`, :cve:`2020-4788`, :cve:`2020-7053`, :cve:`2020-8428`, :cve:`2020-8647`, :cve:`2020-8648`, :cve:`2020-8649`, :cve:`2020-8694`, 
:cve:`2020-8834`, :cve:`2020-8835`, :cve:`2020-8992`, :cve:`2020-9383`, :cve:`2020-9391`, :cve:`2021-0129`, :cve:`2021-0342`, :cve_mitre:`2021-0447`, :cve_mitre:`2021-0448`, :cve:`2021-0512`, :cve:`2021-0605`, :cve:`2021-0707`, :cve:`2021-0920`, :cve:`2021-0929`, :cve:`2021-0935`, :cve_mitre:`2021-0937`, :cve:`2021-0938`, :cve:`2021-0941`, :cve:`2021-1048`, :cve:`2021-20177`, :cve:`2021-20194`, :cve:`2021-20226`, :cve:`2021-20239`, :cve:`2021-20261`, :cve:`2021-20265`, :cve:`2021-20268`, :cve:`2021-20292`, :cve:`2021-20317`, :cve:`2021-20320`, :cve:`2021-20321`, :cve:`2021-20322`, :cve:`2021-21781`, :cve:`2021-22543`, :cve:`2021-22555`, :cve:`2021-22600`, :cve:`2021-23133`, :cve:`2021-23134`, :cve:`2021-26401`, :cve:`2021-26708`, :cve:`2021-26930`, :cve:`2021-26931`, :cve:`2021-26932`, :cve:`2021-27363`, :cve:`2021-27364`, :cve:`2021-27365`, :cve:`2021-28038`, :cve:`2021-28039`, :cve:`2021-28375`, :cve:`2021-28660`, :cve:`2021-28688`, :cve:`2021-28691`, :cve:`2021-28711`, :cve:`2021-28712`, :cve:`2021-28713`, :cve:`2021-28714`, :cve:`2021-28715`, :cve:`2021-28950`, :cve:`2021-28951`, :cve:`2021-28952`, :cve:`2021-28964`, :cve:`2021-28971`, :cve:`2021-28972`, :cve:`2021-29154`, :cve:`2021-29155`, :cve:`2021-29264`, :cve:`2021-29265`, :cve:`2021-29266`, :cve:`2021-29646`, :cve:`2021-29647`, :cve:`2021-29648`, :cve:`2021-29649`, :cve:`2021-29650`, :cve:`2021-29657`, :cve:`2021-30002`, :cve:`2021-30178`, :cve:`2021-31440`, :cve:`2021-3178`, :cve:`2021-31829`, :cve:`2021-31916`, :cve:`2021-32399`, :cve:`2021-32606`, :cve:`2021-33033`, :cve:`2021-33034`, :cve:`2021-33098`, :cve:`2021-33135`, :cve:`2021-33200`, :cve:`2021-3347`, :cve:`2021-3348`, :cve:`2021-33624`, :cve:`2021-33655`, :cve:`2021-33656`, :cve:`2021-33909`, :cve:`2021-3411`, :cve:`2021-3428`, :cve:`2021-3444`, :cve:`2021-34556`, :cve:`2021-34693`, :cve:`2021-3483`, :cve:`2021-34866`, :cve:`2021-3489`, :cve:`2021-3490`, :cve:`2021-3491`, :cve_mitre:`2021-34981`, :cve:`2021-3501`, :cve:`2021-35039`, :cve:`2021-3506`, :cve:`2021-3543`, :cve:`2021-35477`, :cve:`2021-3564`, :cve:`2021-3573`, :cve:`2021-3587`, :cve_mitre:`2021-3600`, :cve:`2021-3609`, :cve:`2021-3612`, :cve:`2021-3635`, :cve:`2021-3640`, :cve:`2021-3653`, :cve:`2021-3655`, :cve:`2021-3656`, :cve:`2021-3659`, :cve:`2021-3679`, :cve:`2021-3715`, :cve:`2021-37159`, :cve:`2021-3732`, :cve:`2021-3736`, :cve:`2021-3739`, :cve:`2021-3743`, :cve:`2021-3744`, :cve:`2021-3752`, :cve:`2021-3753`, :cve:`2021-37576`, :cve:`2021-3759`, :cve:`2021-3760`, :cve:`2021-3764`, :cve:`2021-3772`, :cve:`2021-38160`, :cve:`2021-38166`, :cve:`2021-38198`, :cve:`2021-38199`, :cve:`2021-38200`, :cve:`2021-38201`, :cve:`2021-38202`, :cve:`2021-38203`, :cve:`2021-38204`, :cve:`2021-38205`, :cve:`2021-38206`, :cve:`2021-38207`, :cve:`2021-38208`, :cve:`2021-38209`, :cve:`2021-38300`, :cve:`2021-3894`, :cve:`2021-3896`, :cve:`2021-3923`, :cve:`2021-39633`, :cve:`2021-39634`, :cve:`2021-39636`, :cve:`2021-39648`, :cve:`2021-39656`, :cve:`2021-39657`, :cve:`2021-39685`, :cve:`2021-39686`, :cve:`2021-39698`, :cve:`2021-39711`, :cve:`2021-39713`, :cve:`2021-39714`, :cve:`2021-4001`, :cve:`2021-4002`, :cve:`2021-4028`, :cve:`2021-4032`, :cve:`2021-4037`, :cve:`2021-40490`, :cve:`2021-4083`, :cve:`2021-4090`, :cve:`2021-4093`, :cve:`2021-4095`, :cve:`2021-41073`, :cve:`2021-4135`, :cve:`2021-4148`, :cve:`2021-4149`, :cve:`2021-4154`, :cve:`2021-4155`, :cve:`2021-4157`, :cve:`2021-4159`, :cve:`2021-41864`, :cve:`2021-4197`, :cve:`2021-42008`, :cve:`2021-4202`, :cve:`2021-4203`, :cve:`2021-4218`, 
:cve:`2021-42252`, :cve:`2021-42327`, :cve:`2021-42739`, :cve:`2021-43056`, :cve:`2021-43057`, :cve:`2021-43267`, :cve:`2021-43389`, :cve:`2021-43975`, :cve:`2021-43976`, :cve:`2021-44733`, :cve:`2021-45095`, :cve:`2021-45100`, :cve:`2021-45402`, :cve:`2021-45469`, :cve:`2021-45480`, :cve:`2021-45485`, :cve:`2021-45486`, :cve:`2021-45868`, :cve:`2021-46283`, :cve:`2022-0001`, :cve:`2022-0002`, :cve:`2022-0168`, :cve:`2022-0171`, :cve:`2022-0185`, :cve:`2022-0264`, :cve:`2022-0286`, :cve:`2022-0322`, :cve:`2022-0330`, :cve:`2022-0433`, :cve:`2022-0435`, :cve:`2022-0487`, :cve:`2022-0492`, :cve:`2022-0494`, :cve:`2022-0516`, :cve:`2022-0617`, :cve:`2022-0644`, :cve:`2022-0646`, :cve:`2022-0742`, :cve:`2022-0812`, :cve:`2022-0847`, :cve:`2022-0850`, :cve:`2022-0854`, :cve:`2022-0995`, :cve:`2022-1011`, :cve:`2022-1012`, :cve:`2022-1015`, :cve:`2022-1016`, :cve:`2022-1043`, :cve:`2022-1048`, :cve:`2022-1055`, :cve:`2022-1158`, :cve:`2022-1184`, :cve:`2022-1195`, :cve:`2022-1198`, :cve:`2022-1199`, :cve:`2022-1204`, :cve:`2022-1205`, :cve:`2022-1353`, :cve:`2022-1419`, :cve:`2022-1462`, :cve:`2022-1516`, :cve:`2022-1651`, :cve:`2022-1652`, :cve:`2022-1671`, :cve:`2022-1678`, :cve:`2022-1679`, :cve:`2022-1729`, :cve:`2022-1734`, :cve:`2022-1786`, :cve:`2022-1789`, :cve:`2022-1836`, :cve:`2022-1852`, :cve:`2022-1882`, :cve:`2022-1943`, :cve:`2022-1966`, :cve:`2022-1972`, :cve:`2022-1973`, :cve:`2022-1974`, :cve:`2022-1975`, :cve:`2022-1976`, :cve:`2022-1998`, :cve:`2022-20008`, :cve:`2022-20132`, :cve:`2022-20141`, :cve:`2022-20153`, :cve:`2022-20154`, :cve:`2022-20158`, :cve:`2022-20166`, :cve:`2022-20368`, :cve:`2022-20369`, :cve:`2022-20421`, :cve:`2022-20422`, :cve:`2022-20423`, :cve_mitre:`2022-20565`, :cve:`2022-20566`, :cve:`2022-20567`, :cve:`2022-20572`, :cve:`2022-2078`, :cve:`2022-21123`, :cve:`2022-21125`, :cve:`2022-21166`, :cve:`2022-21385`, :cve:`2022-21499`, :cve_mitre:`2022-21505`, :cve:`2022-2153`, :cve:`2022-2196`, :cve_mitre:`2022-22942`, :cve:`2022-23036`, :cve:`2022-23037`, :cve:`2022-23038`, :cve:`2022-23039`, :cve:`2022-23040`, :cve:`2022-23041`, :cve:`2022-23042`, :cve:`2022-2308`, :cve:`2022-2318`, :cve:`2022-2380`, :cve:`2022-23816`, :cve:`2022-23960`, :cve:`2022-24122`, :cve:`2022-24448`, :cve:`2022-24958`, :cve:`2022-24959`, :cve:`2022-2503`, :cve:`2022-25258`, :cve:`2022-25375`, :cve:`2022-25636`, :cve_mitre:`2022-2585`, :cve_mitre:`2022-2586`, :cve_mitre:`2022-2588`, :cve:`2022-2590`, :cve_mitre:`2022-2602`, :cve:`2022-26365`, :cve:`2022-26373`, :cve:`2022-2639`, :cve:`2022-26490`, :cve:`2022-2663`, :cve:`2022-26966`, :cve:`2022-27223`, :cve:`2022-27666`, :cve:`2022-2785`, :cve:`2022-27950`, :cve:`2022-28356`, :cve:`2022-28388`, :cve:`2022-28389`, :cve:`2022-28390`, :cve:`2022-2873`, :cve:`2022-28796`, :cve:`2022-28893`, :cve:`2022-2905`, :cve:`2022-29156`, :cve:`2022-2938`, :cve:`2022-29581`, :cve:`2022-29582`, :cve:`2022-2959`, :cve:`2022-2964`, :cve:`2022-2977`, :cve:`2022-2978`, :cve:`2022-29900`, :cve:`2022-29901`, :cve:`2022-29968`, :cve:`2022-3028`, :cve:`2022-30594`, :cve:`2022-3061`, :cve:`2022-3077`, :cve:`2022-3078`, :cve:`2022-3103`, :cve:`2022-3104`, :cve:`2022-3105`, :cve:`2022-3106`, :cve:`2022-3107`, :cve:`2022-3110`, :cve:`2022-3111`, :cve:`2022-3112`, :cve:`2022-3113`, :cve:`2022-3114`, :cve:`2022-3115`, :cve:`2022-3169`, :cve:`2022-3170`, :cve:`2022-3202`, :cve:`2022-32250`, :cve:`2022-32296`, :cve:`2022-3239`, :cve:`2022-32981`, :cve:`2022-3303`, :cve:`2022-33740`, :cve:`2022-33741`, :cve:`2022-33742`, :cve:`2022-33743`, :cve:`2022-33744`, 
:cve:`2022-33981`, :cve:`2022-3424`, :cve:`2022-3435`, :cve:`2022-34494`, :cve:`2022-34495`, :cve:`2022-34918`, :cve:`2022-3521`, :cve:`2022-3524`, :cve:`2022-3526`, :cve:`2022-3531`, :cve:`2022-3532`, :cve:`2022-3534`, :cve:`2022-3535`, :cve:`2022-3541`, :cve:`2022-3542`, :cve:`2022-3543`, :cve:`2022-3545`, :cve:`2022-3564`, :cve:`2022-3565`, :cve:`2022-3577`, :cve:`2022-3586`, :cve:`2022-3594`, :cve:`2022-36123`, :cve:`2022-3619`, :cve:`2022-3621`, :cve:`2022-3623`, :cve:`2022-3625`, :cve:`2022-3628`, :cve:`2022-36280`, :cve:`2022-3629`, :cve:`2022-3630`, :cve:`2022-3633`, :cve:`2022-3635`, :cve:`2022-3640`, :cve:`2022-3643`, :cve:`2022-3646`, :cve:`2022-3649`, :cve:`2022-36879`, :cve:`2022-36946`, :cve:`2022-3707`, :cve:`2022-3910`, :cve:`2022-39189`, :cve:`2022-39190`, :cve:`2022-3977`, :cve:`2022-39842`, :cve:`2022-40307`, :cve:`2022-40476`, :cve:`2022-40768`, :cve:`2022-4095`, :cve:`2022-40982`, :cve:`2022-41218`, :cve:`2022-41222`, :cve:`2022-4127`, :cve:`2022-4128`, :cve:`2022-4129`, :cve:`2022-4139`, :cve:`2022-41674`, :cve:`2022-41849`, :cve:`2022-41850`, :cve:`2022-41858`, :cve:`2022-42328`, :cve:`2022-42329`, :cve:`2022-42432`, :cve:`2022-4269`, :cve:`2022-42703`, :cve:`2022-42719`, :cve:`2022-42720`, :cve:`2022-42721`, :cve:`2022-42722`, :cve:`2022-42895`, :cve:`2022-42896`, :cve:`2022-43750`, :cve:`2022-4378`, :cve:`2022-4379`, :cve:`2022-4382`, :cve:`2022-43945`, :cve:`2022-45869`, :cve:`2022-45886`, :cve:`2022-45887`, :cve:`2022-45888`, :cve:`2022-45919`, :cve:`2022-45934`, :cve:`2022-4662`, :cve:`2022-4744`, :cve:`2022-47518`, :cve:`2022-47519`, :cve:`2022-47520`, :cve:`2022-47521`, :cve:`2022-47929`, :cve:`2022-47938`, :cve:`2022-47939`, :cve:`2022-47940`, :cve:`2022-47941`, :cve:`2022-47942`, :cve:`2022-47943`, :cve:`2022-4842`, :cve:`2022-48423`, :cve:`2022-48424`, :cve:`2022-48425`, :cve:`2022-48502`, :cve:`2023-0030`, :cve:`2023-0045`, :cve:`2023-0047`, :cve:`2023-0122`, :cve:`2023-0160`, :cve:`2023-0179`, :cve:`2023-0210`, :cve:`2023-0240`, :cve:`2023-0266`, :cve:`2023-0394`, :cve:`2023-0458`, :cve:`2023-0459`, :cve:`2023-0461`, :cve:`2023-0468`, :cve:`2023-0469`, :cve:`2023-0590`, :cve:`2023-0615`, :cve_mitre:`2023-1032`, :cve:`2023-1073`, :cve:`2023-1074`, :cve:`2023-1076`, :cve:`2023-1077`, :cve:`2023-1078`, :cve:`2023-1079`, :cve:`2023-1095`, :cve:`2023-1118`, :cve:`2023-1192`, :cve:`2023-1194`, :cve:`2023-1195`, :cve:`2023-1206`, :cve:`2023-1249`, :cve:`2023-1252`, :cve:`2023-1281`, :cve:`2023-1380`, :cve:`2023-1382`, :cve:`2023-1390`, :cve:`2023-1513`, :cve:`2023-1582`, :cve:`2023-1583`, :cve:`2023-1611`, :cve:`2023-1637`, :cve:`2023-1652`, :cve:`2023-1670`, :cve:`2023-1829`, :cve:`2023-1838`, :cve:`2023-1855`, :cve:`2023-1859`, :cve:`2023-1989`, :cve:`2023-1990`, :cve:`2023-1998`, :cve:`2023-2002`, :cve:`2023-2006`, :cve:`2023-2008`, :cve:`2023-2019`, :cve:`2023-20569`, :cve:`2023-20588`, :cve:`2023-20593`, :cve:`2023-20938`, :cve:`2023-21102`, :cve:`2023-21106`, :cve:`2023-2124`, :cve:`2023-21255`, :cve:`2023-21264`, :cve:`2023-2156`, :cve:`2023-2162`, :cve:`2023-2163`, :cve:`2023-2166`, :cve:`2023-2177`, :cve:`2023-2194`, :cve:`2023-2235`, :cve:`2023-2236`, :cve:`2023-2248`, :cve:`2023-2269`, :cve:`2023-22996`, :cve:`2023-22997`, :cve:`2023-22998`, :cve:`2023-22999`, :cve:`2023-23001`, :cve:`2023-23002`, :cve:`2023-23003`, :cve:`2023-23004`, :cve:`2023-23005`, :cve:`2023-23006`, :cve:`2023-23454`, :cve:`2023-23455`, :cve:`2023-23559`, :cve:`2023-2483`, :cve:`2023-25012`, :cve:`2023-2513`, :cve:`2023-25775`, :cve:`2023-2598`, :cve:`2023-26544`, 
:cve:`2023-26545`, :cve:`2023-26605`, :cve:`2023-26606`, :cve:`2023-26607`, :cve:`2023-28327`, :cve:`2023-28328`, :cve:`2023-28410`, :cve:`2023-28464`, :cve:`2023-28466`, :cve:`2023-2860`, :cve:`2023-28772`, :cve:`2023-28866`, :cve:`2023-2898`, :cve:`2023-2985`, :cve:`2023-3006`, :cve:`2023-30456`, :cve:`2023-30772`, :cve:`2023-3090`, :cve:`2023-3106`, :cve:`2023-3111`, :cve:`2023-3117`, :cve:`2023-31248`, :cve:`2023-3141`, :cve:`2023-31436`, :cve:`2023-3159`, :cve:`2023-3161`, :cve:`2023-3212`, :cve:`2023-3220`, :cve:`2023-32233`, :cve:`2023-32247`, :cve:`2023-32248`, :cve:`2023-32250`, :cve:`2023-32252`, :cve:`2023-32254`, :cve:`2023-32257`, :cve:`2023-32258`, :cve:`2023-32269`, :cve:`2023-3268`, :cve:`2023-3269`, :cve:`2023-3312`, :cve:`2023-3317`, :cve:`2023-33203`, :cve:`2023-33250`, :cve:`2023-33288`, :cve:`2023-3338`, :cve:`2023-3355`, :cve:`2023-3357`, :cve:`2023-3358`, :cve:`2023-3359`, :cve:`2023-3390`, :cve:`2023-33951`, :cve:`2023-33952`, :cve:`2023-34255`, :cve:`2023-34256`, :cve:`2023-34319`, :cve:`2023-3439`, :cve:`2023-35001`, :cve:`2023-3567`, :cve:`2023-35788`, :cve:`2023-35823`, :cve:`2023-35824`, :cve:`2023-35826`, :cve:`2023-35828`, :cve:`2023-35829`, :cve:`2023-3609`, :cve:`2023-3610`, :cve:`2023-3611`, :cve:`2023-37453`, :cve:`2023-3772`, :cve:`2023-3773`, :cve:`2023-3776`, :cve:`2023-3777`, :cve:`2023-3812`, :cve:`2023-38409`, :cve:`2023-38426`, :cve:`2023-38427`, :cve:`2023-38428`, :cve:`2023-38429`, :cve:`2023-38430`, :cve:`2023-38431`, :cve:`2023-38432`, :cve:`2023-3863`, :cve_mitre:`2023-3865`, :cve_mitre:`2023-3866`, :cve_mitre:`2023-3867`, :cve:`2023-39189`, :cve:`2023-39192`, :cve:`2023-39193`, :cve:`2023-39194`, :cve:`2023-4004`, :cve:`2023-4015`, :cve:`2023-40283`, :cve:`2023-4128`, :cve:`2023-4132`, :cve:`2023-4147`, :cve:`2023-4155`, :cve:`2023-4194`, :cve:`2023-4206`, :cve:`2023-4207`, :cve:`2023-4208`, :cve:`2023-4273`, :cve:`2023-42752`, :cve:`2023-42753`, :cve:`2023-42755`, :cve:`2023-42756`, :cve:`2023-4385`, :cve:`2023-4387`, :cve:`2023-4389`, :cve:`2023-4394`, :cve:`2023-44466`, :cve:`2023-4459`, :cve:`2023-4569`, :cve:`2023-45862`, :cve:`2023-45871`, :cve:`2023-4611`, :cve:`2023-4623`, :cve:`2023-4732`, :cve:`2023-4921` and :cve:`2023-5345`
+- linux-yocto/5.15: Ignore :cve:`2022-45886`, :cve:`2022-45887`, :cve:`2022-45919`, :cve:`2022-48502`, :cve:`2023-0160`, :cve:`2023-1206`, :cve:`2023-20593`, :cve:`2023-21264`, :cve:`2023-2898`, :cve:`2023-31248`, :cve:`2023-33250`, :cve:`2023-34319`, :cve:`2023-35001`, :cve:`2023-3611`, :cve:`2023-37453`, :cve:`2023-3773`, :cve:`2023-3776`, :cve:`2023-3777`, :cve:`2023-38432`, :cve:`2023-3863`, :cve_mitre:`2023-3865`, :cve_mitre:`2023-3866`, :cve:`2023-4004`, :cve:`2023-4015`, :cve:`2023-4132`, :cve:`2023-4147`, :cve:`2023-4194`, :cve:`2023-4385`, :cve:`2023-4387`, :cve:`2023-4389`, :cve:`2023-4394`, :cve:`2023-4459` and :cve:`2023-4611`
+- openssl: Fix :cve:`2023-4807` and :cve:`2023-5363`
+- python3-git: Fix :cve:`2023-40590` and :cve:`2023-41040`
+- python3-urllib3: Fix :cve:`2023-43804`
+- qemu: Ignore :cve:`2023-2680`
+- ruby: Fix :cve:`2023-36617`
+- shadow: Fix :cve_mitre:`2023-4641`
+- tiff: Fix :cve:`2023-3576` and :cve:`2023-40745`
+- vim: Fix :cve:`2023-5441` and :cve:`2023-5535`
+- webkitgtk: Fix :cve:`2023-32439`
+- xdg-utils: Fix :cve:`2022-4055`
+- xserver-xorg: Ignore :cve:`2022-3553` (XQuartz-specific)
+- zlib: Fix :cve:`2023-45853`
+
+
+
+Fixes in Yocto-4.0.14
+~~~~~~~~~~~~~~~~~~~~~
+
+- SECURITY.md: Add file
+- apt: add missing <cstdint> for uint16_t
+- bind: update to 9.18.19
+- bitbake: SECURITY.md: add file
+- bitbake: bitbake-getvar: Add a quiet command line argument
+- bitbake: bitbake-worker/runqueue: Avoid unnecessary bytes object copies
+- brief-yoctoprojectqs: use new CDN mirror for sstate
+- bsp-guide: bsp.rst: replace reference to wiki
+- bsp-guide: bsp: skip Intel machines no longer supported in Poky
+- build-appliance-image: Update to kirkstone head revision
+- ccache: fix build with gcc-13
+- cml1: Fix KCONFIG_CONFIG_COMMAND not conveyed fully in do_menuconfig
+- contributor-guide/style-guide: Add a note about task idempotence
+- contributor-guide/style-guide: Refer to recipes, not packages
+- contributor-guide: deprecate "Accepted" patch status
+- contributor-guide: discourage marking patches as Inappropriate
+- contributor-guide: recipe-style-guide: add more patch tagging examples
+- contributor-guide: recipe-style-guide: add section about CVE patches
+- contributor-guide: style-guide: discourage using Pending patch status
+- dev-manual: add security team processes
+- dev-manual: fix testimage usage instructions
+- dev-manual: layers: Add notes about layer.conf
+- dev-manual: new-recipe.rst: add missing parenthesis to "Patching Code" section
+- dev-manual: new-recipe.rst: replace reference to wiki
+- dev-manual: start.rst: remove obsolete reference
+- dev-manual: wic: update "wic list images" output
+- dev/ref-manual: Document :term:`INIT_MANAGER`
+- fontcache.bbclass: avoid native recipes depending on target fontconfig
+- glibc: Update to latest on stable 2.35 branch (c84018a05aec..)
+- json-c: define :term:`CVE_VERSION`
+- kernel.bbclass: Add force flag to rm calls
+- libxpm: upgrade to 3.5.17
+- linux-firmware: create separate packages
+- linux-firmware: upgrade to 20230804
+- linux-yocto/5.10: update to v5.10.197
+- linux-yocto: update CVE exclusions
+- manuals: correct "yocto-linux" to "linux-yocto"
+- manuals: update linux-yocto append examples
+- migration-guides: add release notes for 4.0.13
+- openssl: Upgrade to 3.0.12
+- overview: Add note about non-reproducibility side effects
+- package_rpm: Allow compression mode override
+- poky.conf: bump version for 4.0.14
+- profile-manual: aesthetic cleanups
+- python3-git: upgrade to 3.1.37
+- python3-jinja2: fix for the ptest result format
+- python3-urllib3: upgrade to 1.26.17
+- ref-manual: Fix :term:`PACKAGECONFIG` term and add an example
+- ref-manual: Warn about :term:`COMPATIBLE_MACHINE` skipping native recipes
+- ref-manual: releases.svg: Scarthgap is now version 5.0
+- ref-manual: variables: add :term:`RECIPE_SYSROOT` and :term:`RECIPE_SYSROOT_NATIVE`
+- ref-manual: variables: add :term:`TOOLCHAIN_OPTIONS` variable
+- ref-manual: variables: add example for :term:`SYSROOT_DIRS` variable
+- ref-manual: variables: provide no-match example for :term:`COMPATIBLE_MACHINE`
+- sdk-manual: appendix-obtain: improve and update descriptions
+- test-manual: reproducible-builds: stop mentioning LTO bug
+- uboot-extlinux-config.bbclass: fix missed override syntax migration
+- vim: Upgrade to 9.0.2048
+
+
+Known Issues in Yocto-4.0.14
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.14
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alexander Kanavin
+- Archana Polampalli
+- Armin Kuster
+- Arne Schwerdt
+- BELHADJ SALEM Talel
+- Bruce Ashfield
+- Chaitanya Vadrevu
+- Colin McAllister
+- Deepthi Hemraj
+- Etienne Cordonnier
+- Fahad Arslan
+- Hitendra Prajapati
+- Jaeyoon Jung
+- Joshua Watt
+- Khem Raj
+- Lee Chee Yang
+- Marta Rybczynska
+- Martin Jansa
+- Meenali Gupta
+- Michael Opdenacker
+- Narpat Mali
+- Niko Mauno
+- Paul Eggleton
+- Paulo Neves
+- Peter Marko
+- Quentin Schulz
+- Richard Purdie
+- Robert P. J. Day
+- Roland Hieber
+- Ross Burton
+- Ryan Eatmon
+- Shubham Kulkarni
+- Siddharth Doshi
+- Soumya Sambu
+- Steve Sakoman
+- Tim Orling
+- Trevor Gamblin
+- Vijay Anusuri
+- Wang Mingyu
+- Yash Shinde
+- Yogita Urade
+
+
+Repositories / Downloads for Yocto-4.0.14
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.14 </poky/log/?h=yocto-4.0.14>`
+- Git Revision: :yocto_git:`d8d6d921fad14b82167d9f031d4fca06b5e01883 </poky/commit/?id=d8d6d921fad14b82167d9f031d4fca06b5e01883>`
+- Release Artefact: poky-d8d6d921fad14b82167d9f031d4fca06b5e01883
+- sha: 46a6301e3921ee67cfe6be7ea544d6257f0c0f02ef15c5091287e024ff02d5f5
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.14/poky-d8d6d921fad14b82167d9f031d4fca06b5e01883.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.14/poky-d8d6d921fad14b82167d9f031d4fca06b5e01883.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.14 </openembedded-core/log/?h=yocto-4.0.14>`
+- Git Revision: :oe_git:`0eb8e67aa6833df0cde29833568a70e65c21d7e5 </openembedded-core/commit/?id=0eb8e67aa6833df0cde29833568a70e65c21d7e5>`
+- Release Artefact: oecore-0eb8e67aa6833df0cde29833568a70e65c21d7e5
+- sha: d510a7067b87ba935b8a7c9f9608d0e06b057009ea753ed190ddfacc7195ecc5
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.14/oecore-0eb8e67aa6833df0cde29833568a70e65c21d7e5.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.14/oecore-0eb8e67aa6833df0cde29833568a70e65c21d7e5.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.14 </meta-mingw/log/?h=yocto-4.0.14>`
+- Git Revision: :yocto_git:`f6b38ce3c90e1600d41c2ebb41e152936a0357d7 </meta-mingw/commit/?id=f6b38ce3c90e1600d41c2ebb41e152936a0357d7>`
+- Release Artefact: meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7
+- sha: 7d57167c19077f4ab95623d55a24c2267a3a3fb5ed83688659b4c03586373b25
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.14/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.14/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.14 </meta-gplv2/log/?h=yocto-4.0.14>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.14/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.14/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.14 </bitbake/log/?h=yocto-4.0.14>`
+- Git Revision: :oe_git:`6c1ffa9091d0c53a100e8c8c15122d28642034bd </bitbake/commit/?id=6c1ffa9091d0c53a100e8c8c15122d28642034bd>`
+- Release Artefact: bitbake-6c1ffa9091d0c53a100e8c8c15122d28642034bd
+- sha: 1ceffc3b3359063341530c989a3606c897d862b61111538e683f101b02a360a2
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.14/bitbake-6c1ffa9091d0c53a100e8c8c15122d28642034bd.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.14/bitbake-6c1ffa9091d0c53a100e8c8c15122d28642034bd.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.14 </yocto-docs/log/?h=yocto-4.0.14>`
+- Git Revision: :yocto_git:`260b446a1a75d99399a3421cd8d6ba276f508f37 </yocto-docs/commit/?id=260b446a1a75d99399a3421cd8d6ba276f508f37>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.15.rst b/documentation/migration-guides/release-notes-4.0.15.rst
new file mode 100644
index 0000000000..b2731530e8
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.15.rst
@@ -0,0 +1,189 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.15 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.15
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- avahi: Fix :cve:`2023-1981`, :cve:`2023-38469`, :cve:`2023-38470`, :cve:`2023-38471`, :cve:`2023-38472` and :cve:`2023-38473`
+- binutils: Fix :cve:`2022-47007`, :cve:`2022-47010` and :cve:`2022-48064`
+- bluez5: Fix :cve:`2023-45866`
+- ghostscript: Ignore GhostPCL :cve:`2023-38560`
+- gnutls: Fix :cve:`2023-5981`
+- go: Ignore :cve:`2023-45283` and :cve:`2023-45284`
+- grub: Fix :cve:`2023-4692` and :cve:`2023-4693`
+- gstreamer1.0-plugins-bad: Fix :cve_mitre:`2023-44429`
+- libsndfile: Fix :cve:`2022-33065`
+- libwebp: Fix :cve:`2023-4863`
+- openssl: Fix :cve:`2023-5678`
+- python3-cryptography: Fix :cve:`2023-49083`
+- qemu: Fix :cve:`2023-1544`
+- sudo: Fix :cve:`2023-42456` and :cve_mitre:`2023-42465`
+- tiff: Fix :cve:`2023-41175`
+- vim: Fix :cve:`2023-46246`, :cve:`2023-48231`, :cve:`2023-48232`, :cve:`2023-48233`, :cve:`2023-48234`, :cve:`2023-48235`, :cve:`2023-48236`, :cve:`2023-48237` and :cve:`2023-48706`
+- xserver-xorg: Fix :cve:`2023-5367` and :cve:`2023-5380`
+- xwayland: Fix :cve:`2023-5367`
+
+
+Fixes in Yocto-4.0.15
+~~~~~~~~~~~~~~~~~~~~~
+
+- bash: changes to SIGINT handler while waiting for a child
+- bitbake: Fix disk space monitoring on cephfs
+- bitbake: bitbake-getvar: Make --quiet work with --recipe
+- bitbake: runqueue.py: fix PSI check logic
+- bitbake: runqueue: Add pressure change logging
+- bitbake: runqueue: convert deferral messages from bb.note to bb.debug
+- bitbake: runqueue: fix PSI check calculation
+- bitbake: runqueue: show more pressure data
+- bitbake: runqueue: show number of currently running bitbake threads when pressure changes
+- bitbake: tinfoil: Do not fail when logging is disabled and full config is used
+- build-appliance-image: Update to kirkstone head revision
+- cve-check: don't warn if a patch is remote
+- cve-check: slightly more verbose warning when adding the same package twice
+- cve-check: sort the package list in the JSON report
+- cve-exclusion_5.10.inc: update for 5.10.202
+- go: Fix issue in DNS resolver
+- goarch: Move Go architecture mapping to a library
+- gstreamer1.0-plugins-base: enable glx/opengl support
+- linux-yocto/5.10: update to v5.10.202
+- manuals: update class references
+- migration-guide: add release notes for 4.0.14
+- native: Clear TUNE_FEATURES/ABIEXTENSION
+- openssh: drop sudo from ptest dependencies
+- overview-manual: concepts: Add Bitbake Tasks Map
+- poky.conf: bump version for 4.0.15
+- python3-jinja2: Fixed ptest result output as per the standard
+- ref-manual: classes: explain cml1 class name
+- ref-manual: update :term:`SDK_NAME` variable documentation
+- ref-manual: variables: add :term:`RECIPE_MAINTAINER`
+- ref-manual: variables: document OEQA_REPRODUCIBLE_* variables
+- ref-manual: variables: mention new CDN for :term:`SSTATE_MIRRORS`
+- rust-common: Set llvm-target correctly for cross SDK targets
+- rust-cross-canadian: Fix ordering of target json config generation
+- rust-cross/rust-common: Merge arm target handling code to fix cross-canadian
+- rust-cross: Simplify the rust_gen_target calls
+- rust-llvm: Allow overriding LLVM target archs
+- sdk-manual: extensible.rst: remove instructions for using SDK functionality directly in a yocto build
+- sudo: upgrade to 1.9.15p2
+- systemtap_git: fix used uninitialized error
+- vim: Improve locale handling
+- vim: Upgrade to 9.0.2130
+- vim: use upstream generated .po files
+
+
+Known Issues in Yocto-4.0.15
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.15
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alexander Kanavin
+- Archana Polampalli
+- BELHADJ SALEM Talel
+- Bruce Ashfield
+- Chaitanya Vadrevu
+- Chen Qi
+- Deepthi Hemraj
+- Denys Dmytriyenko
+- Hitendra Prajapati
+- Lee Chee Yang
+- Li Wang
+- Martin Jansa
+- Meenali Gupta
+- Michael Opdenacker
+- Mikko Rapeli
+- Narpat Mali
+- Niko Mauno
+- Ninad Palsule
+- Niranjan Pradhan
+- Paul Eggleton
+- Peter Kjellerstedt
+- Peter Marko
+- Richard Purdie
+- Ross Burton
+- Samantha Jalabert
+- Sanjana
+- Soumya Sambu
+- Steve Sakoman
+- Tim Orling
+- Vijay Anusuri
+- Vivek Kumbhar
+- Wenlin Kang
+- Yogita Urade
+
+
+Repositories / Downloads for Yocto-4.0.15
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.15 </poky/log/?h=yocto-4.0.15>`
+- Git Revision: :yocto_git:`755632c2fcab43aa05cdcfa529727064b045073c </poky/commit/?id=755632c2fcab43aa05cdcfa529727064b045073c>`
+- Release Artefact: poky-755632c2fcab43aa05cdcfa529727064b045073c
+- sha: b40b43bd270d21a420c399981f9cfe0eb999f15e051fc2c89d124f249cdc0bd5
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.15/poky-755632c2fcab43aa05cdcfa529727064b045073c.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.15/poky-755632c2fcab43aa05cdcfa529727064b045073c.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.15 </openembedded-core/log/?h=yocto-4.0.15>`
+- Git Revision: :oe_git:`eea685e1caafd8e8121006d3f8b5d0b8a4f2a933 </openembedded-core/commit/?id=eea685e1caafd8e8121006d3f8b5d0b8a4f2a933>`
+- Release Artefact: oecore-eea685e1caafd8e8121006d3f8b5d0b8a4f2a933
+- sha: ddc3d4a2c8a097f2aa7132ae716affacc44b119c616a1eeffb7db56caa7fc79e
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.15/oecore-eea685e1caafd8e8121006d3f8b5d0b8a4f2a933.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.15/oecore-eea685e1caafd8e8121006d3f8b5d0b8a4f2a933.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.15 </meta-mingw/log/?h=yocto-4.0.15>`
+- Git Revision: :yocto_git:`f6b38ce3c90e1600d41c2ebb41e152936a0357d7 </meta-mingw/commit/?id=f6b38ce3c90e1600d41c2ebb41e152936a0357d7>`
+- Release Artefact: meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7
+- sha: 7d57167c19077f4ab95623d55a24c2267a3a3fb5ed83688659b4c03586373b25
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.15/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.15/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.15 </meta-gplv2/log/?h=yocto-4.0.15>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.15/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.15/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.15 </bitbake/log/?h=yocto-4.0.15>`
+- Git Revision: :oe_git:`42a1c9fe698a03feb34c5bba223c6e6e0350925b </bitbake/commit/?id=42a1c9fe698a03feb34c5bba223c6e6e0350925b>`
+- Release Artefact: bitbake-42a1c9fe698a03feb34c5bba223c6e6e0350925b
+- sha: 64c684ccd661fa13e25c859dfc68d66bec79281da0f4f81b0d6a9995acb659b5
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.15/bitbake-42a1c9fe698a03feb34c5bba223c6e6e0350925b.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.15/bitbake-42a1c9fe698a03feb34c5bba223c6e6e0350925b.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.15 </yocto-docs/log/?h=yocto-4.0.15>`
+- Git Revision: :yocto_git:`08fda7a5601393617b1ecfe89229459e14a90b1d </yocto-docs/commit/?id=08fda7a5601393617b1ecfe89229459e14a90b1d>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.16.rst b/documentation/migration-guides/release-notes-4.0.16.rst
new file mode 100644
index 0000000000..0eb31832ab
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.16.rst
@@ -0,0 +1,191 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.16 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.16
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- cpio: Fix :cve_mitre:`2023-7207`
+- curl: Revert "curl: Backport fix CVE-2023-32001"
+- curl: Fix :cve:`2023-46218`
+- dropbear: Fix :cve:`2023-48795`
+- ffmpeg: Fix :cve:`2022-3964` and :cve:`2022-3965`
+- ghostscript: Fix :cve:`2023-46751`
+- gnutls: Fix :cve:`2024-0553` and :cve:`2024-0567`
+- go: Fix :cve:`2023-39326`
+- openssh: Fix :cve:`2023-48795`, :cve:`2023-51384` and :cve:`2023-51385`
+- openssl: Fix :cve:`2023-6129` and :cve_mitre:`2023-6237`
+- pam: Fix :cve_mitre:`2024-22365`
+- perl: Fix :cve:`2023-47038`
+- qemu: Fix :cve:`2023-5088`
+- sqlite3: Fix :cve:`2023-7104`
+- systemd: Fix :cve:`2023-7008`
+- tiff: Fix :cve:`2023-6228`
+- xserver-xorg: Fix :cve:`2023-6377`, :cve:`2023-6478`, :cve:`2023-6816`, :cve_mitre:`2024-0229`, :cve:`2024-0408`, :cve:`2024-0409`, :cve_mitre:`2024-21885` and :cve_mitre:`2024-21886`
+- zlib: Ignore :cve:`2023-6992`
+
+
+Fixes in Yocto-4.0.16
+~~~~~~~~~~~~~~~~~~~~~
+
+- bitbake: asyncrpc: Add context manager API
+- bitbake: data: Add missing dependency handling of remove operator
+- bitbake: lib/bb: Add workaround for libgcc issues with python 3.8 and 3.9
+- bitbake: toastergui: verify that an existing layer path is given
+- build-appliance-image: Update to kirkstone head revision
+- contributor-guide: add License-Update tag
+- contributor-guide: fix command option
+- contributor-guide: use "apt" instead of "aptitude"
+- cpio: upgrade to 2.14
+- cve-update-nvd2-native: faster requests with API keys
+- cve-update-nvd2-native: increase the delay between subsequent request failures
+- cve-update-nvd2-native: make number of fetch attempts configurable
+- cve-update-nvd2-native: remove unused variable CVE_SOCKET_TIMEOUT
+- dev-manual: Discourage the use of SRC_URI[md5sum]
+- dev-manual: layers: update link to YP Compatible form
+- dev-manual: runtime-testing: fix test module name
+- dev-manual: start.rst: update use of Download page
+- docs:what-i-wish-id-known.rst: fix URL
+- docs: document VSCode extension
+- docs:brief-yoctoprojectqs:index.rst: align variable order with default local.conf
+- docs:migration-guides: add release notes for 4.0.15
+- docs:migration-guides: release 3.5 is actually 4.0
+- elfutils: Disable stringop-overflow warning for build host
+- externalsrc: Ensure :term:`SRCREV` is processed before accessing :term:`SRC_URI`
+- linux-firmware: upgrade to 20231030
+- manuals: Add :term:`CONVERSION_CMD` definition
+- manuals: Add :term:`UBOOT_BINARY`, extend :term:`UBOOT_CONFIG`
+- perl: upgrade to 5.34.3
+- poky.conf: bump version for 4.0.16
+- pybootchartgui: fix 2 SyntaxWarnings
+- python3-ptest: skip test_storlines
+- ref-manual: Fix reference to MIRRORS/PREMIRRORS defaults
+- ref-manual: classes: remove insserv bbclass
+- ref-manual: releases.svg: update nanbield release status
+- ref-manual: resources: sync with master branch
+- ref-manual: update tested and supported distros
+- test-manual: add links to python unittest
+- test-manual: add or improve hyperlinks
+- test-manual: explicit or fix file paths
+- test-manual: resource updates
+- test-manual: text and formatting fixes
+- test-manual: use working example
+- testimage: Exclude wtmp from target-dumper commands
+- testimage: drop target_dumper, host_dumper, and monitor_dumper
+- tzdata: Upgrade to 2023d
+
+
+Known Issues in Yocto-4.0.16
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.16
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Aatir Manzur
+- Archana Polampalli
+- Dhairya Nagodra
+- Dmitry Baryshkov
+- Enguerrand de Ribaucourt
+- Hitendra Prajapati
+- Insu Park
+- Joshua Watt
+- Justin Bronder
+- Jörg Sommer
+- Khem Raj
+- Lee Chee Yang
+- mark.yang
+- Marta Rybczynska
+- Martin Jansa
+- Maxin B. John
+- Michael Opdenacker
+- Paul Barker
+- Peter Kjellerstedt
+- Peter Marko
+- Poonam Jadhav
+- Richard Purdie
+- Shubham Kulkarni
+- Simone Weiß
+- Soumya Sambu
+- Sourav Pramanik
+- Steve Sakoman
+- Trevor Gamblin
+- Vijay Anusuri
+- Vivek Kumbhar
+- Yoann Congal
+- Yogita Urade
+
+
+Repositories / Downloads for Yocto-4.0.16
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.16 </poky/log/?h=yocto-4.0.16>`
+- Git Revision: :yocto_git:`54af8c5e80ebf63707ef4e51cc9d374f716da603 </poky/commit/?id=54af8c5e80ebf63707ef4e51cc9d374f716da603>`
+- Release Artefact: poky-54af8c5e80ebf63707ef4e51cc9d374f716da603
+- sha: a53ec3a661cf56ca40c0fbf1500288c2c20abe94896d66a572bc5ccf5d92e9d6
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.16/poky-54af8c5e80ebf63707ef4e51cc9d374f716da603.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.16/poky-54af8c5e80ebf63707ef4e51cc9d374f716da603.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.16 </openembedded-core/log/?h=yocto-4.0.16>`
+- Git Revision: :oe_git:`a744a897f0ea7d34c31c024c13031221f9a85f24 </openembedded-core/commit/?id=a744a897f0ea7d34c31c024c13031221f9a85f24>`
+- Release Artefact: oecore-a744a897f0ea7d34c31c024c13031221f9a85f24
+- sha: 8c2bc9487597b0caa9f5a1d72b18cfcd1ddc7e6d91f0f051313563d6af95aeec
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.16/oecore-a744a897f0ea7d34c31c024c13031221f9a85f24.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.16/oecore-a744a897f0ea7d34c31c024c13031221f9a85f24.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.16 </meta-mingw/log/?h=yocto-4.0.16>`
+- Git Revision: :yocto_git:`f6b38ce3c90e1600d41c2ebb41e152936a0357d7 </meta-mingw/commit/?id=f6b38ce3c90e1600d41c2ebb41e152936a0357d7>`
+- Release Artefact: meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7
+- sha: 7d57167c19077f4ab95623d55a24c2267a3a3fb5ed83688659b4c03586373b25
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.16/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.16/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.16 </meta-gplv2/log/?h=yocto-4.0.16>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.16/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.16/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.16 </bitbake/log/?h=yocto-4.0.16>`
+- Git Revision: :oe_git:`ee090484cc25d760b8c20f18add17b5eff485b40 </bitbake/commit/?id=ee090484cc25d760b8c20f18add17b5eff485b40>`
+- Release Artefact: bitbake-ee090484cc25d760b8c20f18add17b5eff485b40
+- sha: 479e3a57ae9fbc2aa95292a7554caeef113bbfb28c226ed19547b8dde1c95314
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.16/bitbake-ee090484cc25d760b8c20f18add17b5eff485b40.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.16/bitbake-ee090484cc25d760b8c20f18add17b5eff485b40.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.16 </yocto-docs/log/?h=yocto-4.0.16>`
+- Git Revision: :yocto_git:`aba67b58711019a6ba439b2b77337f813ed799ac </yocto-docs/commit/?id=aba67b58711019a6ba439b2b77337f813ed799ac>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.17.rst b/documentation/migration-guides/release-notes-4.0.17.rst
new file mode 100644
index 0000000000..07242584b8
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.17.rst
@@ -0,0 +1,238 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.17 (Kirkstone)
+------------------------------------------
+
+Security Fixes in Yocto-4.0.17
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- bind: Fix :cve:`2023-4408`, :cve:`2023-5517`, :cve:`2023-5679`, :cve:`2023-50868` and :cve:`2023-50387`
+- binutils: Fix :cve:`2023-39129` and :cve:`2023-39130`
+- curl: Fix :cve:`2023-46219`
+- curl: Ignore :cve:`2023-42915`
+- gcc: Ignore :cve:`2023-4039`
+- gdb: Fix :cve:`2023-39129` and :cve:`2023-39130`
+- glibc: Ignore :cve:`2023-0687`
+- go: Fix :cve:`2023-29406`, :cve:`2023-45285`, :cve:`2023-45287`, :cve:`2023-45289`, :cve:`2023-45290`, :cve:`2024-24784` and :cve:`2024-24785`
+- less: Fix :cve:`2022-48624`
+- libgit2: Fix :cve:`2024-24575` and :cve:`2024-24577`
+- libuv: fix :cve:`2024-24806`
+- libxml2: Fix for :cve:`2024-25062`
+- linux-yocto/5.15: Fix :cve:`2022-36402`, :cve:`2022-40982`, :cve:`2022-47940`, :cve:`2023-1193`, :cve:`2023-1194`, :cve:`2023-3772`, :cve_mitre:`2023-3867`, :cve:`2023-4128`, :cve:`2023-4206`, :cve:`2023-4207`, :cve:`2023-4208`, :cve:`2023-4244`, :cve:`2023-4273`, :cve:`2023-4563`, :cve:`2023-4569`, :cve:`2023-4623`, :cve:`2023-4881`, :cve:`2023-4921`, :cve:`2023-5158`, :cve:`2023-5717`, :cve:`2023-6040`, :cve:`2023-6121`, :cve:`2023-6176`, :cve:`2023-6546`, :cve:`2023-6606`, :cve:`2023-6622`, :cve:`2023-6817`, :cve:`2023-6915`, :cve:`2023-6931`, :cve:`2023-6932`, :cve:`2023-20569`, :cve:`2023-20588`, :cve:`2023-25775`, :cve:`2023-31085`, :cve:`2023-32247`, :cve:`2023-32250`, :cve:`2023-32252`, :cve:`2023-32254`, :cve:`2023-32257`, :cve:`2023-32258`, :cve:`2023-34324`, :cve:`2023-35827`, :cve:`2023-38427`, :cve:`2023-38430`, :cve:`2023-38431`, :cve:`2023-39189`, :cve:`2023-39192`, :cve:`2023-39193`, :cve:`2023-39194`, :cve:`2023-39198`, :cve:`2023-40283`, :cve:`2023-42752`, :cve:`2023-42753`, :cve:`2023-42754`, :cve:`2023-42755`, :cve:`2023-45871`, :cve:`2023-46343`, :cve:`2023-46813`, :cve:`2023-46838`, :cve:`2023-46862`, :cve:`2023-51042`, :cve:`2023-51779`, :cve_mitre:`2023-52340`, :cve:`2023-52429`, :cve:`2023-52435`, :cve:`2023-52436`, :cve:`2023-52438`, :cve:`2023-52439`, :cve:`2023-52441`, :cve:`2023-52442`, :cve:`2023-52443`, :cve:`2023-52444`, :cve:`2023-52445`, :cve:`2023-52448`, :cve:`2023-52449`, :cve:`2023-52451`, :cve:`2023-52454`, :cve:`2023-52456`, :cve:`2023-52457`, :cve:`2023-52458`, :cve:`2023-52463`, :cve:`2023-52464`, :cve:`2024-0340`, :cve:`2024-0584`, :cve:`2024-0607`, :cve:`2024-0641`, :cve:`2024-0646`, :cve:`2024-1085`, :cve:`2024-1086`, :cve:`2024-1151`, :cve:`2024-22705`, :cve:`2024-23849`, :cve:`2024-23850`, :cve:`2024-23851`, :cve:`2024-24860`, :cve:`2024-26586`, :cve:`2024-26589`, :cve:`2024-26591`, :cve:`2024-26592`, :cve:`2024-26593`, :cve:`2024-26594`, :cve:`2024-26597` and :cve:`2024-26598`
+- linux-yocto/5.15: Ignore :cve:`2020-27418`, :cve:`2020-36766`, :cve:`2021-33630`, :cve:`2021-33631`, :cve:`2022-48619`, :cve:`2023-2430`, :cve:`2023-4610`, :cve:`2023-4732`, :cve:`2023-5090`, :cve:`2023-5178`, :cve:`2023-5197`, :cve:`2023-5345`, :cve:`2023-5633`, :cve:`2023-5972`, :cve:`2023-6111`, :cve:`2023-6200`, :cve:`2023-6531`, :cve:`2023-6679`, :cve:`2023-7192`, :cve:`2023-40791`, :cve:`2023-42756`, :cve:`2023-44466`, :cve:`2023-45862`, :cve:`2023-45863`, :cve:`2023-45898`, :cve:`2023-51043`, :cve:`2023-51780`, :cve:`2023-51781`, :cve:`2023-51782`, :cve:`2023-52433`, :cve:`2023-52440`, :cve:`2023-52446`, :cve:`2023-52450`, :cve:`2023-52453`, :cve:`2023-52455`, :cve:`2023-52459`, :cve:`2023-52460`, :cve:`2023-52461`, :cve:`2023-52462`, :cve:`2024-0193`, :cve:`2024-0443`, :cve:`2024-0562`, :cve:`2024-0582`, :cve:`2024-0639`, :cve:`2024-0775`, :cve:`2024-26581`, :cve:`2024-26582`, :cve:`2024-26590`, :cve:`2024-26596` and :cve:`2024-26599`
+- linux-yocto/5.10: Fix :cve:`2023-6040`, :cve:`2023-6121`, :cve:`2023-6606`, :cve:`2023-6817`, :cve:`2023-6915`, :cve:`2023-6931`, :cve:`2023-6932`, :cve:`2023-39198`, :cve:`2023-46838`, :cve:`2023-51779`, :cve:`2023-51780`, :cve:`2023-51781`, :cve:`2023-51782`, :cve_mitre:`2023-52340`, :cve:`2024-0584` and :cve:`2024-0646`
+- linux-yocto/5.10: Ignore :cve:`2021-33630`, :cve:`2021-33631`, :cve:`2022-1508`, :cve:`2022-36402`, :cve:`2022-48619`, :cve:`2023-2430`, :cve:`2023-4610`, :cve:`2023-5972`, :cve:`2023-6039`, :cve:`2023-6200`, :cve:`2023-6531`, :cve:`2023-6546`, :cve:`2023-6622`, :cve:`2023-6679`, :cve:`2023-7192`, :cve:`2023-46343`, :cve:`2023-51042`, :cve:`2023-51043`, :cve:`2024-0193`, :cve:`2024-0443`, :cve:`2024-0562`, :cve:`2024-0582`, :cve:`2024-0639`, :cve:`2024-0641`, :cve:`2024-0775`, :cve:`2024-1085` and :cve:`2024-22705`
+- openssl: Fix :cve:`2024-0727`
+- python3-pycryptodome: Fix :cve:`2023-52323`
+- qemu: Fix :cve:`2023-6693`, :cve:`2023-42467` and :cve:`2024-24474`
+- vim: Fix :cve:`2024-22667`
+- xwayland: Fix :cve:`2023-6377` and :cve:`2023-6478`
+
+
+Fixes in Yocto-4.0.17
+~~~~~~~~~~~~~~~~~~~~~
+
+- bind: Upgrade to 9.18.24
+- bitbake: bitbake/codeparser.py: address ast module deprecations in py 3.12
+- bitbake: bitbake/lib/bs4/tests/test_tree.py: python 3.12 regex
+- bitbake: codeparser: replace deprecated ast.Str and 's'
+- bitbake: fetch2: Ensure that git LFS objects are available
+- bitbake: tests/fetch: Add real git lfs tests and decorator
+- bitbake: tests/fetch: git-lfs restore _find_git_lfs
+- bitbake: toaster/toastergui: Bug fix: verify the given layer path only when importing/adding a local layer
+- build-appliance-image: Update to kirkstone head revision
+- cmake: Unset CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES
+- contributor-guide: fix lore URL
+- curl: don't enable debug builds
+- cve_check: cleanup logging
+- dbus: Add missing :term:`CVE_PRODUCT`
+- dev-manual: sbom: Rephrase spdx creation
+- dev-manual: runtime-testing: gen-tapdevs need iptables installed
+- dev-manual: packages: clarify shared :term:`PR` service constraint
+- dev-manual: packages: need enough free space
+- dev-manual: start: remove idle line
+- feature-microblaze-versions.inc: python 3.12 regex
+- ghostscript: correct :term:`LICENSE` with AGPLv3
+- image-live.bbclass: LIVE_ROOTFS_TYPE support compression
+- kernel.bbclass: Set pkg-config variables for building modules
+- kernel.bbclass: introduce KERNEL_LOCALVERSION
+- kernel: fix localversion in v6.3+
+- kernel: make LOCALVERSION consistent between recipes
+- ldconfig-native: Fix to correctly point at the DT_NEEDED entries in an ELF file
+- librsvg: Fix do_package_qa error for librsvg
+- linux-firmware: upgrade to 20231211
+- linux-yocto/5.10: update to v5.10.210
+- linux-yocto/5.15: update to v5.15.150
+- manuals: add minimum RAM requirements
+- manuals: suppress excess use of "following" word
+- manuals: update disk space requirements
+- manuals: update references to buildtools
+- manuals: updates for building on Windows (WSL 2)
+- meta/lib/oeqa: python 3.12 regex
+- meta/recipes: python 3.12 regex
+- migration-guide: add release notes for 4.0.16
+- oeqa/selftest/oelib/buildhistory: git default branch
+- oeqa/selftest/recipetool: downgrade meson version to not use pyproject.toml
+- oeqa/selftest/recipetool: expect meson.bb
+- oeqa/selftest/recipetool: fix for python 3.12
+- oeqa/selftest/runtime_test: only run the virgl tests on qemux86-64
+- oeqa: replace deprecated assertEquals
+- openssl: Upgrade to 3.0.13
+- poky.conf: bump version for 4.0.17
+- populate_sdk_ext: use ConfigParser instead of SafeConfigParser
+- python3-jinja2: upgrade to 3.1.3
+- recipetool/create_buildsys_python: use importlib instead of imp
+- ref-manual: system-requirements: recommend buildtools for not supported distros
+- ref-manual: system-requirements: add info on buildtools-make-tarball
+- ref-manual: release-process: grammar fix
+- ref-manual: system-requirements: fix AlmaLinux variable name
+- ref-manual: system-requirements: modify anchor
+- ref-manual: system-requirements: remove outdated note
+- ref-manual: system-requirements: simplify supported distro requirements
+- ref-manual: system-requirements: update packages to build docs
+- scripts/runqemu: add qmp socket support
+- scripts/runqemu: direct mesa to use its own drivers, rather than ones provided by host distro
+- scripts/runqemu: fix regex escape sequences
+- scripts: python 3.12 regex
+- selftest: skip virgl gtk/sdl test on ubuntu 18.04
+- systemd: Only add myhostname to nsswitch.conf if in :term:`PACKAGECONFIG`
+- tzdata: Upgrade to 2024a
+- u-boot: Move UBOOT_INITIAL_ENV back to u-boot.inc
+- useradd-example: do not use unsupported clear text password
+- vim: upgrade to v9.0.2190
+- yocto-bsp: update to v5.15.150
+
+
+Known Issues in Yocto-4.0.17
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.17
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Adrian Freihofer
+- Alassane Yattara
+- Alexander Kanavin
+- Alexander Sverdlin
+- Archana Polampalli
+- Baruch Siach
+- Bruce Ashfield
+- Chen Qi
+- Chris Laplante
+- Deepthi Hemraj
+- Dhairya Nagodra
+- Fabien Mahot
+- Fabio Estevam
+- Hitendra Prajapati
+- Hugo SIMELIERE
+- Jermain Horsman
+- Kai Kang
+- Lee Chee Yang
+- Ludovic Jozeau
+- Michael Opdenacker
+- Ming Liu
+- Munehisa Kamata
+- Narpat Mali
+- Nikhil R
+- Paul Eggleton
+- Paulo Neves
+- Peter Marko
+- Philip Lorenz
+- Poonam Jadhav
+- Priyal Doshi
+- Ross Burton
+- Simone Weiß
+- Soumya Sambu
+- Steve Sakoman
+- Tim Orling
+- Trevor Gamblin
+- Vijay Anusuri
+- Vivek Kumbhar
+- Wang Mingyu
+- Zahir Hussain
+
+
+Repositories / Downloads for Yocto-4.0.17
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.17 </poky/log/?h=yocto-4.0.17>`
+- Git Revision: :yocto_git:`6d1a878bbf24c66f7186b270f823fcdf82e35383 </poky/commit/?id=6d1a878bbf24c66f7186b270f823fcdf82e35383>`
+- Release Artefact: poky-6d1a878bbf24c66f7186b270f823fcdf82e35383
+- sha: 3bc3010340b674f7b0dd0a7997f0167b2240b794fbd4aa28c0c4217bddd15e30
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.17/poky-6d1a878bbf24c66f7186b270f823fcdf82e35383.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.17/poky-6d1a878bbf24c66f7186b270f823fcdf82e35383.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.17 </openembedded-core/log/?h=yocto-4.0.17>`
+- Git Revision: :oe_git:`2501534c9581c6c3439f525d630be11554a57d24 </openembedded-core/commit/?id=2501534c9581c6c3439f525d630be11554a57d24>`
+- Release Artefact: oecore-2501534c9581c6c3439f525d630be11554a57d24
+- sha: 52cc6cce9e920bdce078584b89136e81cc01e0c55616fab5fca6c3e04264c88e
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.17/oecore-2501534c9581c6c3439f525d630be11554a57d24.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.17/oecore-2501534c9581c6c3439f525d630be11554a57d24.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.17 </meta-mingw/log/?h=yocto-4.0.17>`
+- Git Revision: :yocto_git:`f6b38ce3c90e1600d41c2ebb41e152936a0357d7 </meta-mingw/commit/?id=f6b38ce3c90e1600d41c2ebb41e152936a0357d7>`
+- Release Artefact: meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7
+- sha: 7d57167c19077f4ab95623d55a24c2267a3a3fb5ed83688659b4c03586373b25
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.17/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.17/meta-mingw-f6b38ce3c90e1600d41c2ebb41e152936a0357d7.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.17 </meta-gplv2/log/?h=yocto-4.0.17>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.17/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.17/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+meta-clang
+
+- Repository Location: :yocto_git:`/meta-clang`
+- Branch: :yocto_git:`kirkstone </meta-clang/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.17 </meta-clang/log/?h=yocto-4.0.17>`
+- Git Revision: :yocto_git:`eebe4ff2e539f3ffb01c5060cc4ca8b226ea8b52 </meta-clang/commit/?id=eebe4ff2e539f3ffb01c5060cc4ca8b226ea8b52>`
+- Release Artefact: meta-clang-eebe4ff2e539f3ffb01c5060cc4ca8b226ea8b52
+- sha: 3299e96e069a22c0971e903fbc191f2427efffc83d910ac51bf0237caad01d17
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.17/meta-clang-eebe4ff2e539f3ffb01c5060cc4ca8b226ea8b52.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.17/meta-clang-eebe4ff2e539f3ffb01c5060cc4ca8b226ea8b52.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.17 </bitbake/log/?h=yocto-4.0.17>`
+- Git Revision: :oe_git:`40fd5f4eef7460ca67f32cfce8e229e67e1ff607 </bitbake/commit/?id=40fd5f4eef7460ca67f32cfce8e229e67e1ff607>`
+- Release Artefact: bitbake-40fd5f4eef7460ca67f32cfce8e229e67e1ff607
+- sha: 5d20a0e4c5d0fce44bd84778168714a261a30a4b83f67c88df3b8a7e7115e444
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.17/bitbake-40fd5f4eef7460ca67f32cfce8e229e67e1ff607.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.17/bitbake-40fd5f4eef7460ca67f32cfce8e229e67e1ff607.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.17 </yocto-docs/log/?h=yocto-4.0.17>`
+- Git Revision: :yocto_git:`08ce7db2aa3a38deb8f5aa59bafc78542986babb </yocto-docs/commit/?id=08ce7db2aa3a38deb8f5aa59bafc78542986babb>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.2.rst b/documentation/migration-guides/release-notes-4.0.2.rst
new file mode 100644
index 0000000000..cb10068b8d
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.2.rst
@@ -0,0 +1,296 @@
+Release notes for Yocto-4.0.2 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- libxslt: Mark :cve:`2022-29824` as not applying
+- tiff: Add jbig PACKAGECONFIG and clarify IGNORE :cve:`2022-1210`
+- tiff: mark :cve:`2022-1622` and :cve:`2022-1623` as invalid
+- pcre2: fix :cve:`2022-1586` out-of-bounds read
+- curl: fix :cve:`2022-22576`, :cve:`2022-27775`, :cve:`2022-27776`, :cve:`2022-27774`, :cve:`2022-30115`, :cve:`2022-27780`, :cve:`2022-27781`, :cve:`2022-27779` and :cve:`2022-27782`
+- qemu: fix :cve:`2021-4206` and :cve:`2021-4207`
+- freetype: fix :cve:`2022-27404`, :cve:`2022-27405` and :cve:`2022-27406`
+
+Fixes in Yocto-4.0.2
+~~~~~~~~~~~~~~~~~~~~
+
+- alsa-plugins: fix libavtp vs. avtp packageconfig
+- archiver: don't use machine variables in shared recipes
+- archiver: use bb.note instead of echo
+- baremetal-image: fix broken symlink in do_rootfs
+- base-passwd: Disable shell for default users
+- bash: submit patch upstream
+- bind: upgrade 9.18.1 -> 9.18.2
+- binutils: Bump to latest 2.38 release branch
+- bitbake.conf: Make TCLIBC and TCMODE lazy assigned
+- bitbake: build: Add clean_stamp API function to allow removal of task stamps
+- bitbake: data: Do not depend on vardepvalueexclude flag
+- bitbake: fetch2/osc: Small fixes for osc fetcher
+- bitbake: server/process: Fix logging issues where only the first message was displayed
+- build-appliance-image: Update to kirkstone head revision
+- buildhistory.bbclass: fix shell syntax when using dash
+- cairo: Add missing GPLv3 license checksum entry
+- classes: rootfs-postcommands: add skip option to overlayfs_qa_check
+- cronie: upgrade 1.6.0 -> 1.6.1
+- cups: upgrade 2.4.1 -> 2.4.2
+- cve-check.bbclass: Added do_populate_sdk[recrdeptask].
+- cve-check: Add helper for symlink handling
+- cve-check: Allow warnings to be disabled
+- cve-check: Fix report generation
+- cve-check: Only include installed packages for rootfs manifest
+- cve-check: add support for Ignored CVEs
+- cve-check: fix return type in check_cves
+- cve-check: move update_symlinks to a library
+- cve-check: write empty fragment files in the text mode
+- cve-extra-exclusions: Add kernel CVEs
+- cve-update-db-native: make it possible to disable database updates
+- devtool: Fix _copy_file() TypeError
+- e2fsprogs: add alternatives handling of lsattr as well
+- e2fsprogs: update upstream status
+- efivar: add musl libc compatibility
+- epiphany: upgrade 42.0 -> 42.2
+- ffmpeg: upgrade 5.0 -> 5.0.1
+- fribidi: upgrade 1.0.11 -> 1.0.12
+- gcc-cross-canadian: Add nativesdk-zstd dependency
+- gcc-source: Fix incorrect task dependencies from ${B}
+- gcc: Upgrade to 11.3 release
+- gcc: depend on zstd-native
+- git: fix override syntax in RDEPENDS
+- glib-2.0: upgrade 2.72.1 -> 2.72.2
+- glibc: Drop make-native dependency
+- go: upgrade 1.17.8 -> 1.17.10
+- gst-devtools: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-libav: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-omx: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-plugins-bad: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-plugins-base: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-plugins-good: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-plugins-ugly: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-python: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-rtsp-server: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0-vaapi: upgrade 1.20.1 -> 1.20.2
+- gstreamer1.0: upgrade 1.20.1 -> 1.20.2
+- gtk+3: upgrade 3.24.33 -> 3.24.34
+- gtk-doc: Fix potential shebang overflow on gtkdoc-mkhtml2
+- image.bbclass: allow overriding dependency on virtual/kernel:do_deploy
+- insane.bbclass: make sure to close .patch files
+- iso-codes: upgrade 4.9.0 -> 4.10.0
+- kernel-yocto.bbclass: Reset to exiting on non-zero return code at end of task
+- libcgroup: upgrade 2.0.1 -> 2.0.2
+- liberror-perl: Update sstate/equiv versions to clean cache
+- libinput: upgrade 1.19.3 -> 1.19.4
+- libpcre2: upgrade 10.39 -> 10.40
+- librepo: upgrade 1.14.2 -> 1.14.3
+- libseccomp: Add missing files for ptests
+- libseccomp: Correct LIC_FILES_CHKSUM
+- libxkbcommon: upgrade 1.4.0 -> 1.4.1
+- libxml2: Upgrade 2.9.13 -> 2.9.14
+- license.bbclass: Bound beginline and endline in copy_license_files()
+- license_image.bbclass: Make QA errors fail the build
+- linux-firmware: add support for building snapshots
+- linux-firmware: package new Qualcomm firmware
+- linux-firmware: replace mkdir by install
+- linux-firmware: split ath3k firmware
+- linux-firmware: upgrade to 20220610
+- linux-yocto/5.10: update to v5.10.119
+- linux-yocto/5.15: Enable MDIO bus config
+- linux-yocto/5.15: bpf: explicitly disable unpriv eBPF by default
+- linux-yocto/5.15: cfg/xen: Move x86 configs to separate file
+- linux-yocto/5.15: update to v5.15.44
+- local.conf.sample: Update sstate url to new 'all' path
+- logrotate: upgrade 3.19.0 -> 3.20.1
+- lttng-modules: Fix build failure for 5.10.119+ and 5.15.44+ kernel
+- lttng-modules: fix build against 5.18-rc7+
+- lttng-modules: fix shell syntax
+- lttng-ust: upgrade 2.13.2 -> 2.13.3
+- lzo: Add further info to a patch and mark as Inactive-Upstream
+- makedevs: Don't use COPYING.patch just to add license file into ${S}
+- manuals: switch to the sstate mirror shared between all versions
+- mesa.inc: package 00-radv-defaults.conf
+- mesa: backport a patch to support compositors without zwp_linux_dmabuf_v1 again
+- mesa: upgrade to 22.0.3
+- meson.bbclass: add cython binary to cross/native toolchain config
+- mmc-utils: upgrade to latest revision
+- mobile-broadband-provider-info: upgrade 20220315 -> 20220511
+- ncurses: update to patchlevel 20220423
+- oeqa/selftest/cve_check: add tests for Ignored and partial reports
+- oeqa/selftest/cve_check: add tests for recipe and image reports
+- oescripts: change compare logic in OEListPackageconfigTests
+- openssl: Backport fix for ptest cert expiry
+- overlayfs: add docs about skipping QA check & service dependencies
+- ovmf: Fix native build with gcc-12
+- patch.py: make sure that patches/series file exists before quilt pop
+- pciutils: avoid lspci conflict with busybox
+- perl: Add dependency on make-native to avoid race issues
+- perl: Fix build with gcc-12
+- poky.conf: bump version for 4.0.2
+- popt: fix override syntax in RDEPENDS
+- pypi.bbclass: Set CVE_PRODUCT to PYPI_PACKAGE
+- python3: Ensure stale empty python module directories don't break the build
+- python3: Remove problematic paths from sysroot files
+- python3: fix reproducibility issue with python3-core
+- python3: use built-in distutils for ptest, rather than setuptools' 'fork'
+- python: Avoid shebang overflow on python-config.py
+- rootfs-postcommands.bbclass: correct comments
+- rootfs.py: close kernel_abi_ver_file
+- rootfs.py: find .ko.zst kernel modules
+- rust-common: Drop LLVM_TARGET and simplify
+- rust-common: Ensure sstate signatures have correct dependencies for do_rust_gen_targets
+- rust-common: Fix for target definitions returning 'NoneType' for arm
+- rust-common: Fix native signature dependency issues
+- rust-common: Fix sstate signatures between arm hf and non-hf
+- sanity: Don't warn about make 4.2.1 for mint
+- sanity: Switch to make 4.0 as a minimum version
+- sed: Specify shell for "nobody" user in run-ptest
+- selftest/imagefeatures/overlayfs: Always append to DISTRO_FEATURES
+- selftest/multiconfig: Test that multiconfigs in separate layers works
+- sqlite3: upgrade to 3.38.5
+- staging.bbclass: process direct dependencies in deterministic order
+- staging: Fix rare sysroot corruption issue
+- strace: Don't run ptest as "nobody"
+- systemd: Correct 0001-pass-correct-parameters-to-getdents64.patch
+- systemd: Correct path returned in sd_path_lookup()
+- systemd: Document future actions needed for set of musl patches
+- systemd: Drop 0001-test-parse-argument-Include-signal.h.patch
+- systemd: Drop 0002-don-t-use-glibc-specific-qsort_r.patch
+- systemd: Drop 0016-Hide-__start_BUS_ERROR_MAP-and-__stop_BUS_ERROR_MAP.patch
+- systemd: Drop redundant musl patches
+- systemd: Fix build regression with latest update
+- systemd: Remove __compare_fn_t type in musl-specific patch
+- systemd: Update patch status
+- systemd: systemd-systemctl: Support instance conf files during enable
+- systemd: update ``0008-add-missing-FTW_-macros-for-musl.patch``
+- systemd: upgrade 250.4 -> 250.5
+- uboot-sign: Fix potential index error issues
+- valgrind: submit arm patches upstream
+- vim: Upgrade to 8.2.5083
+- webkitgtk: upgrade to 2.36.3
+- wic/plugins/rootfs: Fix permissions when splitting rootfs folders across partitions
+- xwayland: upgrade 22.1.0 -> 22.1.1
+- xxhash: fix build with gcc 12
+- zip/unzip: mark all submittable patches as Inactive-Upstream
+
+Known Issues in Yocto-4.0.2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- There were build failures on the autobuilder due to a known scp issue on Fedora 36 hosts.
+
+Contributors to Yocto-4.0.2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alex Kiernan
+- Alexander Kanavin
+- Aryaman Gupta
+- Bruce Ashfield
+- Claudius Heine
+- Davide Gardenal
+- Dmitry Baryshkov
+- Ernst Sjöstrand
+- Felix Moessbauer
+- Gunjan Gupta
+- He Zhe
+- Hitendra Prajapati
+- Jack Mitchell
+- Jeremy Puhlman
+- Jiaqing Zhao
+- Joerg Vehlow
+- Jose Quaresma
+- Kai Kang
+- Khem Raj
+- Konrad Weihmann
+- Marcel Ziswiler
+- Markus Volk
+- Marta Rybczynska
+- Martin Jansa
+- Michael Opdenacker
+- Mingli Yu
+- Naveen Saini
+- Nick Potenski
+- Paulo Neves
+- Pavel Zhukov
+- Peter Kjellerstedt
+- Rasmus Villemoes
+- Richard Purdie
+- Robert Joslyn
+- Ross Burton
+- Samuli Piippo
+- Sean Anderson
+- Stefan Wiehler
+- Steve Sakoman
+- Sundeep Kokkonda
+- Tomasz Dziendzielski
+- Xiaobing Luo
+- Yi Zhao
+- leimaohui
+- wangmy
+
+Repositories / Downloads for Yocto-4.0.2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: https://git.yoctoproject.org/git/poky
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.2 </poky/log/?h=yocto-4.0.2>`
+- Git Revision: :yocto_git:`a5ea426b1da472fc8549459fff3c1b8c6e02f4b5 </poky/commit/?id=a5ea426b1da472fc8549459fff3c1b8c6e02f4b5>`
+- Release Artefact: poky-a5ea426b1da472fc8549459fff3c1b8c6e02f4b5
+- sha: 474ddfacfed6661be054c161597a1a5273188dfe021b31d6156955d93c6b7359
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.2/poky-a5ea426b1da472fc8549459fff3c1b8c6e02f4b5.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.2/poky-a5ea426b1da472fc8549459fff3c1b8c6e02f4b5.tar.bz2
+
+openembedded-core
+
+- Repository Location: https://git.openembedded.org/openembedded-core
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.2 </openembedded-core/log/?h=yocto-4.0.2>`
+- Git Revision: :oe_git:`eea52e0c3d24c79464f4afdbc3c397e1cb982231 </openembedded-core/commit/?id=eea52e0c3d24c79464f4afdbc3c397e1cb982231>`
+- Release Artefact: oecore-eea52e0c3d24c79464f4afdbc3c397e1cb982231
+- sha: 252d5c2c2db7e14e7365fcc69d32075720b37d629894bae36305eba047a39907
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.2/oecore-eea52e0c3d24c79464f4afdbc3c397e1cb982231.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.2/oecore-eea52e0c3d24c79464f4afdbc3c397e1cb982231.tar.bz2
+
+meta-mingw
+
+- Repository Location: https://git.yoctoproject.org/git/meta-mingw
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.2 </meta-mingw/log/?h=yocto-4.0.2>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.2/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.2/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: https://git.yoctoproject.org/git/meta-gplv2
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.2 </meta-gplv2/log/?h=yocto-4.0.2>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.2/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.2/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: https://git.openembedded.org/bitbake
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.2 </bitbake/log/?h=yocto-4.0.2>`
+- Git Revision: :oe_git:`b8fd6f5d9959d27176ea016c249cf6d35ac8ba03 </bitbake/commit/?id=b8fd6f5d9959d27176ea016c249cf6d35ac8ba03>`
+- Release Artefact: bitbake-b8fd6f5d9959d27176ea016c249cf6d35ac8ba03
+- sha: 373818b1dee2c502264edf654d6d8f857b558865437f080e02d5ba6bb9e72cc3
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.2/bitbake-b8fd6f5d9959d27176ea016c249cf6d35ac8ba03.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.2/bitbake-b8fd6f5d9959d27176ea016c249cf6d35ac8ba03.tar.bz2
+
+yocto-docs
+
+- Repository Location: https://git.yoctoproject.org/git/yocto-docs
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.2 </yocto-docs/log/?h=yocto-4.0.2>`
+- Git Revision: :yocto_git:`662294dccd028828d5c7e9fd8f5c8e14df53df4b </yocto-docs/commit/?id=662294dccd028828d5c7e9fd8f5c8e14df53df4b>`
diff --git a/documentation/migration-guides/release-notes-4.0.3.rst b/documentation/migration-guides/release-notes-4.0.3.rst
new file mode 100644
index 0000000000..e2a212cb62
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.3.rst
@@ -0,0 +1,314 @@
+Release notes for Yocto-4.0.3 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- binutils: fix :cve:`2019-1010204`
+- busybox: fix :cve:`2022-30065`
+- cups: ignore :cve:`2022-26691`
+- curl: Fix :cve:`2022-32205`, :cve:`2022-32206`, :cve:`2022-32207` and :cve:`2022-32208`
+- dpkg: fix :cve:`2022-1664`
+- ghostscript: fix :cve:`2022-2085`
+- harfbuzz: fix :cve:`2022-33068`
+- libtirpc: fix :cve:`2021-46828`
+- lua: fix :cve:`2022-33099`
+- nasm: ignore :cve:`2020-18974`
+- qemu: fix :cve:`2022-35414`
+- qemu: ignore :cve:`2021-20255` and :cve:`2019-12067`
+- tiff: fix :cve:`2022-1354`, :cve:`2022-1355`, :cve:`2022-2056`, :cve:`2022-2057` and :cve:`2022-2058`
+- u-boot: fix :cve:`2022-34835`
+- unzip: fix :cve:`2022-0529` and :cve:`2022-0530`
+
+
+Fixes in Yocto-4.0.3
+~~~~~~~~~~~~~~~~~~~~
+
+- alsa-state: correct license
+- at: take tarballs from debian
+- base.bbclass: Correct the test for obsolete license exceptions
+- base/reproducible: Change Source Date Epoch generation methods
+- bin_package: install into base_prefix
+- bind: Remove legacy python3 PACKAGECONFIG code
+- bind: upgrade to 9.18.4
+- binutils: stable 2.38 branch updates
+- build-appliance-image: Update to kirkstone head revision
+- cargo_common.bbclass: enable bitbake vendoring for externalsrc
+- coreutils: Tweak packaging variable names for coreutils-dev
+- curl: backport openssl fix CN check error code
+- cve-check: hook cleanup to the BuildCompleted event, not CookerExit
+- cve-extra-exclusions: Clean up and ignore three CVEs (2xqemu and nasm)
+- devtool: finish: handle patching when S points to subdir of a git repo
+- devtool: ignore pn- overrides when determining SRC_URI overrides
+- docs: BB_HASHSERVE_UPSTREAM: update to new host
+- dropbear: break dependency on base package for -dev package
+- efivar: fix import functionality
+- encodings: update to 1.0.6
+- epiphany: upgrade to 42.3
+- externalsrc.bbclass: support crate fetcher on externalsrc
+- font-util: update 1.3.2 -> 1.3.3
+- gcc-runtime: Fix build when using gold
+- gcc-runtime: Fix missing MLPREFIX in debug mappings
+- gcc-runtime: Pass -nostartfiles when building dummy libstdc++.so
+- gcc: Backport a fix for gcc bug 105039
+- git: upgrade to v2.35.4
+- glib-2.0: upgrade to 2.72.3
+- glib-networking: upgrade to 2.72.1
+- glibc : stable 2.35 branch updates
+- glibc-tests: Avoid reproducibility issues
+- glibc-tests: not clear BBCLASSEXTEND
+- glibc: revert one upstream change to work around broken DEBUG_BUILD build
+- glibc: stable 2.35 branch updates
+- gnupg: upgrade to 2.3.7
+- go: upgrade to v1.17.12
+- gobject-introspection-data: Disable cache for g-ir-scanner
+- gperf: Add a patch to work around reproducibility issues
+- gperf: Switch to upstream patch
+- gst-devtools: upgrade to 1.20.3
+- gstreamer1.0-libav: upgrade to 1.20.3
+- gstreamer1.0-omx: upgrade to 1.20.3
+- gstreamer1.0-plugins-bad: upgrade to 1.20.3
+- gstreamer1.0-plugins-base: upgrade to 1.20.3
+- gstreamer1.0-plugins-good: upgrade to 1.20.3
+- gstreamer1.0-plugins-ugly: upgrade to 1.20.3
+- gstreamer1.0-python: upgrade to 1.20.3
+- gstreamer1.0-rtsp-server: upgrade to 1.20.3
+- gstreamer1.0-vaapi: upgrade to 1.20.3
+- gstreamer1.0: upgrade to 1.20.3
+- gtk-doc: Remove hardcoded buildpath
+- harfbuzz: Fix compilation with clang
+- initramfs-framework: move storage mounts to actual rootfs
+- initscripts: run umountnfs as a KILL script
+- insane.bbclass: host-user-contaminated: Correct per package home path
+- insane: Fix buildpaths test to work with special devices
+- kernel-arch: Fix buildpaths leaking into external module compiles
+- kernel-devsrc: fix reproducibility and buildpaths QA warning
+- kernel-devsrc: ppc32: fix reproducibility
+- kernel-uboot.bbclass: Use vmlinux.initramfs when INITRAMFS_IMAGE_BUNDLE set
+- kernel.bbclass: pass LD also in savedefconfig
+- libffi: fix native build being not portable
+- libgcc: Fix standalone target builds with usrmerge distro feature
+- libmodule-build-perl: Use env utility to find perl interpreter
+- libsoup: upgrade to 3.0.7
+- libuv: upgrade to 1.44.2
+- linux-firmware: upgrade to 20220708
+- linux-firmware: restore WHENCE_CHKSUM variable
+- linux-yocto-rt/5.15: update to -rt48 (and fix -stable merge)
+- linux-yocto/5.10: fix build_OID_registry/conmakehash buildpaths warning
+- linux-yocto/5.10: fix buildpaths issue with gen-mach-types
+- linux-yocto/5.10: fix buildpaths issue with pnmtologo
+- linux-yocto/5.10: update to v5.10.135
+- linux-yocto/5.15: drop obsolete GPIO sysfs ABI
+- linux-yocto/5.15: fix build_OID_registry buildpaths warning
+- linux-yocto/5.15: fix buildpaths issue with gen-mach-types
+- linux-yocto/5.15: fix buildpaths issue with pnmtologo
+- linux-yocto/5.15: fix qemuppc buildpaths warning
+- linux-yocto/5.15: fix reproducibility issues
+- linux-yocto/5.15: update to v5.15.59
+- log4cplus: upgrade to 2.0.8
+- lttng-modules: Fix build failure for kernel v5.15.58
+- lttng-modules: upgrade to 2.13.4
+- lua: Fix multilib buildpath reproducibility issues
+- mkfontscale: upgrade to 1.2.2
+- oe-selftest-image: Ensure the image has sftp as well as dropbear
+- oe-selftest: devtool: test modify git recipe building from a subdir
+- oeqa/runtime/scp: Disable scp test for dropbear
+- oeqa/runtime: add test that the kernel has CONFIG_PREEMPT_RT enabled
+- oeqa/sdk: drop the nativesdk-python 2.x test
+- openssh: Add openssh-sftp-server to openssh RDEPENDS
+- openssh: break dependency on base package for -dev package
+- openssl: update to 3.0.5
+- package.bbclass: Avoid stripping signed kernel modules in splitdebuginfo
+- package.bbclass: Fix base directory for debugsource files when using externalsrc
+- package.bbclass: Fix kernel source handling when not using externalsrc
+- package_manager/ipk: do not pipe stderr to stdout
+- packagegroup-core-ssh-dropbear: Add openssh-sftp-server recommendation
+- patch: handle if S points to a subdirectory of a git repo
+- perf: fix reproducibility in 5.19+
+- perf: fix reproducibility in older releases of Linux
+- perf: sort-pmuevents: really keep array terminators
+- perl: don't install Makefile.old into perl-ptest
+- poky.conf: bump version for 4.0.3
+- pulseaudio: add m4-native to DEPENDS
+- python3: Backport patch to fix an issue in subinterpreters
+- qemu: Add PACKAGECONFIG for brlapi
+- qemu: Avoid accidental librdmacm linkage
+- qemu: Avoid accidental libvdeplug linkage
+- qemu: Fix slirp determinism issue
+- qemu: add PACKAGECONFIG for capstone
+- recipetool/devtool: Fix python egg whitespace issues in PACKAGECONFIG
+- ref-manual: variables: remove sphinx directive from literal block
+- rootfs-postcommands.bbclass: move host-user-contaminated.txt to ${S}
+- ruby: add PACKAGECONFIG for capstone
+- rust: fix issue building cross-canadian tools for aarch64 on x86_64
+- sanity.bbclass: Add ftps to accepted URI protocols for mirrors sanity
+- selftest/runtime_test/virgl: Disable for all almalinux
+- sstatesig: Include all dependencies in SPDX task signatures
+- strace: set COMPATIBLE_HOST for riscv32
+- systemd: Added base_bindir into pkg_postinst:udev-hwdb.
+- udev-extraconf/initrdscripts/parted: Rename mount.blacklist -> mount.ignorelist
+- udev-extraconf/mount.sh: add LABELs to mountpoints
+- udev-extraconf/mount.sh: ignore lvm in automount
+- udev-extraconf/mount.sh: only mount devices on hotplug
+- udev-extraconf/mount.sh: save mount name in our tmp filecache
+- udev-extraconf: fix some systemd automount issues
+- udev-extraconf: force systemd-udevd to use shared MountFlags
+- udev-extraconf: make automount base directory configurable
+- udev-extraconf/mount.sh: fix a umount issue
+- udev-extraconf/mount.sh: fix path mismatching issues
+- vala: Fix on target wrapper buildpaths issue
+- vala: upgrade to 0.56.2
+- vim: upgrade to 9.0.0063
+- waffle: correctly request wayland-scanner executable
+- webkitgtk: upgrade to 2.36.4
+- weston: upgrade to 10.0.1
+- wic/plugins/rootfs: Fix NameError for 'orig_path'
+- wic: fix WicError message
+- wireless-regdb: upgrade to 2022.06.06
+- xdpyinfo: upgrade to 1.3.3
+- xev: upgrade to 1.2.5
+- xf86-input-synaptics: upgrade to 1.9.2
+- xmodmap: upgrade to 1.0.11
+- xorg-app: Tweak handling of compression changes in SRC_URI
+- xserver-xorg: upgrade to 21.1.4
+- xwayland: upgrade to 22.1.3
+- yocto-bsps/5.10: fix buildpaths issue with gen-mach-types
+- yocto-bsps/5.10: fix buildpaths issue with pnmtologo
+- yocto-bsps/5.15: fix buildpaths issue with gen-mach-types
+- yocto-bsps/5.15: fix buildpaths issue with pnmtologo
+- yocto-bsps: buildpaths fixes
+- yocto-bsps: update to v5.10.130
+- yocto-bsps: buildpaths fixes
+- yocto-bsps: update to v5.15.54
+
+
+Known Issues in Yocto-4.0.3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Ahmed Hossam
+- Alejandro Hernandez Samaniego
+- Alex Kiernan
+- Alexander Kanavin
+- Bruce Ashfield
+- Chanho Park
+- Christoph Lauer
+- David Bagonyi
+- Dmitry Baryshkov
+- He Zhe
+- Hitendra Prajapati
+- Jose Quaresma
+- Joshua Watt
+- Kai Kang
+- Khem Raj
+- Lee Chee Yang
+- Lucas Stach
+- Markus Volk
+- Martin Jansa
+- Maxime Roussin-Bélanger
+- Michael Opdenacker
+- Mihai Lindner
+- Ming Liu
+- Mingli Yu
+- Muhammad Hamza
+- Naveen
+- Pascal Bach
+- Paul Eggleton
+- Pavel Zhukov
+- Peter Bergin
+- Peter Kjellerstedt
+- Peter Marko
+- Pgowda
+- Raju Kumar Pothuraju
+- Richard Purdie
+- Robert Joslyn
+- Ross Burton
+- Sakib Sajal
+- Shruthi Ravichandran
+- Steve Sakoman
+- Sundeep Kokkonda
+- Thomas Roos
+- Tom Hochstein
+- Wentao Zhang
+- Yi Zhao
+- Yue Tao
+- gr embeter
+- leimaohui
+- wangmy
+
+
+Repositories / Downloads for Yocto-4.0.3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: https://git.yoctoproject.org/git/poky
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.3 </poky/log/?h=yocto-4.0.3>`
+- Git Revision: :yocto_git:`387ab5f18b17c3af3e9e30dc58584641a70f359f </poky/commit/?id=387ab5f18b17c3af3e9e30dc58584641a70f359f>`
+- Release Artefact: poky-387ab5f18b17c3af3e9e30dc58584641a70f359f
+- sha: fe674186bdb0684313746caa9472134fc19e6f1443c274fe02c06cb1e675b404
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.3/poky-387ab5f18b17c3af3e9e30dc58584641a70f359f.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.3/poky-387ab5f18b17c3af3e9e30dc58584641a70f359f.tar.bz2
+
+openembedded-core
+
+- Repository Location: https://git.openembedded.org/openembedded-core
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.3 </openembedded-core/log/?h=yocto-4.0.3>`
+- Git Revision: :oe_git:`2cafa6ed5f0aa9df5a120b6353755d56c7c7800d </openembedded-core/commit/?id=2cafa6ed5f0aa9df5a120b6353755d56c7c7800d>`
+- Release Artefact: oecore-2cafa6ed5f0aa9df5a120b6353755d56c7c7800d
+- sha: 5181d3e8118c6112936637f01a07308b715e0e3d12c7eba338556747dfcabe92
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.3/oecore-2cafa6ed5f0aa9df5a120b6353755d56c7c7800d.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.3/oecore-2cafa6ed5f0aa9df5a120b6353755d56c7c7800d.tar.bz2
+
+meta-mingw
+
+- Repository Location: https://git.yoctoproject.org/git/meta-mingw
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.3 </meta-mingw/log/?h=yocto-4.0.3>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.3/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.3/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: https://git.yoctoproject.org/git/meta-gplv2
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.3 </meta-gplv2/log/?h=yocto-4.0.3>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.3/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.3/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: https://git.openembedded.org/bitbake
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.3 </bitbake/log/?h=yocto-4.0.3>`
+- Git Revision: :oe_git:`b8fd6f5d9959d27176ea016c249cf6d35ac8ba03 </bitbake/commit/?id=b8fd6f5d9959d27176ea016c249cf6d35ac8ba03>`
+- Release Artefact: bitbake-b8fd6f5d9959d27176ea016c249cf6d35ac8ba03
+- sha: 373818b1dee2c502264edf654d6d8f857b558865437f080e02d5ba6bb9e72cc3
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.3/bitbake-b8fd6f5d9959d27176ea016c249cf6d35ac8ba03.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.3/bitbake-b8fd6f5d9959d27176ea016c249cf6d35ac8ba03.tar.bz2
+
+yocto-docs
+
+- Repository Location: https://git.yoctoproject.org/git/yocto-docs
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.3 </yocto-docs/log/?h=yocto-4.0.3>`
+- Git Revision: :yocto_git:`d9b3dcf65ef25c06f552482aba460dd16862bf96 </yocto-docs/commit/?id=d9b3dcf65ef25c06f552482aba460dd16862bf96>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.4.rst b/documentation/migration-guides/release-notes-4.0.4.rst
new file mode 100644
index 0000000000..2623a1dca7
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.4.rst
@@ -0,0 +1,299 @@
+Release notes for Yocto-4.0.4 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- binutils: fix :cve:`2022-38533`
+- curl: fix :cve:`2022-35252`
+- sqlite: fix :cve:`2022-35737`
+- grub2: fix :cve:`2021-3695`, :cve:`2021-3696`, :cve:`2021-3697`, :cve:`2022-28733`, :cve:`2022-28734` and :cve:`2022-28735`
+- u-boot: fix :cve:`2022-30552` and :cve:`2022-33967`
+- libxml2: Ignore :cve:`2016-3709`
+- libtiff: fix :cve:`2022-34526`
+- zlib: fix :cve:`2022-37434`
+- gnutls: fix :cve:`2022-2509`
+- u-boot: fix :cve:`2022-33103`
+- qemu: fix :cve:`2021-3507`, :cve:`2021-3929`, :cve:`2021-4158`, :cve:`2022-0216` and :cve:`2022-0358`
+
+
+Fixes in Yocto-4.0.4
+~~~~~~~~~~~~~~~~~~~~
+
+- apr: Cache configure tests which use AC_TRY_RUN
+- apr: Use correct strerror_r implementation based on libc type
+- apt: fix nativesdk-apt build failure during the second time build
+- archiver.bbclass: remove unused do_deploy_archives[dirs]
+- archiver.bbclass: some recipes that use the kernelsrc bbclass use the shared source
+- autoconf: Fix strict prototype errors in generated tests
+- autoconf: Update K&R style functions
+- bind: upgrade to 9.18.5
+- bitbake.conf: set BB_DEFAULT_UMASK using ??=
+- bitbake: ConfHandler/BBHandler: Improve comment error messages and add tests
+- bitbake: ConfHandler: Remove lingering close
+- bitbake: bb/utils: movefile: use the logger for printing
+- bitbake: bb/utils: remove: check the path against the expanded python glob
+- bitbake: bitbake-user-manual: Correct description of the ??= operator
+- bitbake: bitbake-user-manual: npm fetcher: improve description of SRC_URI format
+- bitbake: bitbake-user-manual: hashserv can be accessed on a dedicated domain
+- bitbake: runqueue: add cpu/io pressure regulation
+- bitbake: runqueue: add memory pressure regulation
+- bitbake: cooker: Drop sre_constants usage
+- bitbake: doc: bitbake-user-manual: add explicit target for crates fetcher
+- bitbake: doc: bitbake-user-manual: document npm and npmsw fetchers
+- bitbake: event.py: ignore exceptions from stdout and stderr operations in atexit
+- bitbake: fetch2: Ensure directory exists before creating symlink
+- bitbake: fetch2: gitsm: fix incorrect handling of git submodule relative urls
+- bitbake: runqueue: Change pressure file warning to a note
+- bitbake: runqueue: Fix unihash cache mismatch issues
+- bitbake: toaster: fix kirkstone version
+- bitbake: utils: Pass lock argument in fileslocked
+- bluez5: upgrade to 5.65
+- boost: fix install of fiber shared libraries
+- cairo: Adapt the license information based on what is being built
+- classes: cve-check: Get shared database lock
+- cmake: remove CMAKE_ASM_FLAGS variable in toolchain file
+- connman: Backports for security fixes
+- core-image.bbclass: Exclude openssh complementary packages
+- cracklib: Drop using register keyword
+- cracklib: upgrade to 2.9.8
+- create-spdx: Fix supplier field
+- create-spdx: handle links to inaccessible locations
+- create-spdx: ignore packing control files from ipk and deb
+- cve-check: Don't use f-strings
+- cve-check: close cursors as soon as possible
+- devtool/upgrade: catch bb.fetch2.decodeurl errors
+- devtool/upgrade: correctly clean up when recipe filename isn't yet known
+- devtool: error out when workspace is using old override syntax
+- ell: upgrade to 0.50
+- epiphany: upgrade to 42.4
+- externalsrc: Don't wipe out src dir when EXPORT_FUNCTIONS is used.
+- gcc-multilib-config: Fix i686 toolchain relocation issues
+- gcr: Define _GNU_SOURCE
+- gdk-pixbuf: upgrade to 2.42.9
+- glib-networking: upgrade to 2.72.2
+- go: upgrade to v1.17.13
+- insane.bbclass: Skip patches not in oe-core by full path
+- iso-codes: upgrade to 4.11.0
+- kernel-fitimage.bbclass: add padding algorithm property in config nodes
+- kernel-fitimage.bbclass: only package unique DTBs
+- kernel: Always set CC and LD for the kernel build
+- kernel: Use consistent make flags for menuconfig
+- lib:npm_registry: initial checkin
+- libatomic-ops: upgrade to 7.6.14
+- libcap: upgrade to 2.65
+- libjpeg-turbo: upgrade to 2.1.4
+- libpam: use /run instead of /var/run in systemd tmpfiles
+- libtasn1: upgrade to 4.19.0
+- liburcu: upgrade to 0.13.2
+- libwebp: upgrade to 1.2.4
+- libwpe: upgrade to 1.12.3
+- libxml2: Port gentest.py to Python-3
+- lighttpd: upgrade to 1.4.66
+- linux-yocto/5.10: update genericx86* machines to v5.10.135
+- linux-yocto/5.10: update to v5.10.137
+- linux-yocto/5.15: update genericx86* machines to v5.15.59
+- linux-yocto/5.15: update to v5.15.62
+- linux-yocto: Fix COMPATIBLE_MACHINE regex match
+- linux-yocto: prepend the value with a space when appending to KERNEL_EXTRA_ARGS
+- lttng-modules: fix 5.19+ build
+- lttng-modules: fix build against mips and v5.19 kernel
+- lttng-modules: fix build for kernel 5.10.137
+- lttng-modules: replace mips compaction fix with upstream change
+- lz4: upgrade to 1.9.4
+- maintainers: update opkg maintainer
+- meta: introduce UBOOT_MKIMAGE_KERNEL_TYPE
+- migration guides: add missing release notes
+- mobile-broadband-provider-info: upgrade to 20220725
+- nativesdk: Clear TUNE_FEATURES
+- npm: replace 'npm pack' call by 'tar czf'
+- npm: return content of 'package.json' in 'npm_pack'
+- npm: take 'version' directly from 'package.json'
+- npm: use npm_registry to cache package
+- oeqa/gotoolchain: put writable files in the Go module cache
+- oeqa/gotoolchain: set CGO_ENABLED=1
+- oeqa/parselogs: add qemuarmv5 arm-charlcd masking
+- oeqa/qemurunner: add run_serial() comment
+- oeqa/selftest: rename git.py to intercept.py
+- oeqa: qemurunner: Report UNIX Epoch timestamp on login
+- package_rpm: Do not replace square brackets in %files
+- packagegroup-self-hosted: update for strace
+- parselogs: Ignore xf86OpenConsole error
+- perf: Fix reproducibility issues with 5.19 onwards
+- pinentry: enable _XOPEN_SOURCE on musl for wchar usage in curses
+- poky.conf: add ubuntu-22.04 to tested distros
+- poky.conf: bump version for 4.0.4
+- pseudo: Update to include recent upstream minor fixes
+- python3-pip: Fix RDEPENDS after the update
+- ref-manual: add numa to machine features
+- relocate_sdk.py: ensure interpreter size error causes relocation to fail
+- rootfs-postcommands.bbclass: avoid moving ssh host keys if etc is writable
+- rootfs.py: don't try to list installed packages for baremetal images
+- rootfspostcommands.py: Cleanup subid backup files generated by shadow-utils
+- ruby: drop capstone support
+- runqemu: Add missing space on default display option
+- runqemu: display host uptime when starting
+- sanity: add a comment to ensure CONNECTIVITY_CHECK_URIS is correct
+- scripts/oe-setup-builddir: make it known where configurations come from
+- scripts/runqemu.README: fix typos and trailing whitespaces
+- selftest/wic: Tweak test case to not depend on kernel size
+- shadow: Avoid nss warning/error with musl
+- shadow: Enable subid support
+- system-requirements.rst: Add Ubuntu 22.04 to list of supported distros
+- systemd: Add 'no-dns-fallback' PACKAGECONFIG option
+- systemd: Fix unwritable /var/lock when no sysvinit handling
+- sysvinit-inittab/start_getty: Fix respawn too fast
+- tcp-wrappers: Fix implicit-function-declaration warnings
+- tzdata: upgrade to 2022b
+- util-linux: Remove --enable-raw from EXTRA_OECONF
+- vala: upgrade to 0.56.3
+- vim: Upgrade to 9.0.0453
+- watchdog: Include needed system header for function decls
+- webkitgtk: upgrade to 2.36.5
+- weston: upgrade to 10.0.2
+- wic/bootimg-efi: use cross objcopy when building unified kernel image
+- wic: add target tools to PATH when executing native commands
+- wic: depend on cross-binutils
+- wireless-regdb: upgrade to 2022.08.12
+- wpebackend-fdo: upgrade to 1.12.1
+- xinetd: Pass missing -D_GNU_SOURCE
+- xz: update to 5.2.6
+
+
+Known Issues in Yocto-4.0.4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alejandro Hernandez Samaniego
+- Alex Stewart
+- Alexander Kanavin
+- Alexandre Belloni
+- Andrei Gherzan
+- Anuj Mittal
+- Aryaman Gupta
+- Awais Belal
+- Beniamin Sandu
+- Bertrand Marquis
+- Bruce Ashfield
+- Changqing Li
+- Chee Yang Lee
+- Daiane Angolini
+- Enrico Scholz
+- Ernst Sjöstrand
+- Gennaro Iorio
+- Hitendra Prajapati
+- Jacob Kroon
+- Jon Mason
+- Jose Quaresma
+- Joshua Watt
+- Kai Kang
+- Khem Raj
+- Kristian Amlie
+- LUIS ENRIQUEZ
+- Mark Hatle
+- Martin Beeger
+- Martin Jansa
+- Mateusz Marciniec
+- Michael Opdenacker
+- Mihai Lindner
+- Mikko Rapeli
+- Ming Liu
+- Niko Mauno
+- Ola x Nilsson
+- Otavio Salvador
+- Paul Eggleton
+- Pavel Zhukov
+- Peter Bergin
+- Peter Kjellerstedt
+- Peter Marko
+- Rajesh Dangi
+- Randy MacLeod
+- Rasmus Villemoes
+- Richard Purdie
+- Robert Joslyn
+- Roland Hieber
+- Ross Burton
+- Sakib Sajal
+- Shubham Kulkarni
+- Steve Sakoman
+- Ulrich Ölmann
+- Yang Xu
+- Yongxin Liu
+- ghassaneben
+- pgowda
+- wangmy
+
+Repositories / Downloads for Yocto-4.0.4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: https://git.yoctoproject.org/git/poky
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.4 </poky/log/?h=yocto-4.0.4>`
+- Git Revision: :yocto_git:`d64bef1c7d713b92a51228e5ade945835e5a94a4 </poky/commit/?id=d64bef1c7d713b92a51228e5ade945835e5a94a4>`
+- Release Artefact: poky-d64bef1c7d713b92a51228e5ade945835e5a94a4
+- sha: b5e92506b31f88445755bad2f45978b747ad1a5bea66ca897370542df5f1e7db
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.4/poky-d64bef1c7d713b92a51228e5ade945835e5a94a4.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.4/poky-d64bef1c7d713b92a51228e5ade945835e5a94a4.tar.bz2
+
+openembedded-core
+
+- Repository Location: https://git.openembedded.org/openembedded-core
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.4 </openembedded-core/log/?h=yocto-4.0.4>`
+- Git Revision: :oe_git:`f7766da462905ec67bf549d46b8017be36cd5b2a </openembedded-core/commit/?id=f7766da462905ec67bf549d46b8017be36cd5b2a>`
+- Release Artefact: oecore-f7766da462905ec67bf549d46b8017be36cd5b2a
+- sha: ce0ac011474db5e5f0bb1be3fb97f890a02e46252a719dbcac5813268e48ff16
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.4/oecore-f7766da462905ec67bf549d46b8017be36cd5b2a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.4/oecore-f7766da462905ec67bf549d46b8017be36cd5b2a.tar.bz2
+
+meta-mingw
+
+- Repository Location: https://git.yoctoproject.org/git/meta-mingw
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.4 </meta-mingw/log/?h=yocto-4.0.4>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.4/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.4/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: https://git.yoctoproject.org/git/meta-gplv2
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.4 </meta-gplv2/log/?h=yocto-4.0.4>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.4/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.4/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: https://git.openembedded.org/bitbake
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.4 </bitbake/log/?h=yocto-4.0.4>`
+- Git Revision: :oe_git:`ac576d6fad6bba0cfea931883f25264ea83747ca </bitbake/commit/?id=ac576d6fad6bba0cfea931883f25264ea83747ca>`
+- Release Artefact: bitbake-ac576d6fad6bba0cfea931883f25264ea83747ca
+- sha: 526c2768874eeda61ade8c9ddb3113c90d36ef44a026d6690f02de6f3dd0ea12
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.4/bitbake-ac576d6fad6bba0cfea931883f25264ea83747ca.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.4/bitbake-ac576d6fad6bba0cfea931883f25264ea83747ca.tar.bz2
+
+yocto-docs
+
+- Repository Location: https://git.yoctoproject.org/git/yocto-docs
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.4 </yocto-docs/log/?h=yocto-4.0.4>`
+- Git Revision: :yocto_git:`f632dad24c39778f948014029e74db3c871d9d21 </yocto-docs/commit/?id=f632dad24c39778f948014029e74db3c871d9d21>`
diff --git a/documentation/migration-guides/release-notes-4.0.5.rst b/documentation/migration-guides/release-notes-4.0.5.rst
new file mode 100644
index 0000000000..95cc6f6d30
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.5.rst
@@ -0,0 +1,196 @@
+Release notes for Yocto-4.0.5 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.5
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- qemu: fix :cve:`2021-3750`, :cve:`2021-3611` and :cve:`2022-2962`
+- binutils: fix :cve:`2022-38126`, :cve:`2022-38127` and :cve:`2022-38128`
+- tiff: fix :cve:`2022-2867`, :cve:`2022-2868` and :cve:`2022-2869`
+- inetutils: fix :cve:`2022-39028`
+- go: fix :cve:`2022-27664`
+
+Fixes in Yocto-4.0.5
+~~~~~~~~~~~~~~~~~~~~
+
+- Revert "gcc-cross-canadian: Add symlink to real-ld alongside other symlinks"
+- bind: upgrade to 9.18.7
+- binutils: stable 2.38 branch updates (dc2474e7)
+- bitbake: Fix npm to use https rather than http
+- bitbake: asyncrpc/client: Fix unix domain socket chdir race issues
+- bitbake: bitbake: Add copyright headers where missing
+- bitbake: gitsm: Error out if submodule refers to parent repo
+- bitbake: runqueue: Drop deadlock breaking force fail
+- bitbake: runqueue: Ensure deferred tasks are sorted by multiconfig
+- bitbake: runqueue: Improve deadlock warning messages
+- bitbake: siggen: Fix insufficient entropy in sigtask file names
+- bitbake: tests/fetch: Allow handling of a file:// url within a submodule
+- build-appliance-image: Update to kirkstone head revision (4a88ada)
+- busybox: add devmem 128-bit support
+- classes: files: Extend overlayfs-etc class
+- coreutils: add openssl PACKAGECONFIG
+- create-pull-request: don't switch the git remote protocol to git://
+- dev-manual: fix reference to BitBake user manual
+- expat: upgrade 2.4.8 -> 2.4.9
+- files: overlayfs-etc: refactor preinit template
+- gcc-cross-canadian: add default plugin linker
+- gcc: add arm-v9 support
+- git: upgrade 2.35.4 -> 2.35.5
+- glibc-locale: explicitly remove empty dirs in ${libdir}
+- glibc-tests: use += instead of :append
+- glibc: stable 2.35 branch updates (8d125a1f)
+- go-native: switch from SRC_URI:append to SRC_URI +=
+- image_types_wic.bbclass: fix cross binutils dependency
+- kern-tools: allow 'y' or 'm' to avoid config audit warnings
+- kern-tools: fix queue processing in relative TOPDIR configurations
+- kernel-yocto: allow patch author date to be commit date
+- libpng: upgrade to 1.6.38
+- linux-firmware: package new Qualcomm firmware
+- linux-firmware: upgrade 20220708 -> 20220913
+- linux-libc-headers: switch from SRC_URI:append to SRC_URI +=
+- linux-yocto-dev: add qemuarm64
+- linux-yocto/5.10: update to v5.10.149
+- linux-yocto/5.15: cfg: fix ACPI warnings for -tiny
+- linux-yocto/5.15: update to v5.15.68
+- local.conf.sample: correct the location of public hashserv
+- ltp: Fix pread02 case trigger the glibc overflow detection
+- lttng-modules: Fix crash on powerpc64
+- lttng-tools: Disable on qemuriscv32
+- lttng-tools: Disable on riscv32
+- migration-guides: add 4.0.4 release notes
+- oeqa/runtime/dnf: fix typo
+- own-mirrors: add crate
+- perf: Fix for recent kernel upgrades
+- poky.conf: bump version for 4.0.5
+- poky.yaml.in: update version requirements
+- python3-rfc3986-validator: switch from SRC_URI:append to SRC_URI +=
+- python3: upgrade 3.10.4 -> 3.10.7
+- qemu: Backport patches from upstream to support float128 on qemu-ppc64
+- rpm: Remove -Wimplicit-function-declaration warnings
+- rpm: update to 4.17.1
+- rsync: update to 3.2.5
+- stress-cpu: disable float128 math on powerpc64 to avoid SIGILL
+- tune-neoversen2: support tune-neoversen2 based on armv9a
+- tzdata: update to 2022d
+- u-boot: switch from append to += in SRC_URI
+- uninative: Upgrade to 3.7 to work with glibc 2.36
+- vim: Upgrade to 9.0.0598
+- webkitgtk: Update to 2.36.7
+
+
+Known Issues in Yocto-4.0.5
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- There are recent CVEs in key components such as openssl. They are not fixed in this release, which was built before the issues were known and fixes were available, but the fixes are now available on the kirkstone branch.
+
+
+Contributors to Yocto-4.0.5
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Adrian Freihofer
+- Alexander Kanavin
+- Alexandre Belloni
+- Bhabu Bindu
+- Bruce Ashfield
+- Chen Qi
+- Daniel McGregor
+- Denys Dmytriyenko
+- Dmitry Baryshkov
+- Florin Diaconescu
+- He Zhe
+- Joshua Watt
+- Khem Raj
+- Martin Jansa
+- Michael Halstead
+- Michael Opdenacker
+- Mikko Rapeli
+- Mingli Yu
+- Neil Horman
+- Pavel Zhukov
+- Richard Purdie
+- Robert Joslyn
+- Ross Burton
+- Ruiqiang Hao
+- Samuli Piippo
+- Steve Sakoman
+- Sundeep KOKKONDA
+- Teoh Jay Shen
+- Tim Orling
+- Virendra Thakur
+- Vyacheslav Yurkov
+- Xiangyu Chen
+- Yash Shinde
+- pgowda
+- Wang Mingyu
+
+
+Repositories / Downloads for Yocto-4.0.5
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.5 </poky/log/?h=yocto-4.0.5>`
+- Git Revision: :yocto_git:`2e79b199114b25d81bfaa029ccfb17676946d20d </poky/commit/?id=2e79b199114b25d81bfaa029ccfb17676946d20d>`
+- Release Artefact: poky-2e79b199114b25d81bfaa029ccfb17676946d20d
+- sha: 7bcf3f901d4c5677fc95944ab096e9e306f4c758a658dde5befd16861ad2b8ea
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.5/poky-2e79b199114b25d81bfaa029ccfb17676946d20d.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.5/poky-2e79b199114b25d81bfaa029ccfb17676946d20d.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.5 </openembedded-core/log/?h=yocto-4.0.5>`
+- Git Revision: :oe_git:`fbdf93f43ff4b876487e1f26752598ec8abcb46e </openembedded-core/commit/?id=fbdf93f43ff4b876487e1f26752598ec8abcb46e>`
+- Release Artefact: oecore-fbdf93f43ff4b876487e1f26752598ec8abcb46e
+- sha: 2d9b5a8e9355b633bb57633cc8c2d319ba13fe4721f79204e61116b3faa6cbf1
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.5/oecore-fbdf93f43ff4b876487e1f26752598ec8abcb46e.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.5/oecore-fbdf93f43ff4b876487e1f26752598ec8abcb46e.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.5 </meta-mingw/log/?h=yocto-4.0.5>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.5/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.5/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.5 </meta-gplv2/log/?h=yocto-4.0.5>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.5/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.5/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.5 </bitbake/log/?h=yocto-4.0.5>`
+- Git Revision: :oe_git:`c90d57497b9bcd237c3ae810ee8edb5b0d2d575a </bitbake/commit/?id=c90d57497b9bcd237c3ae810ee8edb5b0d2d575a>`
+- Release Artefact: bitbake-c90d57497b9bcd237c3ae810ee8edb5b0d2d575a
+- sha: 5698d548ce179036e46a24f80b213124c8825a4f443fa1d6be7ab0f70b01a9ff
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.5/bitbake-c90d57497b9bcd237c3ae810ee8edb5b0d2d575a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.5/bitbake-c90d57497b9bcd237c3ae810ee8edb5b0d2d575a.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.5 </yocto-docs/log/?h=yocto-4.0.5>`
+- Git Revision: :yocto_git:`8c2f9f54e29781f4ee72e81eeaa12ceaa82dc2d3 </yocto-docs/commit/?id=8c2f9f54e29781f4ee72e81eeaa12ceaa82dc2d3>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.6.rst b/documentation/migration-guides/release-notes-4.0.6.rst
new file mode 100644
index 0000000000..76d23fcf0c
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.6.rst
@@ -0,0 +1,313 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.6 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.6
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- bash: Fix :cve:`2022-3715`
+- curl: Fix :cve:`2022-32221`, :cve:`2022-42915` and :cve:`2022-42916`
+- dbus: Fix :cve:`2022-42010`, :cve:`2022-42011` and :cve:`2022-42012`
+- dropbear: Fix :cve:`2021-36369`
+- ffmpeg: Fix :cve:`2022-3964`, :cve:`2022-3965`
+- go: Fix :cve:`2022-2880`
+- grub2: Fix :cve:`2022-2601`, :cve:`2022-3775` and :cve:`2022-28736`
+- libarchive: Fix :cve:`2022-36227`
+- libpam: Fix :cve:`2022-28321`
+- libsndfile1: Fix :cve:`2021-4156`
+- lighttpd: Fix :cve:`2022-41556`
+- openssl: Fix :cve:`2022-3358`
+- pixman: Fix :cve:`2022-44638`
+- python3-mako: Fix :cve:`2022-40023`
+- python3: Fix :cve:`2022-42919`
+- qemu: Fix :cve:`2022-3165`
+- sysstat: Fix :cve:`2022-39377`
+- systemd: Fix :cve:`2022-3821`
+- tiff: Fix :cve:`2022-2953`, :cve:`2022-3599`, :cve:`2022-3597`, :cve:`2022-3626`, :cve:`2022-3627`, :cve:`2022-3570`, :cve:`2022-3598` and :cve:`2022-3970`
+- vim: Fix :cve:`2022-3352`, :cve:`2022-3705` and :cve:`2022-4141`
+- wayland: Fix :cve:`2021-3782`
+- xserver-xorg: Fix :cve:`2022-3550` and :cve:`2022-3551`
+
+
+Fixes in Yocto-4.0.6
+~~~~~~~~~~~~~~~~~~~~
+
+- archiver: avoid using machine variable as it breaks multiconfig
+- babeltrace: upgrade to 1.5.11
+- bind: upgrade to 9.18.8
+- bitbake.conf: Drop export of SOURCE_DATE_EPOCH_FALLBACK
+- bitbake: gitsm: Fix regression in gitsm submodule path parsing
+- bitbake: runqueue: Fix race issues around hash equivalence and sstate reuse
+- bluez5: Point hciattach bcm43xx firmware search path to /lib/firmware
+- bluez5: add dbus to RDEPENDS
+- build-appliance-image: Update to kirkstone head revision
+- buildtools-tarball: export certificates to python and curl
+- cargo_common.bbclass: Fix typos
+- classes: make TOOLCHAIN more permissive for kernel
+- cmake-native: Fix host tool contamination (Bug: 14951)
+- common-tasks.rst: fix oeqa runtime test path
+- create-spdx.bbclass: remove unused SPDX_INCLUDE_PACKAGED
+- create-spdx: Remove ";name=..." for downloadLocation
+- create-spdx: default share_src for shared sources
+- cve-update-db-native: add timeout to urlopen() calls
+- dbus: upgrade to 1.14.4
+- dhcpcd: fix to work with systemd
+- expat: upgrade to 2.5.0
+- externalsrc.bbclass: Remove a trailing slash from ${B}
+- externalsrc.bbclass: fix git repo detection
+- externalsrc: git submodule--helper list unsupported
+- gcc-shared-source: Fix source date epoch handling
+- gcc-source: Drop gengtype manipulation
+- gcc-source: Ensure deploy_source_date_epoch sstate hash doesn't change
+- gcc-source: Fix gengtypes race
+- gdk-pixbuf: upgrade to 2.42.10
+- get_module_deps3.py: Check attribute '__file__'
+- glib-2.0: fix rare GFileInfo test case failure
+- glibc-locale: Do not INHIBIT_DEFAULT_DEPS
+- gnomebase.bbclass: return the whole version for tarball directory if it is a number
+- gnutls: Unified package names to lower-case
+- groff: submit patches upstream
+- gstreamer1.0-libav: fix errors with ffmpeg 5.x
+- gstreamer1.0: upgrade to 1.20.4
+- ifupdown: upgrade to 0.8.39
+- insane.bbclass: Allow hashlib version that only accepts one parameter
+- iso-codes: upgrade to 4.12.0
+- kea: submit patch upstream (fix-multilib-conflict.patch)
+- kern-tools: fix relative path processing
+- kern-tools: integrate ZFS speedup patch
+- kernel-yocto: improve fatal error messages of symbol_why.py
+- kernel.bbclass: Include randstruct seed assets in STAGING_KERNEL_BUILDDIR
+- kernel.bbclass: make KERNEL_DEBUG_TIMESTAMPS work at rebuild
+- kernel: Clear SYSROOT_DIRS instead of replacing sysroot_stage_all
+- libcap: upgrade to 2.66
+- libepoxy: convert to git
+- libepoxy: update to 1.5.10
+- libffi: submit patch upstream (0001-arm-sysv-reverted-clang-VFP-mitigation.patch)
+- libffi: upgrade to 3.4.4
+- libical: upgrade to 3.0.16
+- libksba: upgrade to 1.6.2
+- libuv: fixup SRC_URI
+- libxcrypt: upgrade to 4.4.30
+- lighttpd: upgrade to 1.4.67
+- linux-firmware: add new fw file to ${PN}-qcom-adreno-a530
+- linux-firmware: don't put the firmware into the sysroot
+- linux-firmware: package amdgpu firmware
+- linux-firmware: split rtl8761 firmware
+- linux-firmware: upgrade to 20221109
+- linux-yocto/5.10: update genericx86* machines to v5.10.149
+- linux-yocto/5.15: fix CONFIG_CRYPTO_CCM mismatch warnings
+- linux-yocto/5.15: update genericx86* machines to v5.15.72
+- linux-yocto/5.15: update to v5.15.78
+- ltp: backport clock_gettime04 fix from upstream
+- lttng-modules: upgrade to 2.13.7
+- lttng-tools: Upgrade to 2.13.8
+- lttng-tools: submit determinism.patch upstream
+- lttng-ust: upgrade to 2.13.5
+- meson: make wrapper options sub-command specific
+- meta-selftest/staticids: add render group for systemd
+- mirrors.bbclass: update CPAN_MIRROR
+- mirrors.bbclass: use shallow tarball for binutils-native
+- mobile-broadband-provider-info: upgrade 20220725 -> 20221107
+- mtd-utils: upgrade 2.1.4 -> 2.1.5
+- numactl: upgrade to 2.0.16
+- oe/packagemanager/rpm: don't leak file objects
+- oeqa/selftest/lic_checksum: Cleanup changes to emptytest include
+- oeqa/selftest/minidebuginfo: Create selftest for minidebuginfo
+- oeqa/selftest/tinfoil: Add test for separate config_data with recipe_parse_file()
+- openssl: Fix SSL_CERT_FILE to match ca-certs location
+- openssl: upgrade to 3.0.7
+- openssl: export necessary env vars in SDK
+- opkg-utils: use a git clone, not a dynamic snapshot
+- opkg: Set correct info_dir and status_file in opkg.conf
+- overlayfs: Allow not used mount points
+- ovmf: correct patches status
+- package: Fix handling of minidebuginfo with newer binutils
+- perf: Depend on native setuptools3
+- poky.conf: bump version for 4.0.6
+- psplash: add psplash-default in rdepends
+- psplash: consider the situation of psplash not exist for systemd
+- python3: advance to version 3.10.8
+- qemu-helper-native: Correctly pass program name as argv[0]
+- qemu-helper-native: Re-write bridge helper as C program
+- qemu-native: Add PACKAGECONFIG option for jack
+- qemu: add io_uring PACKAGECONFIG
+- quilt: backport a patch to address grep 3.8 failures
+- resolvconf: make it work
+- rm_work: exclude the SSTATETASKS from the rm_work tasks signature
+- runqemu: Do not perturb script environment
+- runqemu: Fix gl-es argument from causing other arguments to be ignored
+- sanity: Drop data finalize call
+- sanity: check for GNU tar specifically
+- scripts/oe-check-sstate: cleanup
+- scripts/oe-check-sstate: force build to run for all targets, specifically populate_sysroot
+- scripts: convert-overrides: Allow command-line customizations
+- socat: upgrade to 1.7.4.4
+- SPDX and CVE documentation updates
+- sstate: Allow optimisation of do_deploy_archives task dependencies
+- sstatesig: emit more helpful error message when not finding sstate manifest
+- sstatesig: skip the rm_work task signature
+- sudo: upgrade to 1.9.12p1
+- systemd: Consider PACKAGECONFIG in RRECOMMENDS
+- systemd: add group render to udev package
+- tcl: correct patch status
+- tiff: refresh with devtool
+- tiff: add CVE tag to b258ed69a485a9cfb299d9f060eb2a46c54e5903.patch
+- u-boot: Remove duplicate inherit of cml1
+- uboot-sign: Fix using wrong KEY_REQ_ARGS
+- vala: install vapigen-wrapper into /usr/bin/crosscripts and stage only that
+- valgrind: remove most hidden tests for arm64
+- vim: Upgrade to 9.0.0947
+- vulkan-samples: add lfs=0 to SRC_URI to avoid git smudge errors in do_unpack
+- wic: honor the SOURCE_DATE_EPOCH in case of updated fstab
+- wic: make ext2/3/4 images reproducible
+- wic: swap partitions are not added to fstab
+- wpebackend-fdo: upgrade to 1.14.0
+- xserver-xorg: move some recommended dependencies in required
+- xwayland: upgrade to 22.1.5
+
+
+Known Issues in Yocto-4.0.6
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.6
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alex Kiernan
+- Alexander Kanavin
+- Alexey Smirnov
+- Bartosz Golaszewski
+- Bernhard Rosenkränzer
+- Bhabu Bindu
+- Bruce Ashfield
+- Chee Yang Lee
+- Chen Qi
+- Christian Eggers
+- Claus Stovgaard
+- Diego Sueiro
+- Dmitry Baryshkov
+- Ed Tanous
+- Enrico Jörns
+- Etienne Cordonnier
+- Frank de Brabander
+- Harald Seiler
+- Hitendra Prajapati
+- Jan-Simon Moeller
+- Jeremy Puhlman
+- Joe Slater
+- John Edward Broadbent
+- Jose Quaresma
+- Joshua Watt
+- Kai Kang
+- Keiya Nobuta
+- Khem Raj
+- Konrad Weihmann
+- Leon Anavi
+- Liam Beguin
+- Marek Vasut
+- Mark Hatle
+- Martin Jansa
+- Michael Opdenacker
+- Mikko Rapeli
+- Narpat Mali
+- Nathan Rossi
+- Niko Mauno
+- Pavel Zhukov
+- Peter Kjellerstedt
+- Peter Marko
+- Polampalli, Archana
+- Qiu, Zheng
+- Ravula Adhitya Siddartha
+- Richard Purdie
+- Ross Burton
+- Sakib Sajal
+- Sean Anderson
+- Sergei Zhmylev
+- Steve Sakoman
+- Teoh Jay Shen
+- Thomas Perrot
+- Tim Orling
+- Vincent Davis Jr
+- Vivek Kumbhar
+- Vyacheslav Yurkov
+- Wang Mingyu
+- Xiangyu Chen
+- Zheng Qiu
+- Ciaran Courtney
+
+
+Repositories / Downloads for Yocto-4.0.6
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.6 </poky/log/?h=yocto-4.0.6>`
+- Git Revision: :yocto_git:`c4e08719a782fd4119eaf643907b80cebf57f88f </poky/commit/?id=c4e08719a782fd4119eaf643907b80cebf57f88f>`
+- Release Artefact: poky-c4e08719a782fd4119eaf643907b80cebf57f88f
+- sha: 2eb3b323dd2ccd25f9442bfbcbde82bc081fad5afd146a8e6dde439db24a99d4
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.6/poky-c4e08719a782fd4119eaf643907b80cebf57f88f.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.6/poky-c4e08719a782fd4119eaf643907b80cebf57f88f.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.6 </openembedded-core/log/?h=yocto-4.0.6>`
+- Git Revision: :oe_git:`45a8b4101b14453aa3020d3f2b8a76b4dc0ae3f2 </openembedded-core/commit/?id=45a8b4101b14453aa3020d3f2b8a76b4dc0ae3f2>`
+- Release Artefact: oecore-45a8b4101b14453aa3020d3f2b8a76b4dc0ae3f2
+- sha: de8b443365927befe67cc443b60db57563ff0726377223f836a3f3971cf405ec
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.6/oecore-45a8b4101b14453aa3020d3f2b8a76b4dc0ae3f2.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.6/oecore-45a8b4101b14453aa3020d3f2b8a76b4dc0ae3f2.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.6 </meta-mingw/log/?h=yocto-4.0.6>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.6/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.6/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.6 </meta-gplv2/log/?h=yocto-4.0.6>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.6/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.6/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.6 </bitbake/log/?h=yocto-4.0.6>`
+- Git Revision: :oe_git:`7e268c107bb0240d583d2c34e24a71e373382509 </bitbake/commit/?id=7e268c107bb0240d583d2c34e24a71e373382509>`
+- Release Artefact: bitbake-7e268c107bb0240d583d2c34e24a71e373382509
+- sha: c3e2899012358c95962c7a5c85cf98dc30c58eae0861c374124e96d9556bb901
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.6/bitbake-7e268c107bb0240d583d2c34e24a71e373382509.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.6/bitbake-7e268c107bb0240d583d2c34e24a71e373382509.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.6 </yocto-docs/log/?h=yocto-4.0.6>`
+- Git Revision: :yocto_git:`c10d65ef3bbdf4fe3abc03e3aef3d4ca8c2ad87f </yocto-docs/commit/?id=c10d65ef3bbdf4fe3abc03e3aef3d4ca8c2ad87f>`
+
+
diff --git a/documentation/migration-guides/release-notes-4.0.7.rst b/documentation/migration-guides/release-notes-4.0.7.rst
new file mode 100644
index 0000000000..95f5b6a3af
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.7.rst
@@ -0,0 +1,242 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.7 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.7
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- binutils: Fix :cve:`2022-4285`
+- curl: Fix :cve:`2022-43551` and :cve_mitre:`2022-43552`
+- ffmpeg: Fix :cve:`2022-3109` and :cve:`2022-3341`
+- go: Fix :cve:`2022-41715` and :cve:`2022-41717`
+- libX11: Fix :cve:`2022-3554` and :cve:`2022-3555`
+- libarchive: Fix :cve:`2022-36227`
+- libksba: Fix :cve:`2022-47629`
+- libpng: Fix :cve:`2019-6129`
+- libxml2: Fix :cve:`2022-40303` and :cve:`2022-40304`
+- openssl: Fix :cve:`2022-3996`
+- python3: Fix :cve:`2022-45061`
+- python3-git: Fix :cve:`2022-24439`
+- python3-setuptools: Fix :cve:`2022-40897`
+- python3-wheel: Fix :cve:`2022-40898`
+- qemu: Fix :cve:`2022-4144`
+- sqlite: Fix :cve:`2022-46908`
+- systemd: Fix :cve:`2022-45873`
+- vim: Fix :cve:`2023-0049`, :cve:`2023-0051`, :cve:`2023-0054` and :cve:`2023-0088`
+- webkitgtk: Fix :cve:`2022-32886`, :cve_mitre:`2022-32891`
+
+
+Fixes in Yocto-4.0.7
+~~~~~~~~~~~~~~~~~~~~
+
+- Revert "gstreamer1.0: disable flaky gstbin:test_watch_for_state_change test"
+- at: Change when files are copied
+- baremetal-image: Avoid overriding qemu variables from IMAGE_CLASSES
+- base.bbclass: Fix way to check ccache path
+- bc: extend to nativesdk
+- bind: upgrade to 9.18.10
+- busybox: always start do_compile with orig config files
+- busybox: rm temporary files if do_compile was interrupted
+- cairo: fix CVE patches assigned wrong CVE number
+- cairo: update patch for :cve:`2019-6461` with upstream solution
+- classes/create-spdx: Add SPDX_PRETTY option
+- classes: image: Set empty weak default IMAGE_LINGUAS
+- combo-layer: add sync-revs command
+- combo-layer: dont use bb.utils.rename
+- combo-layer: remove unused import
+- curl: Correct LICENSE from MIT-open-group to curl
+- cve-check: write the cve manifest to IMGDEPLOYDIR
+- cve-update-db-native: avoid incomplete updates
+- cve-update-db-native: show IP on failure
+- dbus: Add missing CVE product name
+- devtool/upgrade: correctly handle recipes where S is a subdir of upstream tree
+- devtool: process local files only for the main branch
+- dhcpcd: backport two patches to fix runtime error
+- docs: kernel-dev: faq: update tip on how to not include kernel in image
+- docs: migration-4.0: specify variable name change for kernel inclusion in image recipe
+- efibootmgr: update compilation with musl
+- externalsrc: fix lookup for .gitmodules
+- ffmpeg: refresh patches to apply cleanly
+- freetype: update mirror site.
+- gcc: Refactor linker patches and fix linker on arm with usrmerge
+- glibc: stable 2.35 branch updates.
+- go-crosssdk: avoid host contamination by GOCACHE
+- gstreamer1.0: Fix race conditions in gstbin tests
+- gstreamer1.0: upgrade to 1.20.5
+- gtk-icon-cache: Fix GTKIC_CMD if-else condition
+- harfbuzz: remove bindir only if it exists
+- kernel-fitimage: Adjust order of dtb/dtbo files
+- kernel-fitimage: Allow user to select dtb when multiple dtb exists
+- kernel.bbclass: remove empty module directories to prevent QA issues
+- lib/buildstats: fix parsing of trees with reduced_proc_pressure directories
+- lib/oe/reproducible: Use git log without gpg signature
+- libepoxy: remove upstreamed patch
+- libnewt: update 0.52.21 -> 0.52.23
+- libseccomp: fix typo in DESCRIPTION
+- libxcrypt-compat: upgrade 4.4.30 -> 4.4.33
+- libxml2: fix test data checksums
+- linux-firmware: upgrade 20221109 -> 20221214
+- linux-yocto/5.10: update to v5.10.152
+- linux-yocto/5.10: update to v5.10.154
+- linux-yocto/5.10: update to v5.10.160
+- linux-yocto/5.15: fix perf build with clang
+- linux-yocto/5.15: libbpf: Fix build warning on ref_ctr_off
+- linux-yocto/5.15: ltp and squashfs fixes
+- linux-yocto/5.15: powerpc: Fix reschedule bug in KUAP-unlocked user copy
+- linux-yocto/5.15: update to v5.15.84
+- lsof: add update-alternatives logic
+- lttng-modules: update 2.13.7 -> 2.13.8
+- manuals: add 4.0.5 and 4.0.6 release notes
+- manuals: document SPDX_PRETTY variable
+- mpfr: upgrade 4.1.0 -> 4.1.1
+- oeqa/concurrencytest: Add number of failures to summary output
+- oeqa/rpm.py: Increase timeout and add debug output
+- oeqa/selftest/externalsrc: add test for srctree_hash_files
+- openssh: remove RRECOMMENDS to rng-tools for sshd package
+- poky.conf: bump version for 4.0.7
+- qemuboot.bbclass: make sure runqemu boots bundled initramfs kernel image
+- rm_work.bbclass: use HOSTTOOLS 'rm' binary exclusively
+- rm_work: adjust dependency to make do_rm_work_all depend on do_rm_work
+- ruby: merge .inc into .bb
+- ruby: update 3.1.2 -> 3.1.3
+- selftest/virgl: use pkg-config from the host
+- tiff: Add packageconfig knob for webp
+- toolchain-scripts: compatibility with unbound variable protection
+- tzdata: update 2022d -> 2022g
+- valgrind: skip the boost_thread test on arm
+- xserver-xorg: upgrade 21.1.4 -> 21.1.6
+- xwayland: libxshmfence is needed when dri3 is enabled
+- xwayland: upgrade 22.1.5 -> 22.1.7
+- yocto-check-layer: Allow OE-Core to be tested
+
+
+Known Issues in Yocto-4.0.7
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.7
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alejandro Hernandez Samaniego
+- Alex Kiernan
+- Alex Stewart
+- Alexander Kanavin
+- Antonin Godard
+- Benoît Mauduit
+- Bhabu Bindu
+- Bruce Ashfield
+- Carlos Alberto Lopez Perez
+- Changqing Li
+- Chen Qi
+- Daniel Gomez
+- Florin Diaconescu
+- He Zhe
+- Hitendra Prajapati
+- Jagadeesh Krishnanjanappa
+- Jan Kircher
+- Jermain Horsman
+- Jose Quaresma
+- Joshua Watt
+- KARN JYE LAU
+- Kai Kang
+- Khem Raj
+- Luis
+- Marta Rybczynska
+- Martin Jansa
+- Mathieu Dubois-Briand
+- Michael Opdenacker
+- Narpat Mali
+- Ovidiu Panait
+- Pavel Zhukov
+- Peter Marko
+- Petr Kubizňák
+- Quentin Schulz
+- Randy MacLeod
+- Ranjitsinh Rathod
+- Richard Purdie
+- Robert Andersson
+- Ross Burton
+- Sandeep Gundlupet Raju
+- Saul Wold
+- Steve Sakoman
+- Vivek Kumbhar
+- Wang Mingyu
+- Xiangyu Chen
+- Yash Shinde
+- Yogita Urade
+
+
+Repositories / Downloads for Yocto-4.0.7
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.7 </poky/log/?h=yocto-4.0.7>`
+- Git Revision: :yocto_git:`65dafea22018052fe7b2e17e6e4d7eb754224d38 </poky/commit/?id=65dafea22018052fe7b2e17e6e4d7eb754224d38>`
+- Release Artefact: poky-65dafea22018052fe7b2e17e6e4d7eb754224d38
+- sha: 6b1b67600b84503e2d5d29bcd6038547339f4f9413b830cd2408df825eda642d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.7/poky-65dafea22018052fe7b2e17e6e4d7eb754224d38.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.7/poky-65dafea22018052fe7b2e17e6e4d7eb754224d38.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.7 </openembedded-core/log/?h=yocto-4.0.7>`
+- Git Revision: :oe_git:`a8c82902384f7430519a31732a4bb631f21693ac </openembedded-core/commit/?id=a8c82902384f7430519a31732a4bb631f21693ac>`
+- Release Artefact: oecore-a8c82902384f7430519a31732a4bb631f21693ac
+- sha: 6f2dbc4ea1e388620ef77ac3a7bbb2b5956bb8bf9349b0c16cd7610e9996f5ea
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.7/oecore-a8c82902384f7430519a31732a4bb631f21693ac.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.7/oecore-a8c82902384f7430519a31732a4bb631f21693ac.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.7 </meta-mingw/log/?h=yocto-4.0.7>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.7/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.7/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.7 </meta-gplv2/log/?h=yocto-4.0.7>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.7/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.7/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.7 </bitbake/log/?h=yocto-4.0.7>`
+- Git Revision: :oe_git:`7e268c107bb0240d583d2c34e24a71e373382509 </bitbake/commit/?id=7e268c107bb0240d583d2c34e24a71e373382509>`
+- Release Artefact: bitbake-7e268c107bb0240d583d2c34e24a71e373382509
+- sha: c3e2899012358c95962c7a5c85cf98dc30c58eae0861c374124e96d9556bb901
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.7/bitbake-7e268c107bb0240d583d2c34e24a71e373382509.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.7/bitbake-7e268c107bb0240d583d2c34e24a71e373382509.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.7 </yocto-docs/log/?h=yocto-4.0.7>`
+- Git Revision: :yocto_git:`5883e897c34f25401b358a597fb6e18d80f7f90b </yocto-docs/commit/?id=5883e897c34f25401b358a597fb6e18d80f7f90b>`
+
+
diff --git a/documentation/migration-guides/release-notes-4.0.8.rst b/documentation/migration-guides/release-notes-4.0.8.rst
new file mode 100644
index 0000000000..223b74fbaf
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.8.rst
@@ -0,0 +1,217 @@
+.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
+
+Release notes for Yocto-4.0.8 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.8
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- apr-util: Fix :cve:`2022-25147`
+- apr: Fix :cve:`2022-24963`, :cve:`2022-28331` and :cve:`2021-35940`
+- bind: Fix :cve:`2022-3094`, :cve:`2022-3736` and :cve:`2022-3924`
+- git: Ignore :cve:`2022-41953`
+- git: Fix :cve:`2022-23521` and :cve:`2022-41903`
+- libgit2: Fix :cve:`2023-22742`
+- ppp: Fix :cve:`2022-4603`
+- python3-certifi: Fix :cve:`2022-23491`
+- sudo: Fix :cve:`2023-22809`
+- tar: Fix :cve:`2022-48303`
+
+
+Fixes in Yocto-4.0.8
+~~~~~~~~~~~~~~~~~~~~
+
+- core-image.bbclass: Fix missing leading whitespace with ':append'
+- populate_sdk_ext.bbclass: Fix missing leading whitespace with ':append'
+- ptest-packagelists.inc: Fix missing leading whitespace with ':append'
+- apr-util: upgrade to 1.6.3
+- apr: upgrade to 1.7.2
+- apt: fix do_package_qa failure
+- bind: upgrade to 9.18.11
+- bitbake: bb/utils: include SSL certificate paths in export_proxies
+- bitbake: bitbake-diffsigs: Make PEP8 compliant
+- bitbake: bitbake-diffsigs: break on first dependent task difference
+- bitbake: fetch2/git: Clarify the meaning of namespace
+- bitbake: fetch2/git: Prevent git fetcher from fetching gitlab repository metadata
+- bitbake: fetch2/git: show SRCREV and git repo in error message about fixed SRCREV
+- bitbake: siggen: Fix inefficient string concatenation
+- bitbake: utils/ply: Update md5 to better report errors with hashlib
+- bootchart2: Fix usrmerge support
+- bsp-guide: fix broken git URLs and missing word
+- build-appliance-image: Update to kirkstone head revision
+- buildtools-tarball: set pkg-config search path
+- classes/fs-uuid: Fix command output decoding issue
+- dev-manual: common-tasks.rst: add link to FOSDEM 2023 video
+- dev-manual: fix old override syntax
+- devshell: Do not add scripts/git-intercept to PATH
+- devtool: fix devtool finish when gitmodules file is empty
+- diffutils: upgrade to 3.9
+- gdk-pixbuf: do not use tools from gdk-pixbuf-native when building tests
+- git: upgrade to 2.35.7
+- glslang: branch rename master -> main
+- httpserver: add error handler that write to the logger
+- image.bbclass: print all QA functions exceptions
+- kernel/linux-kernel-base: Fix kernel build artefact determinism issues
+- libc-locale: Fix on target locale generation
+- libgit2: upgrade to 1.4.5
+- libjpeg-turbo: upgrade to 2.1.5
+- libtirpc: Check if file exists before operating on it
+- libusb1: Link with latomic only if compiler has no atomic builtins
+- libusb1: Strip trailing whitespaces
+- linux-firmware: upgrade to 20230117
+- linux-yocto/5.15: update to v5.15.91
+- lsof: fix old override syntax
+- lttng-modules: Fix for 5.10.163 kernel version
+- lttng-tools: upgrade to 2.13.9
+- make-mod-scripts: Ensure kernel build output is deterministic
+- manuals: update patchwork instance URL
+- meta: remove True option to getVar and getVarFlag calls (again)
+- migration-guides: add release-notes for 4.0.7
+- native: Drop special variable handling
+- numactl: skip test case when target platform doesn't have 2 CPU node
+- oeqa context.py: fix --target-ip comment to include ssh port number
+- oeqa dump.py: add error counter and stop after 5 failures
+- oeqa qemurunner.py: add timeout to QMP calls
+- oeqa qemurunner.py: try to avoid reading one character at a time
+- oeqa qemurunner: read more data at a time from serial
+- oeqa ssh.py: add connection keep alive options to ssh client
+- oeqa ssh.py: move output prints to new line
+- oeqa/qemurunner: do not use Popen.poll() when terminating runqemu with a signal
+- oeqa/selftest/bbtests: Update message lookup for test_git_unpack_nonetwork_fail
+- oeqa/selftest/locales: Add selftest for locale generation/presence
+- poky.conf: Update SANITY_TESTED_DISTROS to match autobuilder
+- poky.conf: bump version for 4.0.8
+- profile-manual: update WireShark hyperlinks
+- python3-pytest: depend on python3-tomli instead of python3-toml
+- qemu: fix compile error
+- quilt: fix intermittent failure in faildiff.test
+- quilt: use upstreamed faildiff.test fix
+- recipe_sanity: fix old override syntax
+- ref-manual: document SSTATE_EXCLUDEDEPS_SYSROOT
+- scons.bbclass: Make MAXLINELENGTH overridable
+- scons: Pass MAXLINELENGTH to scons invocation
+- sdkext/cases/devtool: pass a logger to HTTPService
+- spirv-headers: set correct branch name
+- sudo: upgrade to 1.9.12p2
+- system-requirements.rst: add Fedora 36 and AlmaLinux 8.7 to list of supported distros
+- testimage: Fix error message to reflect new syntax
+- update-alternatives: fix typos
+- vulkan-samples: branch rename master -> main
+
+
+Known Issues in Yocto-4.0.8
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.8
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alejandro Hernandez Samaniego
+- Alexander Kanavin
+- Alexandre Belloni
+- Armin Kuster
+- Arnout Vandecappelle
+- Bruce Ashfield
+- Changqing Li
+- Chee Yang Lee
+- Etienne Cordonnier
+- Harald Seiler
+- Kai Kang
+- Khem Raj
+- Lee Chee Yang
+- Louis Rannou
+- Marek Vasut
+- Marius Kriegerowski
+- Mark Hatle
+- Martin Jansa
+- Mauro Queiros
+- Michael Opdenacker
+- Mikko Rapeli
+- Mingli Yu
+- Narpat Mali
+- Niko Mauno
+- Pawel Zalewski
+- Peter Kjellerstedt
+- Richard Purdie
+- Rodolfo Quesada Zumbado
+- Ross Burton
+- Sakib Sajal
+- Schmidt, Adriaan
+- Steve Sakoman
+- Thomas Roos
+- Ulrich Ölmann
+- Xiangyu Chen
+
+
+Repositories / Downloads for Yocto-4.0.8
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.8 </poky/log/?h=yocto-4.0.8>`
+- Git Revision: :yocto_git:`a361fb3df9c87cf12963a9d785a9f99faa839222 </poky/commit/?id=a361fb3df9c87cf12963a9d785a9f99faa839222>`
+- Release Artefact: poky-a361fb3df9c87cf12963a9d785a9f99faa839222
+- sha: af4e8d64be27d3a408357c49b7952ce04c6d8bb0b9d7b50c48848d9355de7fc2
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.8/poky-a361fb3df9c87cf12963a9d785a9f99faa839222.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.8/poky-a361fb3df9c87cf12963a9d785a9f99faa839222.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.8 </openembedded-core/log/?h=yocto-4.0.8>`
+- Git Revision: :oe_git:`b20e2134daec33fbb8ce358d984751d887752bd5 </openembedded-core/commit/?id=b20e2134daec33fbb8ce358d984751d887752bd5>`
+- Release Artefact: oecore-b20e2134daec33fbb8ce358d984751d887752bd5
+- sha: 63cce6f1caf8428eefc1471351ab024affc8a41d8d7777f525e3aa9ea454d2cd
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.8/oecore-b20e2134daec33fbb8ce358d984751d887752bd5.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.8/oecore-b20e2134daec33fbb8ce358d984751d887752bd5.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.8 </meta-mingw/log/?h=yocto-4.0.8>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.8/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.8/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.8 </meta-gplv2/log/?h=yocto-4.0.8>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.8/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.8/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.8 </bitbake/log/?h=yocto-4.0.8>`
+- Git Revision: :oe_git:`9bbdedc0ba7ca819b898e2a29a151d6a2014ca11 </bitbake/commit/?id=9bbdedc0ba7ca819b898e2a29a151d6a2014ca11>`
+- Release Artefact: bitbake-9bbdedc0ba7ca819b898e2a29a151d6a2014ca11
+- sha: 8e724411f4df00737e81b33eb568f1f97d2a00d5364342c0a212c46abb7b005b
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.8/bitbake-9bbdedc0ba7ca819b898e2a29a151d6a2014ca11.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.8/bitbake-9bbdedc0ba7ca819b898e2a29a151d6a2014ca11.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.8 </yocto-docs/log/?h=yocto-4.0.8>`
+- Git Revision: :yocto_git:`16ecbe028f2b9cc021267817a5413054e070b563 </yocto-docs/commit/?id=16ecbe028f2b9cc021267817a5413054e070b563>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.9.rst b/documentation/migration-guides/release-notes-4.0.9.rst
new file mode 100644
index 0000000000..883514e686
--- /dev/null
+++ b/documentation/migration-guides/release-notes-4.0.9.rst
@@ -0,0 +1,247 @@
+Release notes for Yocto-4.0.9 (Kirkstone)
+-----------------------------------------
+
+Security Fixes in Yocto-4.0.9
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- binutils: Fix :cve:`2023-22608`
+- curl: Fix :cve:`2023-23914`, :cve:`2023-23915` and :cve:`2023-23916`
+- epiphany: Fix :cve:`2023-26081`
+- git: Ignore :cve:`2023-22743`
+- glibc: Fix :cve:`2023-0687`
+- gnutls: Fix :cve:`2023-0361`
+- go: Fix :cve:`2022-2879`, :cve:`2022-41720` and :cve:`2022-41723`
+- harfbuzz: Fix :cve:`2023-25193`
+- less: Fix :cve:`2022-46663`
+- libmicrohttpd: Fix :cve:`2023-27371`
+- libsdl2: Fix :cve:`2022-4743`
+- openssl: Fix :cve:`2022-3996`, :cve:`2023-0464`, :cve:`2023-0465` and :cve:`2023-0466`
+- pkgconf: Fix :cve:`2023-24056`
+- python3: Fix :cve:`2023-24329`
+- shadow: Ignore :cve:`2016-15024`
+- systemd: Fix :cve:`2022-4415`
+- tiff: Fix :cve:`2023-0800`, :cve:`2023-0801`, :cve:`2023-0802`, :cve:`2023-0803` and :cve:`2023-0804`
+- vim: Fix :cve:`2023-0433`, :cve:`2023-0512`, :cve:`2023-1127`, :cve:`2023-1170`, :cve:`2023-1175`, :cve:`2023-1264` and :cve:`2023-1355`
+- xserver-xorg: Fix :cve:`2023-0494`
+- xwayland: Fix :cve:`2023-0494`
+
+
+Fixes in Yocto-4.0.9
+~~~~~~~~~~~~~~~~~~~~
+
+- base-files: Drop localhost.localdomain from hosts file
+- binutils: Fix nativesdk ld.so search
+- bitbake: cookerdata: Drop dubious exception handling code
+- bitbake: cookerdata: Improve early exception handling
+- bitbake: cookerdata: Remove incorrect SystemExit usage
+- bitbake: fetch/git: Fix local clone url to make it work with repo
+- bitbake: utils: Allow to_boolean to support int values
+- bmap-tools: switch to main branch
+- buildtools-tarball: Handle spaces within user $PATH
+- busybox: Fix depmod patch
+- cracklib: update github branch to 'main'
+- cups: add/fix web interface packaging
+- cups: check PACKAGECONFIG for pam feature
+- cups: use BUILDROOT instead of DESTDIR
+- curl: fix dependencies when building with ldap/ldaps
+- cve-check: Fix false negative version issue
+- dbus: upgrade to 1.14.6
+- devtool/upgrade: do not delete the workspace/recipes directory
+- dhcpcd: Fix install conflict when enable multilib.
+- dhcpcd: fix dhcpcd start failure on qemuppc64
+- gcc-shared-source: do not use ${S}/.. in deploy_source_date_epoch
+- glibc: Add missing binutils dependency
+- image_types: fix multiubi var init
+- iso-codes: upgrade to 4.13.0
+- json-c: Add ptest for json-c
+- kernel-yocto: fix kernel-meta data detection
+- lib/buildstats: handle tasks that never finished
+- lib/resulttool: fix typo breaking resulttool log --ptest
+- libjpeg-turbo: upgrade to 2.1.5.1
+- libmicrohttpd: upgrade to 0.9.76
+- libseccomp: fix for the ptest result format
+- libssh2: Clean up ptest patch/coverage
+- linux-firmware: add yamato fw files to qcom-adreno-a2xx package
+- linux-firmware: properly set license for all Qualcomm firmware
+- linux-firmware: upgrade to 20230210
+- linux-yocto-rt/5.15: update to -rt59
+- linux-yocto/5.10: upgrade to v5.10.175
+- linux-yocto/5.15: upgrade to v5.15.103
+- linux: inherit pkgconfig in kernel.bbclass
+- lttng-modules: fix for kernel 6.2+
+- lttng-modules: upgrade to v2.13.9
+- lua: Fix install conflict when enable multilib.
+- mdadm: Fix raid0, 06wrmostly and 02lineargrow tests
+- meson: Fix wrapper handling of implicit setup command
+- migration-guides: add 4.0.8 release notes
+- nghttp2: never build python bindings
+- oeqa rtc.py: skip if read-only-rootfs
+- oeqa ssh.py: fix hangs in run()
+- oeqa/sdk: Improve Meson test
+- oeqa/selftest/prservice: Improve debug output for failure
+- oeqa/selftest/resulttooltests: fix minor typo
+- openssl: upgrade to 3.0.8
+- package.bbclass: Add check for /build in copydebugsources()
+- patchelf: replace a rejected patch with an equivalent uninative.bbclass tweak
+- poky.conf: bump version for 4.0.9
+- populate_sdk_ext: Handle spaces within user $PATH
+- pybootchartui: Fix python syntax issue
+- python3-git: fix indent error
+- python3-setuptools-rust-native: Add direct dependency of native python3 modules
+- qemu: Revert "fix :cve:`2021-3507`" as not applicable for qemu 6.2
+- rsync: Add missing prototypes to function declarations
+- rsync: Turn on -pedantic-errors at the end of 'configure'
+- runqemu: kill qemu if it hangs
+- scripts/lib/buildstats: handle top-level build_stats not being complete
+- selftest/recipetool: Stop test corrupting tinfoil class
+- selftest/runtime_test/virgl: Disable for all Rocky Linux
+- selftest: devtool: set BB_HASHSERVE_UPSTREAM when setting SSTATE_MIRROR
+- sstatesig: Improve output hash calculation
+- staging/multilib: Fix manifest corruption
+- staging: Separate out different multiconfig manifests
+- sudo: update 1.9.12p2 -> 1.9.13p3
+- systemd.bbclass: Add /usr/lib/systemd to searchpaths as well
+- systemd: add group sgx to udev package
+- systemd: fix wrong nobody-group assignment
+- timezone: use 'tz' subdir instead of ${WORKDIR} directly
+- toolchain-scripts: Handle spaces within user $PATH
+- tzcode-native: fix build with gcc-13 on host
+- tzdata: use separate B instead of WORKDIR for zic output
+- uninative: upgrade to 3.9 to include libgcc and glibc 2.37
+- vala: Fix install conflict when enable multilib.
+- vim: add missing pkgconfig inherit
+- vim: set modified-by to the recipe MAINTAINER
+- vim: upgrade to 9.0.1429
+- wic: Fix usage of fstype=none in wic
+- wireless-regdb: upgrade to 2023.02.13
+- xserver-xorg: upgrade to 21.1.7
+- xwayland: upgrade to 22.1.8
+
+
+Known Issues in Yocto-4.0.9
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- N/A
+
+
+Contributors to Yocto-4.0.9
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Alexander Kanavin
+- Alexis Lothoré
+- Bruce Ashfield
+- Changqing Li
+- Chee Yang Lee
+- Dmitry Baryshkov
+- Federico Pellegrin
+- Geoffrey GIRY
+- Hitendra Prajapati
+- Hongxu Jia
+- Joe Slater
+- Kai Kang
+- Kenfe-Mickael Laventure
+- Khem Raj
+- Martin Jansa
+- Mateusz Marciniec
+- Michael Halstead
+- Michael Opdenacker
+- Mikko Rapeli
+- Ming Liu
+- Mingli Yu
+- Narpat Mali
+- Pavel Zhukov
+- Pawan Badganchi
+- Peter Marko
+- Piotr Łobacz
+- Poonam Jadhav
+- Randy MacLeod
+- Richard Purdie
+- Robert Yang
+- Romuald Jeanne
+- Ross Burton
+- Sakib Sajal
+- Saul Wold
+- Shubham Kulkarni
+- Siddharth Doshi
+- Simone Weiss
+- Steve Sakoman
+- Tim Orling
+- Tom Hochstein
+- Trevor Woerner
+- Ulrich Ölmann
+- Vivek Kumbhar
+- Wang Mingyu
+- Xiangyu Chen
+- Yash Shinde
+
+
+Repositories / Downloads for Yocto-4.0.9
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+poky
+
+- Repository Location: :yocto_git:`/poky`
+- Branch: :yocto_git:`kirkstone </poky/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.9 </poky/log/?h=yocto-4.0.9>`
+- Git Revision: :yocto_git:`09def309f91929f47c6cce386016ccb777bd2cfc </poky/commit/?id=09def309f91929f47c6cce386016ccb777bd2cfc>`
+- Release Artefact: poky-09def309f91929f47c6cce386016ccb777bd2cfc
+- sha: 5c7ce209c8a6b37ec2898e5ca21858234d91999c11fa862880ba98e8bde62f63
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.9/poky-09def309f91929f47c6cce386016ccb777bd2cfc.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.9/poky-09def309f91929f47c6cce386016ccb777bd2cfc.tar.bz2
+
+openembedded-core
+
+- Repository Location: :oe_git:`/openembedded-core`
+- Branch: :oe_git:`kirkstone </openembedded-core/log/?h=kirkstone>`
+- Tag: :oe_git:`yocto-4.0.9 </openembedded-core/log/?h=yocto-4.0.9>`
+- Git Revision: :oe_git:`ff4b57ffff903a93b710284c7c7f916ddd74712f </openembedded-core/commit/?id=ff4b57ffff903a93b710284c7c7f916ddd74712f>`
+- Release Artefact: oecore-ff4b57ffff903a93b710284c7c7f916ddd74712f
+- sha: 726778ffc291136db1704316b196de979f68df9f96476b785e1791957fbb66b3
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.9/oecore-ff4b57ffff903a93b710284c7c7f916ddd74712f.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.9/oecore-ff4b57ffff903a93b710284c7c7f916ddd74712f.tar.bz2
+
+meta-mingw
+
+- Repository Location: :yocto_git:`/meta-mingw`
+- Branch: :yocto_git:`kirkstone </meta-mingw/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.9 </meta-mingw/log/?h=yocto-4.0.9>`
+- Git Revision: :yocto_git:`a90614a6498c3345704e9611f2842eb933dc51c1 </meta-mingw/commit/?id=a90614a6498c3345704e9611f2842eb933dc51c1>`
+- Release Artefact: meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1
+- sha: 49f9900bfbbc1c68136f8115b314e95d0b7f6be75edf36a75d9bcd1cca7c6302
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.9/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.9/meta-mingw-a90614a6498c3345704e9611f2842eb933dc51c1.tar.bz2
+
+meta-gplv2
+
+- Repository Location: :yocto_git:`/meta-gplv2`
+- Branch: :yocto_git:`kirkstone </meta-gplv2/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.9 </meta-gplv2/log/?h=yocto-4.0.9>`
+- Git Revision: :yocto_git:`d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a </meta-gplv2/commit/?id=d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a>`
+- Release Artefact: meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a
+- sha: c386f59f8a672747dc3d0be1d4234b6039273d0e57933eb87caa20f56b9cca6d
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.9/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.9/meta-gplv2-d2f8b5cdb285b72a4ed93450f6703ca27aa42e8a.tar.bz2
+
+bitbake
+
+- Repository Location: :oe_git:`/bitbake`
+- Branch: :oe_git:`2.0 </bitbake/log/?h=2.0>`
+- Tag: :oe_git:`yocto-4.0.9 </bitbake/log/?h=yocto-4.0.9>`
+- Git Revision: :oe_git:`2802adb572eb73a3eb2725a74a9bbdaafc543fa7 </bitbake/commit/?id=2802adb572eb73a3eb2725a74a9bbdaafc543fa7>`
+- Release Artefact: bitbake-2802adb572eb73a3eb2725a74a9bbdaafc543fa7
+- sha: 5c6e713b5e26b3835c0773095c7a1bc1f8affa28316b33597220ed86f1f1b643
+- Download Locations:
+ http://downloads.yoctoproject.org/releases/yocto/yocto-4.0.9/bitbake-2802adb572eb73a3eb2725a74a9bbdaafc543fa7.tar.bz2
+ http://mirrors.kernel.org/yocto/yocto/yocto-4.0.9/bitbake-2802adb572eb73a3eb2725a74a9bbdaafc543fa7.tar.bz2
+
+yocto-docs
+
+- Repository Location: :yocto_git:`/yocto-docs`
+- Branch: :yocto_git:`kirkstone </yocto-docs/log/?h=kirkstone>`
+- Tag: :yocto_git:`yocto-4.0.9 </yocto-docs/log/?h=yocto-4.0.9>`
+- Git Revision: :yocto_git:`86d0b38a97941ad52b1af220c7b801a399d50e93 </yocto-docs/commit/?id=86d0b38a97941ad52b1af220c7b801a399d50e93>`
+
diff --git a/documentation/migration-guides/release-notes-4.0.rst b/documentation/migration-guides/release-notes-4.0.rst
index eaa40f9317..11206072a0 100644
--- a/documentation/migration-guides/release-notes-4.0.rst
+++ b/documentation/migration-guides/release-notes-4.0.rst
@@ -22,8 +22,8 @@ New Features / Enhancements in 4.0
BB_SIGNATURE_HANDLER = "OEEquivHash"
BB_HASHSERVE = "auto"
- BB_HASHSERVE_UPSTREAM = "typhoon.yocto.io:8687"
- SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/&YOCTO_DOC_VERSION;/PATH;downloadfilename=PATH"
+ BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
+ SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
- The Python package build process is now based on `wheels <https://pythonwheels.com/>`__
in line with the upstream direction.
@@ -36,7 +36,7 @@ New Features / Enhancements in 4.0
- Inclusive language adjustments to some variable names - see the
:ref:`4.0 migration guide <migration-4.0-inclusive-language>` for details.
-
+
- New recipes:
- ``buildtools-docs-tarball``
@@ -112,8 +112,7 @@ New Features / Enhancements in 4.0
- BitBake enhancements:
- Fetcher enhancements:
-
- - New :ref:`bitbake:bitbake-user-manual/bitbake-user-manual-fetching:crate fetcher (\`\`crate://\`\`)` for Rust packages
+ - New :ref:`bitbake-user-manual/bitbake-user-manual-fetching:crate fetcher (\`\`crate://\`\`)` for Rust packages
- Added striplevel support to unpack
- git: Add a warning asking users to set a branch in git urls
- git: Allow git fetcher to support subdir param
@@ -125,7 +124,7 @@ New Features / Enhancements in 4.0
- ssh: now supports checkstatus, allows : in URLs (both required for use with sstate) and no longer requires username
- wget: add redirectauth parameter
- wget: add 30s timeout for checkstatus calls
-
+
- Show warnings for append/prepend/remove operators combined with +=/.=
- Add bb.warnonce() and bb.erroronce() log methods
- Improved setscene task display
@@ -138,7 +137,7 @@ New Features / Enhancements in 4.0
- Architecture-specific enhancements:
- ARM:
-
+
- tune-cortexa72: Enable the crc extension by default for cortexa72
- qemuarm64: Add tiny ktype to qemuarm64 bsp
- armv9a/tune: Add the support for the Neoverse N2 core
@@ -161,7 +160,7 @@ New Features / Enhancements in 4.0
- linux-yocto-dev: add qemuriscv32
- packagegroup-core-tools-profile: Enable systemtap for riscv64
- qemuriscv: Use virtio-tablet-pci for mouse
-
+
- x86:
- kernel-yocto: conditionally enable stack protection checking on x86-64
@@ -197,7 +196,7 @@ New Features / Enhancements in 4.0
- yocto-check-layer: improved README checks
- cve-check: add json output format
- cve-check: add coverage statistics on recipes with/without CVEs
-- Added mirrors for kernel sources and uninative binaries on kernel.org
+- Added mirrors for kernel sources and uninative binaries on kernel.org
- glibc and binutils recipes now use shallow mirror tarballs for faster fetching
- When patching fails, show more information on the fatal error
@@ -232,7 +231,7 @@ New Features / Enhancements in 4.0
- Detect more known licenses in Python code
- Move license md5sums data into CSV files
- npm: Use README as license fallback
-
+
- SDK-related enhancements:
- Extended recipes to ``nativesdk``: ``cargo``, ``librsvg``, ``libstd-rs``, ``libva``, ``python3-docutil``, ``python3-packaging``
@@ -240,7 +239,7 @@ New Features / Enhancements in 4.0
- Support creating per-toolchain cmake file in SDK
- Rust enhancements:
-
+
- New python_setuptools3_rust class to enable building python extensions in Rust
- classes/meson: Add optional rust definitions
diff --git a/documentation/overview-manual/concepts.rst b/documentation/overview-manual/concepts.rst
index 065d9586c6..e70c778263 100644
--- a/documentation/overview-manual/concepts.rst
+++ b/documentation/overview-manual/concepts.rst
@@ -34,10 +34,10 @@ itself is of various types:
BitBake knows how to combine multiple data sources together and refers
to each data source as a layer. For information on layers, see the
-":ref:`dev-manual/common-tasks:understanding and creating layers`"
+":ref:`dev-manual/layers:understanding and creating layers`"
section of the Yocto Project Development Tasks Manual.
-Following are some brief details on these core components. For
+Here are some brief details on these core components. For
additional information on how these components interact during a build,
see the
":ref:`overview-manual/concepts:openembedded build system concepts`"
@@ -149,7 +149,7 @@ Conforming to a known structure allows BitBake to make assumptions
during builds on where to find types of metadata. You can find
procedures and learn about tools (i.e. ``bitbake-layers``) for creating
layers suitable for the Yocto Project in the
-":ref:`dev-manual/common-tasks:understanding and creating layers`"
+":ref:`dev-manual/layers:understanding and creating layers`"
section of the Yocto Project Development Tasks Manual.
OpenEmbedded Build System Concepts
@@ -308,7 +308,7 @@ during the build. By default, the layers listed in this file include
layers minimally needed by the build system. However, you must manually
add any custom layers you have created. You can find more information on
working with the ``bblayers.conf`` file in the
-":ref:`dev-manual/common-tasks:enabling your layer`"
+":ref:`dev-manual/layers:enabling your layer`"
section in the Yocto Project Development Tasks Manual.
The files ``site.conf`` and ``auto.conf`` are not created by the
@@ -408,7 +408,7 @@ a ``README`` file as good practice and especially if the layer is to be
distributed, a configuration directory, and recipe directories. You can
learn about the general structure for layers used with the Yocto Project
in the
-":ref:`dev-manual/common-tasks:creating your own layer`"
+":ref:`dev-manual/layers:creating your own layer`"
section in the
Yocto Project Development Tasks Manual. For a general discussion on
layers and the many layers from which you can draw, see the
@@ -565,7 +565,7 @@ Local Projects
~~~~~~~~~~~~~~
Local projects are custom bits of software the user provides. These bits
-reside somewhere local to a project - perhaps a directory into which the
+reside somewhere local to a project --- perhaps a directory into which the
user checks in items (e.g. a local directory containing a development
source tree used by the group).
@@ -814,7 +814,7 @@ For more information on how the source directories are created, see the
":ref:`overview-manual/concepts:source fetching`" section. For
more information on how to create patches and how the build system
processes patches, see the
-":ref:`dev-manual/common-tasks:patching code`"
+":ref:`dev-manual/new-recipe:patching code`"
section in the
Yocto Project Development Tasks Manual. You can also see the
":ref:`sdk-manual/extensible:use \`\`devtool modify\`\` to modify the source of an existing component`"
@@ -1014,8 +1014,8 @@ data files are deleted from the root filesystem. As part of the final
stage of package installation, post installation scripts that are part
of the packages are run. Any scripts that fail to run on the build host
are run on the target when the target system is first booted. If you are
-using a
-:ref:`read-only root filesystem <dev-manual/common-tasks:creating a read-only root filesystem>`,
+using a
+:ref:`read-only root filesystem <dev-manual/read-only-rootfs:creating a read-only root filesystem>`,
all the post installation scripts must succeed on the build host during
the package installation phase since the root filesystem on the target
is read-only.
@@ -1026,7 +1026,7 @@ processing includes creation of a manifest file and optimizations.
The manifest file (``.manifest``) resides in the same directory as the
root filesystem image. This file lists out, line-by-line, the installed
packages. The manifest file is useful for the
-:ref:`testimage <ref-classes-testimage*>` class,
+:ref:`testimage <ref-classes-testimage>` class,
for example, to determine whether or not to run specific tests. See the
:term:`IMAGE_MANIFEST`
variable for additional information.
@@ -1174,7 +1174,7 @@ varflag. If some other task depends on such a task, then that task will
also always be considered out of date, which might not be what you want.
For details on how to view information about a task's signature, see the
-":ref:`dev-manual/common-tasks:viewing task variable dependencies`"
+":ref:`dev-manual/debugging:viewing task variable dependencies`"
section in the Yocto Project Development Tasks Manual.
Setscene Tasks and Shared State
@@ -1351,10 +1351,9 @@ can initialize the environment before using the tools.
the :doc:`/sdk-manual/index` manual.
All the output files for an SDK are written to the ``deploy/sdk`` folder
-inside the :term:`Build Directory` as
-shown in the previous figure. Depending on the type of SDK, there are
-several variables to configure these files. Here are the variables
-associated with an extensible SDK:
+inside the :term:`Build Directory` as shown in the previous figure. Depending
+on the type of SDK, there are several variables to configure these files.
+The variables associated with an extensible SDK are:
- :term:`DEPLOY_DIR`: Points to
the ``deploy`` directory.
@@ -1408,7 +1407,7 @@ This next list, shows the variables associated with a standard SDK:
Lists packages that make up the target part of the SDK (i.e. the part
built for the target hardware).
-- :term:`SDKPATH`: Defines the
+- :term:`SDKPATHINSTALL`: Defines the
default SDK installation path offered by the installation script.
- :term:`SDK_HOST_MANIFEST`:
@@ -1603,15 +1602,15 @@ them if they are deemed to be valid.
the shared state packages. Consequently, there are considerations that
affect maintaining shared state feeds. For information on how the
build system works with packages and can track incrementing :term:`PR`
- information, see the ":ref:`dev-manual/common-tasks:automatically incrementing a package version number`"
+ information, see the ":ref:`dev-manual/packages:automatically incrementing a package version number`"
section in the Yocto Project Development Tasks Manual.
- The code in the build system that supports incremental builds is
complex. For techniques that help you work around issues
related to shared state code, see the
- ":ref:`dev-manual/common-tasks:viewing metadata used to create the input signature of a shared state task`"
+ ":ref:`dev-manual/debugging:viewing metadata used to create the input signature of a shared state task`"
and
- ":ref:`dev-manual/common-tasks:invalidating shared state to force a task to run`"
+ ":ref:`dev-manual/debugging:invalidating shared state to force a task to run`"
sections both in the Yocto Project Development Tasks Manual.
The rest of this section goes into detail about the overall incremental
@@ -1648,7 +1647,7 @@ you a good idea of when the task's data changes.
To complicate the problem, there are things that should not be included
in the checksum. First, there is the actual specific build path of a
-given task - the :term:`WORKDIR`. It
+given task --- the :term:`WORKDIR`. It
does not matter if the work directory changes because it should not
affect the output for target packages. Also, the build process has the
objective of making native or cross packages relocatable.
@@ -1707,7 +1706,7 @@ need to fix this situation.
Thus far, this section has limited discussion to the direct inputs into
a task. Information based on direct inputs is referred to as the
"basehash" in the code. However, the question of a task's indirect
-inputs still exits - items already built and present in the
+inputs still exists --- items already built and present in the
:term:`Build Directory`. The checksum (or
signature) for a particular task needs to add the hashes of all the
tasks on which the particular task depends. Choosing which dependencies
@@ -1982,7 +1981,7 @@ Thanks to this, the depending tasks will keep a previously recorded
task hash, and BitBake will be able to retrieve their output from
the Shared State cache, instead of re-executing them. Similarly, the
output of further downstream tasks can also be retrieved from Shared
-Shate.
+State.
If the output hash is unknown, a new entry will be created on the Hash
Equivalence server, matching the task hash to that output.
@@ -2004,6 +2003,15 @@ task output from the Shared State cache.
the stability of the task's output hash. Therefore, the effectiveness
of Hash Equivalence strongly depends on it.
+ Recipes that are not reproducible may have undesired behavior if hash
+ equivalence is enabled, since the non-reproducible diverging output may be
+ remapped to an older sstate object in the cache by the server. If a recipe
+ is non-reproducible in trivial ways, such as different timestamps, this is
+ likely not a problem. However, recipes that have more dramatic changes (such
+ as completely different file names) will likely fail outright, since the
+ downstream sstate objects are not actually equivalent to what was just
+ built.
+
This applies to multiple scenarios:
- A "trivial" change to a recipe that doesn't impact its generated output,
@@ -2221,3 +2229,173 @@ For more information, see the
BitBake User Manual. You can also reference the "`Why Not
Fakeroot? <https://github.com/wrpseudo/pseudo/wiki/WhyNotFakeroot>`__"
article for background information on Fakeroot and Pseudo.
+
+BitBake Tasks Map
+=================
+
+To understand how BitBake operates in the build directory and environment,
+consider the following recipes and diagram. They give a full picture of the
+tasks that BitBake runs to generate the final package file for a recipe.
+
+We will use two recipes as an example:
+
+- ``libhello``: A recipe that provides a shared library
+- ``sayhello``: A recipe that uses the ``libhello`` library to do its job
+
+.. note::
+
+ ``sayhello`` depends on ``libhello`` at compile time, as it needs the shared
+ library for dynamic linking. It also depends on it at runtime, as the shared
+ library loader needs to find the library.
+ For more details about dependencies, see :ref:`ref-varlocality-recipe-dependencies`.
+
+``libhello`` sources are as follows:
+
+- ``LICENSE``: This is the license associated with this library
+- ``Makefile``: The file used by ``make`` to build the library
+- ``hellolib.c``: The implementation of the library
+- ``hellolib.h``: The C header of the library
+
+``sayhello`` sources are as follows:
+
+- ``LICENSE``: This is the license associated with this project
+- ``Makefile``: The file used by ``make`` to build the project
+- ``sayhello.c``: The source file of the project
+
+Before presenting the contents of each file, here are the steps to follow
+to reach our goal, which is integrating ``sayhello`` into our root file
+system:
+
+#. Create a Git repository for each project with the corresponding files
+
+#. Create a recipe for each project
+
+#. Make sure that the ``sayhello`` recipe :term:`DEPENDS` on ``libhello``
+
+#. Make sure that the ``sayhello`` recipe :term:`RDEPENDS` on ``libhello``
+
+#. Add ``sayhello`` to :term:`IMAGE_INSTALL` to integrate it into
+ the root file system (see the configuration snippet after this list)
+
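+For that last step, a minimal sketch, assuming you add the package globally
+from ``conf/local.conf`` rather than from a custom image recipe, is::
+
+ IMAGE_INSTALL:append = " sayhello"
+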
+The contents of ``libhello/Makefile`` are::
+
+ LIB=libhello.so
+
+ all: $(LIB)
+
+ $(LIB): hellolib.o
+ $(CC) $< -Wl,-soname,$(LIB).1 -fPIC $(LDFLAGS) -shared -o $(LIB).1.0
+
+ %.o: %.c
+ $(CC) -c $<
+
+ clean:
+ rm -rf *.o *.so*
+
+.. note::
+
+ When creating shared libraries, it is strongly recommended to follow the Linux
+ conventions and guidelines (see `this article
+ <https://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html>`__
+ for some background).
+
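+Following these conventions, the installed library typically ends up as one
+real file plus two version symlinks, for example (matching the soname set in
+the ``Makefile`` above)::
+
+ libhello.so.1.0                    actual library file
+ libhello.so.1 -> libhello.so.1.0   runtime (soname) link
+ libhello.so -> libhello.so.1.0     development link
+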
+.. note::
+
+ When writing a ``Makefile``, it is strongly recommended to use ``CC``, ``LDFLAGS``
+ and ``CFLAGS``, as BitBake will set them as environment variables according
+ to your build configuration.
+
+The contents of ``libhello/hellolib.h`` are::
+
+ #ifndef HELLOLIB_H
+ #define HELLOLIB_H
+
+ void Hello();
+
+ #endif
+
+The contents of ``libhello/hellolib.c`` are::
+
+ #include <stdio.h>
+
+ void Hello(){
+ puts("Hello from a Yocto demo \n");
+ }
+
+The contents of ``sayhello/Makefile`` are::
+
+ EXEC=sayhello
+ LDFLAGS += -lhello
+
+ all: $(EXEC)
+
+ $(EXEC): sayhello.c
+ $(CC) $< $(LDFLAGS) $(CFLAGS) -o $(EXEC)
+
+ clean:
+ rm -rf $(EXEC) *.o
+
+The contents of ``sayhello/sayhello.c`` are::
+
+ #include <hellolib.h>
+
+ int main(){
+ Hello();
+ return 0;
+ }
+
+The contents of ``libhello_0.1.bb`` are::
+
+ SUMMARY = "Hello demo library"
+ DESCRIPTION = "Hello shared library used in Yocto demo"
+
+ # NOTE: Set the License according to the LICENSE file of your project
+ # and then add LIC_FILES_CHKSUM accordingly
+ LICENSE = "CLOSED"
+
+ # Assuming the branch is main
+ # Change <username> accordingly
+ SRC_URI = "git://github.com/<username>/libhello;branch=main;protocol=https"
+
+ S = "${WORKDIR}/git"
+
+ do_install(){
+ install -d ${D}${includedir}
+ install -d ${D}${libdir}
+
+ install hellolib.h ${D}${includedir}
+ oe_soinstall ${PN}.so.${PV} ${D}${libdir}
+ }
+
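+If your project ships a real license file instead of being closed source, the
+two commented lines above would typically be replaced with something along
+these lines (MIT is only an example here, and the checksum is whatever
+``md5sum LICENSE`` reports for your file)::
+
+ LICENSE = "MIT"
+ LIC_FILES_CHKSUM = "file://LICENSE;md5=<md5sum of your LICENSE file>"
+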
+The contents of ``sayhello_0.1.bb`` are::
+
+ SUMMARY = "SayHello demo"
+ DESCRIPTION = "SayHello project used in Yocto demo"
+
+ # NOTE: Set the License according to the LICENSE file of your project
+ # and then add LIC_FILES_CHKSUM accordingly
+ LICENSE = "CLOSED"
+
+ # Assuming the branch is main
+ # Change <username> accordingly
+ SRC_URI = "git://github.com/<username>/sayhello;branch=main;protocol=https"
+
+ DEPENDS += "libhello"
+ RDEPENDS:${PN} += "libhello"
+
+ S = "${WORKDIR}/git"
+
+ do_install(){
+ install -d ${D}/usr/bin
+ install -m 0700 sayhello ${D}/usr/bin
+ }
+
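+This demo hardcodes ``/usr/bin`` and a restrictive file mode. A sketch of the
+more common idiom, using the ``${bindir}`` variable and a standard executable
+mode, would be::
+
+ do_install(){
+ install -d ${D}${bindir}
+ install -m 0755 sayhello ${D}${bindir}
+ }
+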
+After placing the recipes in a custom layer, we can run ``bitbake sayhello``
+to build the recipe.
+
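+Assuming the recipes live in a layer named ``meta-demo`` (a hypothetical name
+used here for illustration), this amounts to::
+
+ $ bitbake-layers add-layer ../meta-demo
+ $ bitbake sayhello
+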
+The following diagram shows the sequence of tasks that BitBake
+executes to accomplish that.
+
+.. image:: svg/bitbake_tasks_map.*
+ :width: 100%
diff --git a/documentation/overview-manual/development-environment.rst b/documentation/overview-manual/development-environment.rst
index 19095fc116..c3123dcb6d 100644
--- a/documentation/overview-manual/development-environment.rst
+++ b/documentation/overview-manual/development-environment.rst
@@ -52,7 +52,7 @@ A development host or :term:`Build Host` is key to
using the Yocto Project. Because the goal of the Yocto Project is to
develop images or applications that run on embedded hardware,
development of those images and applications generally takes place on a
-system not intended to run the software - the development host.
+system not intended to run the software --- the development host.
You need to set up a development host in order to use it with the Yocto
Project. Most find that it is best to have a native Linux machine
@@ -94,7 +94,7 @@ are several ways of working in the Yocto Project environment:
through your Linux distribution and the Yocto Project.
For a general flow of the build procedures, see the
- ":ref:`dev-manual/common-tasks:building a simple image`"
+ ":ref:`dev-manual/building:building a simple image`"
section in the Yocto Project Development Tasks Manual.
- *Board Support Package (BSP) Development:* Development of BSPs
@@ -132,6 +132,14 @@ are several ways of working in the Yocto Project environment:
Toaster and on how to use Toaster in general, see the
:doc:`/toaster-manual/index`.
+- *Using the VSCode Extension:* You can use the `Yocto Project BitBake
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+ extension for Visual Studio Code to start your BitBake builds through a
+ graphical user interface.
+
+ Learn more about the VSCode Extension on the `extension's marketplace page
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__.
+
Yocto Project Source Repositories
=================================
@@ -244,8 +252,8 @@ and so forth.
For information on finding out who is responsible for (maintains) a
particular area of code in the Yocto Project, see the
- ":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
- section of the Yocto Project Development Tasks Manual.
+ ":doc:`../contributor-guide/identify-component`"
+ section of the Yocto Project and OpenEmbedded Contributor Guide.
The Yocto Project ``poky`` Git repository also has an upstream
contribution Git repository named ``poky-contrib``. You can see all the
@@ -276,8 +284,8 @@ push them into the "contrib" area and subsequently request that the
maintainer include them into an upstream branch. This process is called
"submitting a patch" or "submitting a change." For information on
submitting patches and changes, see the
-":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
-section in the Yocto Project Development Tasks Manual.
+":doc:`../contributor-guide/submit-changes`" section in the Yocto Project
+and OpenEmbedded Contributor Guide.
In summary, there is a single point of entry for changes into the
development branch of the Git repository, which is controlled by the
@@ -340,11 +348,10 @@ Book <https://book.git-scm.com>`__.
software on which to develop. The Yocto Project has two scripts named
``create-pull-request`` and ``send-pull-request`` that ship with the
release to facilitate this workflow. You can find these scripts in
- the ``scripts`` folder of the
- :term:`Source Directory`. For information
+ the ``scripts`` folder of the :term:`Source Directory`. For information
on how to use these scripts, see the
- ":ref:`dev-manual/common-tasks:using scripts to push a change upstream and request a pull`"
- section in the Yocto Project Development Tasks Manual.
+ ":ref:`contributor-guide/submit-changes:using scripts to push a change upstream and request a pull`"
+ section in the Yocto Project and OpenEmbedded Contributor Guide.
- *Patch Workflow:* This workflow allows you to notify the maintainer
through an email that you have a change (or patch) you would like
@@ -352,8 +359,8 @@ Book <https://book.git-scm.com>`__.
this type of change, you format the patch and then send the email
using the Git commands ``git format-patch`` and ``git send-email``.
For information on how to use these scripts, see the
- ":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
- section in the Yocto Project Development Tasks Manual.
+ ":doc:`../contributor-guide/submit-changes`" section in the Yocto Project
+ and OpenEmbedded Contributor Guide.
Git
===
@@ -655,5 +662,5 @@ Project uses in the ``meta/files/common-licenses`` directory in your
For information that can help you maintain compliance with various open
source licensing during the lifecycle of a product created using the
Yocto Project, see the
-":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
+":ref:`dev-manual/licenses:maintaining open source license compliance during your product's lifecycle`"
section in the Yocto Project Development Tasks Manual.
diff --git a/documentation/overview-manual/intro.rst b/documentation/overview-manual/intro.rst
index a8091771f4..80446b3810 100644
--- a/documentation/overview-manual/intro.rst
+++ b/documentation/overview-manual/intro.rst
@@ -38,7 +38,7 @@ This manual does not give you the following:
procedures reside in other manuals within the Yocto Project
documentation set. For example, the :doc:`/dev-manual/index`
provides examples on how to perform
- various development tasks. As another example, the
+ various development tasks. As another example, the
:doc:`/sdk-manual/index` manual contains detailed
instructions on how to install an SDK, which is used to develop
applications for target hardware.
diff --git a/documentation/overview-manual/svg/bitbake_tasks_map.svg b/documentation/overview-manual/svg/bitbake_tasks_map.svg
new file mode 100644
index 0000000000..09ef36faae
--- /dev/null
+++ b/documentation/overview-manual/svg/bitbake_tasks_map.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than draw.io -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="12270px" height="3804px" viewBox="-0.5 -0.5 12270 3804" content="&lt;mxfile host=&quot;app.diagrams.net&quot; modified=&quot;2023-11-01T08:31:56.536Z&quot; agent=&quot;Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36&quot; etag=&quot;p-CQVME_iMteI52En1Eq&quot; version=&quot;22.0.8&quot; type=&quot;device&quot;&gt;&lt;diagram name=&quot;Page-1&quot; id=&quot;c7558073-3199-34d8-9f00-42111426c3f3&quot;&gt;7V1bc6M6tv41rprzYAohro9JnO7O2X1JJZk9Z89LCtvEZtox3oA7nfn1R1yEQcjmLmRHPVV7YowxRp++dV9rAm9ef3/27d36m7d0NhNFXv6ewNlEUaCl6uj/oiPvyRFFM2FyZOW7y+QYOBx4dP/rpAfl9OjeXTpB4cTQ8zahuyseXHjbrbMIC8ds3/feiqe9eJvit+7slVM68LiwN+Wj/3KX4To5air64fgXx12t8TcD3UremduLnyvf22/T79t6Wyd559XGl0l/Y7C2l95b7hC8ncAb3/PC5K/X3zfOJnqu+Ikln/t05N3sln1nG9b5gPOn9ja72cPZi/UlfHp//N+7+WoKLTO5zi97s08fRnq74Tt+OvHvc6LLgAm8flu7ofO4sxfRu28IEOjYOnzdpG+/uJvNjbfxfPQ6fhrwemkH6+zjL942/GS/upsIK1/duePboett0Tc+2tsgPeHR2/vx9ddhiCCgaPAK/Qf9tOg/0QmBtPK81caxd24gLbzX+I1FEJ/66SW5PPqz8AWacp1+RfnRpU/zl+OHzu/cofRRfna8Vyf00TVl/K6lpuuaYt7Er98OAFLM9Ng6Dx4tPWinoF1lFz+sHvojXcAmi4l3XW7tnCUCevrS88O1t/K29ub2cPT6cPSr5+3SVfqPE4bv6T6196FXXGP02Pz3/0MvZMnQ8Ou/8m/OosckZ6/es1fLq2i7opeLjR0E7uJp7W6TNz65G3z9AmAWe//XeaAnwN97fIHSpQ9tf+WEJ89Mt2G0fifx6DsbdIe/ikRGg1b8UfT07ffcCTvPRY8jd+X76EAO5oZqFGBu6QTLVH1AT6nyAOzkHg4wz35Me+SrssEI+b/dMA989PKv3FsH2Ecv3vN7IPlUo91SQH4Q+t7PTDpBgmknClzKC8dRsjNz78jxv4+0e7BUG3v3mFZRSABACmniE4psnv7EMPsne7RCDWioBphytRqAl7CgBijqUGoAwoxYzDaLCWuspWIwXUugKsOv5SpSxVKZc4nrip6ijiRvfmmhVXNpARhsaXUGS3uJy5nR6SnKVdluUxmUlq5a/ywubidttLsyelA/K023FFsHm+38dUusMFaqlpbVt2bZzeSHJiPDpyVsihj/ICABUObC/jBBUephAjxmfUyx5kg/fyDbQ1UFhLmDMFThWUIYyOrJDwzlfyqrbgLDY2NYxVrZuWFYHZiH9+EP7fr+38Gnuf63/Cd0vb9n0ykQEOYQwup5ahLoE+xZmBrL1Ddhigb0ztIO7Wn09zTwF/EZ+t/7KAB7nSAkwgdCxxFsxMjIcAFnV/4iMsgX0YO7RkhHhqbjHy6J/lpF/x++7vBtpJfG75Q2XGNzH+/OdJvlPTlZvJl766y+6W+Q3nGzbPrTvDgDGv5tWLO74X/ZxrhV186Se2fHTmjAN84ZAb15/s9hGIgDmDVhwQEYKKOb8RhIZ6S3tXI2nuapS9LpMjKqQVsaV7QFNG0UISa81yNAr3e3Tl3o0Y1Ozn1/GUZzCD3glY7RQtpOIQBXM/3to4BRhSpfPIjvnDP9Dd/D/HBAnRjX3/759enu29XNl+enq4fPt0/PPx4nxiyn6M3JCwjlr6PyB4vKn0qLPLPV/nB6ghDd58qWsLbo7j2psSNbQi7ZcuPO1+j2PUF6/ZAeVHQJOzZS3tOUsY1eHCBh7XYTFHbCnK1BYXxlz2R3zhmFBfa7oLAeKcwyinrb+PyF6wfPjr8u2W6tnd6l633TWKugLMQZxLjUyjwdlDVPnj5MTDYzhwTSzxHp2nkiHRB1V4ygfl6k3sKG/zC4NziL0Fht6lvPys3zcaDFR1JiU0rV9REoVTHalDcI7YEPqCt67/EdJlCfamNoDzKf2YuyBKa+LDwRfTlTie4Y8sieCCifldLKA2S6UGftzEZD4YI6gSwXE7+ruNPST54/UAnZeYUDPgqIFU6KFxqDGEB1BA0AtqrAESgeHMW9B60YoVjDgGKKYv2sHL4fBcVQ46QlVmMU68YoKD4rx8OHQTEnkbfmKDbHcJ9BU6CYRxSbxnmiuNSdkJFnjM+yOuEZGzRHx0yxOVqODq5OONWfTdT6dmVNnFJfo6kSZ6VLbRynPUrTyl7bafAWh2tF6VI7MNIXv3cJ3pGr+KxcQhJiu/HsZSCkZD9SclrqZDp+ATruFHYOuSmXLUprlxLhrsLc0BefpUQrN1QEcw3FXBCOzFzoFsZR6FOFLnuRU81OKHR9TIBooxTyANYurKjXZUVOkp0VIv1INSq8MsT5BoteawCWS6cQXa7382hxJUST9mtEejF37gPH39qvTnY0plDpUOZ0CVya225O/I+2EaEOLbjsiVGBQlKqSkknUiBLToVwnEzlzEgWnMqCU+tWfA6VAkIJExLuQ5lAePKj0k8dQN7YZ15iZ6Ui8jMSPeud6flQSC/ouSd61samZ4BTshvRc8EtKKpK2PGsVlt3xSnuvFj0Wpl/OLHohUHfD78BItPXoBTVa0y5TSvHib/ZPx30XJzLWN2cpHrRov+lX52XYPE/mmzT4389rT1WL9K1z7Jb8o2wcG+/wgimoRZfhWfly2kzCJcHFHaQZXrdQK8KOZNlejm49vXu5vb74+3F0Qob1bhEH5RpfGzpQwFtWuAydFR0bkRak3HEmGGgg9pKNx8zkohmFxasKNEiTtdZ+CPwQ81x6Iv7W9rZ4WJ9cSzKl3JmURRz1uzKPFeqJbtKsqwXGNa0YAOORe/cO76LnprjT/rzMPMA8S6EWjcvQcGoHHvaEcmQ5klCJYfSyBCwYNRyzkTsrt24c2khKHVYSqU4ctlSataDjjMHW+gMNe2KE0wy9LBpxV4RozvYlJGajYuuu6f6SFVLVd667hplW4AH6greA9/zwunSCcKl6wsW64fFshY3J2gMKCx5DMoj56iIrmLHG95U05k1Wh3Q6TvnjM58Z+HunGnKaoLN+mEzC9RgM8A07InbOgs244jNao9EMDirJDP4HIn
gvtorR5BYPyQGoSzhrkYneAx7mBjR2LgBfEFjJ8ipmsawD5QXGjNZDZYcZxLaKcwVQhIfB4C9t9XuKEcNLuXoPhCOjb6kqCpLSjEHkpYHp0KcBc5Ijo7T84k/HuQBkh2oz6ybLwf6byfVEYF8NqNwt4vNfimMiN6MCE0iGoxalHm4GpQMpvxnnZU7hAfgdCGp2rlyFmf6mclnEGrjzgVB9UNQqmZl1HOCoJjrZ2XDIMvbEamQ/ebtQKUooIAMygAwKd56rNcPsPzlIiVcaCsFnlQeZCAQ0IkEFDI5DxhSubMkNQA9XPKWVdaQCyCQyok2Agb9wkBVasNgMC6waDpIp3XPPfM056lQQmb7i1SD5V73bLC2AFaTPMB9SfIrq2cY6D9XztRbmCEM24P2kZdeSn5P7sP4KANFs8y6MZJWjuSiQ4Losi5rFY1QymVChG8TAPJKya/u3FJFJwqSAGbhujOmyA8Mk3CfLXaOrW8RS72Ha3e7ij4dwRYbb/Njgev5wZ6TX7zNMjIC5XBtR7/PDSIg+k7gxD+XfsHHvx4ffvx4ep7dPTweue6bG/+cOfpeeeHtXLRHI+LzHak/8ZIvFuWuTHnQ6lUyodGkGJV0RWK4zOxW1e+d3PwNSL6tiCqIF1FHpYC6zrYBMr5bMbsmE5pZFbOTQ4YZMXurwWsddTUVFtzEkmVZFdshfkWWF7aJuxUTBBrvIspOPOt9lfVOqlGf2LsTu9W+Ak01JlU1RtlXfLZ1XnrP7jYI7egX1HO1I80gLG5le+OutujvBYJbtBmvI/3BXdibq/SNV3e5TEJLTuD+155vsNWdLi66rnY90WbRtRBRBAlnANrOy5SXY/oX1/uvvmKlGCSsKe1MLYpiRRomPVa8lWfAXLqjjlULGLO41kf8stSiWmM4tw3gM20LUdbC2764q72fGZNz/8BXgsY4oDEcZDRoOGZLW2YJxSLGyKo3gElxB7Btt6LQ4gpckFja70cQGJcEhqPm9H5SVTHz4ehMoU3i4wLP+y0iyp8C0HwDWpHJTty06ZJsLQulss4uXbkD7kAegvIiW6/DQfgS/yucN8rWyEUtcnDHZyV3E/889OUy0Ha/k88QV9Hp8Y16WwstgrsLnGqNxg526PegFy/u70gLIpJBib1BuMFy72i6BowL2jXAqLNraO3rB9w1lV2PxK4xxK4Z0/qjaEuM90ilC0PsEVPskREly5S0MCgeE8Z7ppzbwYmFgT6+iwcYCBODZ0iX8ktlmjubsYlR9gIKQUBcxRKCYExBAGptG7ayAFZ6T8W2AbLYN2NKG7VGpJzxruGzb1QUc/B2+40dOs8NW+EJxYkFlDVAJnNToJyN4GGEZeFlqpYAQEiAMYuezBrbhqkEgBrzEUrN0sk7p8qKFHQFZ23VGPzce1OadqmysKQqVeTKAo3oasIoWRbSrHViP/VaDMo1FBuEqci0UWo9jkqhwkyrGECDsFpwoRizPBKrYXBUsxrepJx0scnunDOrS4xZ7m/MMgSczVlW1LKDTMxZHia3VMFdkU80mGScXKoqpdW/1Gm4vC3+6GNys2YC3A9yzKk8jcbkinJIRVVr23gaHzbesYzloyZe6QOGysDCU8tpzIH9nhSTiYmNA5MnB1Nw1XKIBK//xa0+oyrC0irX1o+GqyGEeG6GaMZxwZ5Qte5kAohHn48tJbXGnlAdT2pk6wlVaQmswhNazYaqBiTLKtoMHDQnUseZSCCmC5xak9qd6/oP5HREE5+pumJAWZ9OT9Xkb0JZ5mUVPMYPj2lKXR7TVL54DN85ZzwmBkT1yGIWjxOi0PMZhcfEhKi+ua+2oxbPxBm6+3A5dybrxV3RNLg341ErO1l5oNV51MxX0GpPymE0eco6/CtAztIo8XHGQ6gUnU8U4t51U1kCAo79wHEKiH7Q1JHwMtMMDX3k4QNV8rnofSu6juf2wlxS40OZ65iuSmTaA1WVKLqslfQm7u0QbZRtfCeKHB0tzjT4EHqEUVuPMBjpEWhPEUlPMhGtqTvFAOgVF+ppiEH5jtOCqGM3VvqArlV54+Gp8wfyxevVzSQqS0jUUgnJONLvEX1x/F1y/j11Ylz/68fDH7O7h4mB3vyUJC8ePluvoAXtiK/23NlMBqsY20SXRyy9dHxMikvnxd7H9xcgkexuV08xcSuHA1+dl5BrljotwEpiOKqt2m7RUqePapLG64+LZ2SCG2Qf+uRVR5LSiU94Ly+BM4xRgzPcOVMn8ba5ZE2yurosjSg2UTuJlJi0MjD9Tek1QA+6KTQhOQKT1sOL5kYfbOqZgoPKnIF5JmDMLYxBCcY0I0vRaX7UAYHMpwf/UQCZVyADrdpVwBzFfA7pqK15CxQzR7ECJQ7ZmE+Xq2BjbnE8zRLheIIxn310ngSMeYWxCrhUjit76IwVAVu6vgAzt2AmRttZBiWay3JIu2LwORHK3S42e4RNAWZ+wQx1naRmGp5pw1QGxDOf+dMPtzd397fP6XRrgWleMW2ZUDKL00QNSsYNALoEWLqVoQZK6OCq/Vgh56FmYkGxNJ0D+B4FUHWWgZmeWV0wh+HES6a2yWfAQngWuCVJAEuzcwy1HHdjbZOZfAYshKuXYyCTEQseYMxnxGImYMwtjDVYjiDzgGQ+YxYir4d3PMtEAyONMkGJbVaPyaebLOr/7zvRw372nYW7E1MAuGxdYahEhiatnblCK6IYbgoALpoT7Zp6a9ek6kXPEeKxekOyBmzXpFg0dbLT+n6MdjMmIYNorWayzGVGrWYstbR0XBU+fUC/n5UGgmtUKXPWXNtiNdCCv8r4DwlAKHPmeLb4zKEQLUJ6Kx42DS47hFhGC+YTHUL4o76643qg3HuTyo4IrJxdO2bWjaC/nuhPjzp5FOjPolgRzNt34JYiotMbN1yWKWc1uIyvTm/ZnXPGZRt3LnisHx6zTKuU6EfhMcZqXCbUc8CL3Zdo5aX1ZSwyNwMHTK0oxw6dz8bK9IRy2YGNG0BJgSeV8+UEAjqRAJEqgRBAG2ZP7749HAeUvdsFEEjlprYCBv3CQFVqw2A4LqBlGgwZ5LD9RaroypygpI+1JcOUNJIH0CivrK4MN29ErrSV24z5HqnOcO0G0UPaBN4k7vUT2u42OuKF6+h8OZoyGL1+8aNvwYd1+zXC53Ye7Ca5Ced+lgbg7JAx5GwXrhNI6GOztHES+pW5d6Iv9pMnQl5gbgdRiD8CmJy9S32y5d+Ez9/jA6vFIqd578nzJoWB6jd9fCNakh16cD6i/qD+V8v2dtnt22udj26qwfM427SNlCWLLFukyUsME2takTN12qB6WszfGEwnAnwa5Gnt0/Ps9vFJpN9xnH6nk6mktAKoQ8oKGw2PotAxjNngv2N/ZRTSqhe0meTdnJXTPcXYMYhn2VV6QQHOrOLFC6qMOnWgiE9D4HN8fOp84RNU5uIvsuU6SG74soj+Vxbmn91wvb9sB3u1DEfvKFBVtZ7yR6cqIMvqoFV2reAyt7zczfKJBwAOn1k6S+/5xQkX67Ite8Y21AWmvkf1bGQOkCpTUM0y+R2C6r
7fydodgAcK3iM6WSYa+eheptMejWrfWHKV/AiNxg4KtAjuLnCq2d4Oduj3oBcv7u9IQhA6UW0i1nQNGJe0b6KusDX2DWS6bxQhDIQw6FkYaGMLA0UIg0phoAlhwJswoOwbtsIA8tk4BgmDhbd9cVd73xECgW9gA6AW0wkshZJLANjCWhHioNI2UIQ8GHXbgOptw1gY8Nm1JhYGcXKAEAW8Y1onMK3R8gtpofQBUV3pNReyIOqDKWTBiPsG1tk3bKWBymcKCpIG+y1C2E8hDPgGtULoNzTPJ1tJoFYau0ISCKNgVEFgVu8ZxlKgTZ0pw8rSC2oDAnHqfnX9qMpXZooKTYESDlHSe35d/NGmg8eBrBIZMcrpOeIWME6dPxlkjrgKBdPxh2G1/xw8NhgGwDj5gWFAnD1Yzoy2KJhz0YmGDCv5DxV9KbIUysAeWlPDPqr49+EP7fr+38Gnuf63/Cd0vb9n0ynOKGHHnHnerJpqkvVsmuTzq0+nVxe4ts1YFB5w2YWftTI/05eeDxXDJPzO+AccY2fyfLxfjmokOKBIP38oMi8P2tx4C3sjJXR6CbzJvMEvpZ0dpHX4Vcv82UcHBPom4rwlbIENaxLg+dSJ1mBDSntO+kIqfbNhR+dNOdVzPt/Y744fCA7ptZcSjUOoOS/DtVHRKuM17b3COd/ykfL9/Kivqpr9ZdLaIGpmsHK2aFWTpgVh1E+h+psmiC6NbACIMYvNlejQ9f139HKavLj/M36rWcn+IZR0t6Xdy8oNj/ykZHJD9IsWa3uLdj9arbDTbyp+Vy8dBxrtbu4IvNiHoA9DStFK+1ijzaWGB5WhULClDbeXFS7N+GhUiX0yR9/FB/7x3YvXB53tBP+TQ6h7fNOJ+C1XsSgyr9OkZCtTZdxw7QK1SveWiOCKVB6u0jppzdLZhnA1kQBXuWlUsWnGnK2F53DjpnW02VqMNw23VZLuNgjt6BcI9YlnTAMIVNK6ADIlVMM2B04ThZLV2dBCGoy6c5ClUb1t2IoDXSmvsxi3MK5bXqs9Okbna3RMduecKRdRVr29EqNj+vIv6oQDxaA4F2kjkAf0n1hcAu/+j8+zi0bdWfcSBboFJZzjg6FMGU0Pdcmghc0H6ydqsoqbZ1PfjO5j36rSlQrBdtGuMZPd1VIe106MnSqq48lwmTejKleUTEciPzFQfhF+tDlpcBVbeLvo93qRPUdvWl8MW85ykVaqNyI73/kdzaZzt6vjp+eu/Sl4D3zPC6feNgIXzSLrJBuO+B+4C3wOO/BcB4akF91vtDHZyAajTrgbUFnRK/3W4wVBC4qycMFx6kjQDKS4qAS1UlzLbF1wejmtU7jgSBecaFY27s5RoYRLP/kJypjlgXDCCze2fl57dn3/+nlHHuYzxJcqF8E02G0KOYCX5xZh6Ywz5WpnHHXi3YAKbmUsbjRv3O3j00Uj77wdchaRY0TzxukUKA/nijOMUSRz5phr4pf7EFLZqiuVDciZVOYzRBHY7/FE2otmRabyWDGJQSkmZUYZ2/CYIaL83DEZ5qcaTMbZODF855wx2T7wBYn1RWLQJKuJLcq8ZRXSBi0OyGNtAqMjaWOC+6oZrbLwGAwT+qwRqZShQY4NT+42/SQB5T7Clkal/3wUXp0j/Ale7YtXoQSUIq8atJa4GqRGJYejVjy5jwK+YGdvOwU9PnmbpePHRcS+Y4cIINGv8qNiYjv+7853AidGuEutT8YHsmrh+6ubP64+3zYpzv5l+24cXYzHul+74bX9M/run1vvLbq1t7Ud3UAyd/0tvb24zHm3jyueEQ+vohP3QRzoRwecWvf66e5rhxt1ftuvu036hOt+WXRyXCz+vVGxuIy2Q1zr/ebGiFl5k4aV3g2/sJiG8RLDJH76blSLK7vBkIjIjiX4FrXltRhMkzUJFtsY0mrLoQKoBAZUMzs+AInxKUFFZsXZxIcNtSSgOcisMCsjeiKzIpoFLDIrRtw5AJKCgYPMCrxPheeTH+vfNMrW/5Ezrb7N/448zGdge+nsNt779KiSnLWm2f1c5Qix1LGmDXEKk79iWoiscJefYbXpaytIcVhSrB3YtjgLB+E754wUKWwYeygSP8Hz1cPNl6KvQlDf0NRHbbTFmPqqG221tkAQLcU+zthbOq/nHPR3r0F9l9VNrWsunXmDa8qeX+uq7u5n7atKZT/H09oJ8g5f75e7jB3B8/fyydV3k/pTntHz6/vx4Uujx9jgKSZ+4/pXR4+zwdVjwRe7Zn0nNkARrUdb/Sb2R9eDGma+m69Xj4+NPNIvMUTC1EGeoeCwXgu0QxL/fVRcN+i9pK7peHvZu50TM3Dphmo6re++f7l9uHuq//VtvXfCi0zEwYgOAjQXskprwTio/1gF2iiKcZpfgP+uM8ihbRW2KLzO9OfqGT24e9vIhdeHqSc4ZJw2cTtWd136gCZXFGqrhM+b+MBkmDptqxyuwemoU1kCU1+WqEp7pl5UOzYuQWVn3gceWKpO5LBalNJpBdd5FFy5Cpk206PyXt3jravyHtrBz+jeN0GkQSydSMGIjnuNotDN9ZqbNkpwC/9eo4B8U0tmAENmEDumseo7nD3S3vLp8HD2x3fKW7qVo70y9zZL6l6ptmEKMMqbLHTLcIIHJiS5QVmwvDBGYWu/xu8m+TfRHbbZMS2yVOqBNb30w3CXPuq5qmWbVrY8GZ5cWBhPeYGZlrpxZ08dF969W1qKQkxepvVAgbSpesAYygmnyqOkcfOw8u3NFlWu2y9KlUerfKMPj2rT3KC3KYo5+/ikSYx3YzrDZ/LhZyVSAUdfYL76H2Q3zllAKonSDyP0OEAa61R6aJA2Kst5sUccRuPUJ7UMrPOAmi78VLeAcgA/XkeCOh6EnBd1/tnt/dcffz3P7h6ea+vjx4LagmAaEoyqS7g9Op6GSKnkBorFtlRHlfnssn3Icn9+89H59RErkuJHINgm7mCtKGipeb3Z1EAmib2qXJl1JFLigXHKESNS4gffN0T+E9N8eHo9NXNbvFl4uo2ZXbDTeYBOm3h+LT347kH7z+rd/e/1p6vdf7b6dbiWnWntNiI4esaLFgxoWjDV0S0014aaqwYI01jFzTxGy7xUQXUNW4cCc/8QNgqSsErYIhvz7v6PvmM3D/ff2obpjl90dtsg7CL/o6SIL9bO4mf8xJ1d9FzzekJ2Un64sUhua1IiTXTOgaopYbIr5LfRrEYg4SKjAbYgn+VLByfHRTtHz7s1J5IgZHknpGizJkWmDNadUwV8ekKevt0LMHMMZhMW7TKFAmSmbWZVwGcN1eyrYGUugXyCgXVF0k2W2FWUFj6FHoNek0bR/nbJ9rUcEdyptrWcCp2Da3rfboVjOeqmLKmWnP0jQjOmIammZiBFJfkvoUUnP3mw5n0qHsfDGYOLoMyFBmWQ0lK0Mg2D4uJhG5PBMkbEZE7EZEwRkxlz2wCiNIq2bZiGZMwW2lO3iIzx8UIyHTQhpW63IlXpvVtRu3JBiJU3DPF0LPvRckHyA3qqWw1a/Zc92IK0ECZlQ5OyF04kWxTRhmAz9
YcorFiRPgE7swCraq+bUWkbO/I8qZQaqVZrEmnWpIWXSLVS6Z1j2zTm/o/P/3q4e7qd3T00Ci/Wi1jmrt4szlgzeJm7fqN46+C1mEP1hiEN6YY/o1nBarE/DJOSO+78Xv2HdC2rKCGhCSRKSoUKdMmkOF+N0iyJ/sgJKkfJqXPj/q/uwtnG7YXijIGk7/06brvToDV75IPydntE1c7zxl00ALP9ElviNb/DDqOu8Scgf/LrkgZOcwexTNz5P+12hH97sH/Ff5a/I3kkWaOApqW2X+9unuPG/c83X/54/GeThBH3ZZJOIiDvaZOtXSxgtl5Y60HefP3xeDtrT265sz5MX31KzXEPpDNViJk0hkWbFAKVw6imQv2uPFwnDhUe93Z3Jp1UIzqMwUgAvA/i6v+4s1fgoCeL6CSWdwt351B6ks3t5AMxMtAOcf1M95TQX+m3rO0g+xlJ97LWTQXIrMXo4NPVw+fbp+c/b7/PfkSdAaaFwz8em03LEFttoK2m6hrZ9Z/akQyqdPE+lG+QmRnc2qJtEU09bt5+jLpqSBksT199zhK2YTmDd+FtX6ZItXACKfwdXpz7jlG7LdL/BiEllEfrhqgMRTygTYfwEbI4moxk/Oj9HHBqf/XQxdEajNNvh70QbCUD5Ul9GSiwWBeLnMlAfOOcpfkEQYjMoenCXhxcAReZrsmyjKpWgxEgQwk3nGFTSKUed/iNCcHHx6erp1uRL8xlvjCO9KpQskz5aA4lraCDDu8+Qr8nJz5TegJfHIAYmRcmUCXLKqw0kCltAGkzDZVeIhf0lYbM9TqgWnnNbipLsgyr1Lvo1b3ju+hXR8mrTSL5x9U8HtDYXn+DuAVNZSxfGS+Wf9K2GdBxnY0xjqZi2O42Ce/Hilkaws581vJ8725iJ/U+jKdM5wqX807vaEbKYWR1dNIh7pNebVKIUy3d6GNJzGextrdxM103eoXHXafxreQS+OsT7/r+EDVK3g7sV4fSuFf4n/vyP2d9pU4IYuo4DGswB9DxUuHO+2RQbTW/iMkSUvZpMopo6aKtE3q+6xAZOEg38+N9Zy8WvhdkGzXAOySwfyUXiJvFBzGY8QggvKVDJDCOasJiKw0WNdUP4VCs7Jg1d5M6lKqjVHoNOksdDOb3ouDxEIP7eYYvgj7YRykT8UynOq3zX5P2Gv3mVAXLJvlNdedspekPDe5W5Er1Y26Q7S8UnZa2cCxVarg9WOk26a75uXG6Qm772fF9ldC6sd8dPziC8FQzy9Q9JI68fSxS0IUDN9Et39ZOhJON56VZWYlmiR6VHcnT09rah4HzMCIGyERijkqL11F76g9WRVSj9K4ruuP/RFk1CRJf3NU+W+Ai8hNEOkGAnrFrbxKTws8+1ljufLu6+XL3/XYAKTG7e3x6+CFkxPibysJ5rJnSRpEZKk1pG2xLDTptyonZOgiSOTeJVbH23iYnkl5TtwxS5pwp2mThNDY1ps72lwArYwkAcFA603Eo49FUkLX96huw1Poa2tiDo0srVy8t5dkVx0dGJ+VjGkceI+VhH49QQLIWjWK8UWvRhnqqNLXx7J4qWfUcDfij1C/QOkIO9mBp7T/P7sHKxIMFQKY8Vkp69mCPlZaZfX6Plai3MSCFX1mClaZen9tTBWpprCfUqXVMPT1a9NL3Ir0ne+8zehDrb97Sic74fw==&lt;/diagram&gt;&lt;/mxfile&gt;" style="background-color: rgb(255, 255, 255);"><defs><style type="text/css">@import url(https://fonts.googleapis.com/css?family=Liberation+Sans);&#xa;</style></defs><g><rect x="9428" y="2640" width="1120" height="600" rx="90" ry="90" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><path d="M 8868 3120.57 Q 8656.57 3120.57 8656.57 1980.33" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 8656.57 1959.33 L 8665.9 1987.33 L 8656.57 1980.33 L 8647.24 1987.33 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 9228 3600 Q 9228 3760.57 9608 3760.57 Q 9988 3760.57 9988 3280.42" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9988 3253.42 L 10006 3289.42 L 9988 3280.42 L 9970 3289.42 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="8868" y="2640" width="480" height="960" rx="72" ry="72" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><rect x="2868" y="2640" width="1080" height="960" rx="144" ry="144" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><rect x="1082" y="840" width="1080" height="440" rx="66" ry="66" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><rect x="2148" y="2640" width="560" height="960" rx="84" ry="84" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><path d="M 4928 160 L 4928 360" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" 
pointer-events="stroke"/><path d="M 4928.57 160 L 4928.57 200.57 Q 4928.57 240.57 4888.57 240.57 L 1662.29 240.57 Q 1622.29 240.57 1622.19 280.57 L 1622.06 334.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1622.01 355.53 L 1608.08 327.49 L 1622.06 334.53 L 1636.08 327.56 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 4928.57 160 L 4928.57 200.57 Q 4928.57 240.57 4968.57 240.57 L 5808.57 240.57 Q 5848.57 240.57 5848.38 280.57 L 5848.12 334.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5848.02 355.53 L 5834.16 327.46 L 5848.12 334.53 L 5862.16 327.6 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 4928.57 160 L 4928.57 200.57 Q 4928.57 240.57 4968.57 240.57 L 7273.71 240.57 Q 7313.71 240.57 7313.81 280.57 L 7313.94 334.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7313.99 355.53 L 7299.92 327.56 L 7313.94 334.53 L 7327.92 327.49 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 4928.57 160 L 4928.57 200.57 Q 4928.57 240.57 4888.57 240.57 L 2620 240.57 Q 2580 240.57 2580.01 280.57 L 2580.02 334.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 2580.02 355.53 L 2566.01 327.53 L 2580.02 334.53 L 2594.01 327.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="4788" y="0" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 20px; margin-left: 1198px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">tmp</font></div></div></div></foreignObject><text x="1232" y="24" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">tmp</text></switch></g><path d="M 4928 520 L 4928 1040" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><rect x="4788" y="360" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 110px; margin-left: 
1198px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">work</font></div></div></div></foreignObject><text x="1232" y="114" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">work</text></switch></g><path d="M 4928 1200 L 4928.29 1240.57 Q 4928.57 1280.57 4968.57 1280.57 L 5648.57 1280.57 Q 5688.57 1280.57 5688.57 1320.29 L 5688.57 1360" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 4928 1200 L 4928.29 1240.57 Q 4928.57 1280.57 4888.57 1280.57 L 3156 1280.57 Q 3116 1280.57 3115.98 1320.29 L 3115.96 1360" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 4588 1120 Q 4398.29 1120.57 4398.24 1334.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 4398.24 1355.53 L 4388.91 1327.53 L 4398.24 1334.53 L 4407.58 1327.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="4588" y="1040" width="680" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 168px; height: 1px; padding-top: 280px; margin-left: 1148px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter"><b>${MULTIMACH_TARGET_OS}</b></font></div></div></div></foreignObject><text x="1232" y="284" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">${MULTIMACH_TARGET_OS}</text></switch></g><path d="M 3115.96 1520 L 3116 1680" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><rect x="2975.96" y="1360" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 360px; margin-left: 745px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: 
rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">libhello</font></div></div></div></foreignObject><text x="779" y="364" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">libhello</text></switch></g><path d="M 5688.57 1520 L 5688.57 1560.57 Q 5688.57 1600.57 5688.57 1570.29 L 5688.57 1555.14 Q 5688.57 1540 5688.29 1580 L 5688 1620" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><rect x="5548" y="1360" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 360px; margin-left: 1388px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">sayhello</font></div></div></div></foreignObject><text x="1422" y="364" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sayhello</text></switch></g><path d="M 3116 1840 L 3116 1960.57 Q 3116 2000.57 3076 2000.57 L 2036 2000.57 Q 1996 2000.57 1996 2040.57 L 1996 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1996 2155.53 L 1986.67 2127.53 L 1996 2134.53 L 2005.33 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 3116 1840 L 3116 1960.57 Q 3116 2000.57 3076 2000.57 L 2468 2000.57 Q 2428 2000.57 2428 2040.57 L 2428 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 2428 2155.53 L 2418.67 2127.53 L 2428 2134.53 L 2437.33 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 3116 1840 L 3115.96 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 3115.96 2155.53 L 3101.96 2127.53 L 3115.96 2134.53 L 3129.96 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 3116 1840 L 3116 1960.57 Q 3116 2000.57 3156 2000.57 L 4268 2000.57 Q 4308 2000.57 4308 2040.57 L 4308 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 4308 2155.53 L 4294 2127.53 L 4308 2134.53 L 4322 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 3116 1840 L 3116 1960.57 Q 3116 2000.57 3076 2000.57 L 1508 2000.57 Q 1468 2000.57 1468 2040.57 L 1468 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1468 2155.53 L 1458.67 
2127.53 L 1468 2134.53 L 1477.33 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="2976" y="1680" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 440px; margin-left: 745px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">0.1-r0</font></div></div></div></foreignObject><text x="779" y="444" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">0.1-r0</text></switch></g><path d="M 5688.57 1780 L 5688.57 1960.57 Q 5688.57 2000.57 5648.57 2000.57 L 5568.57 2000.57 Q 5528.57 2000.57 5528.43 2040.57 L 5528.09 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5528.02 2155.53 L 5514.12 2127.48 L 5528.09 2134.53 L 5542.12 2127.58 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5688.57 1780 L 5688.57 1960.57 Q 5688.57 2000.57 5728.57 2000.57 L 6988 2000.57 Q 7028 2000.57 7028 2040.57 L 7028 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7028 2155.53 L 7014 2127.53 L 7028 2134.53 L 7042 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5688.57 1780 L 5688.57 1960.57 Q 5688.57 2000.57 5728.57 2000.57 L 7676 2000.57 Q 7716 2000.57 7715.99 2040.57 L 7715.97 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7715.96 2155.53 L 7701.97 2127.52 L 7715.97 2134.53 L 7729.97 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5688.57 1780 L 5688.57 1960.57 Q 5688.57 2000.57 5728.57 2000.57 L 8313.71 2000.57 Q 8353.71 2000.57 8353.79 2040.57 L 8353.95 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 8353.99 2155.53 L 8339.94 2127.55 L 8353.95 2134.53 L 8367.94 2127.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5688.57 1780 L 5688.57 1960.57 Q 5688.57 2000.57 5728.57 2000.57 L 9068 2000.57 Q 9108 2000.57 9108 2040.57 L 9108 2134.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9108 2155.53 L 9094 2127.53 L 9108 2134.53 L 9122 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5688.57 1780 L 5688.57 1960.57 Q 5688.57 2000.57 5728.57 2000.57 L 9948 2000.57 Q 9988 2000.57 9988 2040.57 L 9988 2134.53" 
fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9988 2155.53 L 9974 2127.53 L 9988 2134.53 L 10002 2127.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5548" y="1620" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 425px; margin-left: 1388px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">0.1-r0</font></div></div></div></foreignObject><text x="1422" y="429" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">0.1-r0</text></switch></g><path d="M 1622.29 520 L 1622.29 560.57 Q 1622.29 600.57 1622.29 560.57 L 1622.29 540.57 Q 1622.29 520.57 1622.29 560.29 L 1622.29 600" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1762 440 Q 2215.43 440.57 2215.04 205.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 2215.01 184.47 L 2224.39 212.46 L 2215.04 205.47 L 2205.72 212.49 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="1482" y="360" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 110px; margin-left: 371px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">downloads</font></div></div></div></foreignObject><text x="405" y="114" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">downloads</text></switch></g><path d="M 1622 760 L 1622.14 800.57 Q 1622.29 840.57 1622.29 800.57 L 1622.29 780.57 Q 1622.29 760.57 1622.29 800.29 L 1622.29 840" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><rect x="1482" y="600" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" 
pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 170px; margin-left: 371px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">git2</font></div></div></div></foreignObject><text x="405" y="174" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">git2</text></switch></g><path d="M 2082 1160 L 2628 1160.53 Q 2668 1160.57 2668 1200.57 L 2668 2200.57 Q 2668 2240.57 2628 2240.41 L 2568.42 2240.16" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 2541.42 2240.05 L 2577.49 2222.2 L 2568.42 2240.16 L 2577.34 2258.2 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="1162" y="880" width="920" height="160" rx="24" ry="24" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 228px; height: 1px; padding-top: 240px; margin-left: 291px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">github.com.&lt;username&gt;.sayhello</div></div></div></foreignObject><text x="405" y="244" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">github.com.&lt;username&gt;.sayhello</text></switch></g><path d="M 2082 960 L 6628 960.57 Q 6668 960.57 6668 1000.57 L 6668 2200.57 Q 6668 2240.57 6708 2240.48 L 6887.58 2240.09" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 6914.58 2240.03 L 6878.62 2258.11 L 6887.58 2240.09 L 6878.54 2222.11 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="1162" y="1080" width="920" height="160" rx="24" ry="24" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 228px; height: 1px; padding-top: 290px; margin-left: 291px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: 
border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">github.com.&lt;username&gt;.libhello</div></div></div></foreignObject><text x="405" y="294" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">github.com.&lt;username&gt;.libhello</text></switch></g><path d="M 2428 2320 Q 2428 2320 2428 2614.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 2428 2635.53 L 2414 2607.53 L 2428 2614.53 L 2442 2607.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="2328" y="2160" width="200" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 48px; height: 1px; padding-top: 560px; margin-left: 583px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">git</font></div></div></div></foreignObject><text x="607" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">git</text></switch></g><rect x="2212" y="2720" width="456" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 690px; margin-left: 554px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Makefile</div></div></div></foreignObject><text x="610" y="694" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">Makefile</text></switch></g><path d="M 2212 2880.57 L 2117.14 2880.57 Q 2077.14 2880.57 2077.14 2840.57 L 2077.14 2714.86 Q 2077.14 2674.86 2037.14 2674.9 L 1966.47 2674.97" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 1945.47 2675 L 1973.46 2665.63 L 1966.47 2674.97 L 1973.48 2684.3 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="2212" y="2840" width="456" height="80" rx="12" ry="12" fill="#eeeeee" stroke="#36393d" 
stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 720px; margin-left: 554px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">LICENSE</div></div></div></foreignObject><text x="610" y="724" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">LICENSE</text></switch></g><path d="M 2212 3000 Q 2028 3000.57 2028 3060.57 Q 2028 3120.57 2171.58 3120.13" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 2198.58 3120.04 L 2162.62 3132.15 L 2171.58 3120.13 L 2162.55 3108.15 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="2212" y="2960" width="456" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 750px; margin-left: 554px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">fix.patch</div></div></div></foreignObject><text x="610" y="754" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">fix.patch</text></switch></g><path d="M 2214.74 3151.44 Q 1988 3151.43 1988 3278.29 Q 1988 3405.14 2179.58 3405.02" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 2206.58 3405.01 L 2170.6 3423.03 L 2179.58 3405.02 L 2170.57 3387.03 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="2212" y="3080" width="456" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 780px; margin-left: 554px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: 
normal; overflow-wrap: normal;">hellolib.c</div></div></div></foreignObject><text x="610" y="784" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">hellolib.c</text></switch></g><rect x="1896" y="2160" width="200" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 48px; height: 1px; padding-top: 560px; margin-left: 475px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">temp</font></div></div></div></foreignObject><text x="499" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">temp</text></switch></g><path d="M 4308 2320 L 4308 2640" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><rect x="4068" y="2160" width="480" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 560px; margin-left: 1018px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">sysroot-destdir</font></div></div></div></foreignObject><text x="1077" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sysroot-destdir</text></switch></g><path d="M 5528 2320 L 5528 2614.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5528 2635.53 L 5518.67 2607.53 L 5528 2614.53 L 5537.33 2607.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5308" y="2160" width="440" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 108px; height: 1px; padding-top: 560px; margin-left: 
1328px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">recipe-sysroot</font></div></div></div></foreignObject><text x="1382" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">recipe-sysroot</text></switch></g><path d="M 3116 2320 L 3116 2694.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 3116 2715.53 L 3106.67 2687.53 L 3116 2694.53 L 3125.33 2687.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="2989.96" y="2160" width="252" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 61px; height: 1px; padding-top: 560px; margin-left: 748px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">image</font></div></div></div></foreignObject><text x="779" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">image</text></switch></g><path d="M 3116 2880 L 3116 2920.57 Q 3116 2960.57 3115.99 2987.55 L 3115.99 3014.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 3115.98 3035.53 L 3106.65 3007.53 L 3115.99 3014.53 L 3125.32 3007.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 3116 2880 Q 3116 2960.57 3354.86 2960.57 Q 3593.71 2960.57 3593.91 3014.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 3593.98 3035.53 L 3584.55 3007.56 L 3593.91 3014.53 L 3603.22 3007.49 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="3029" y="2720" width="174" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 42px; height: 1px; padding-top: 700px; margin-left: 758px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; 
font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">usr</font></div></div></div></foreignObject><text x="779" y="704" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">usr</text></switch></g><path d="M 3115.98 3200 L 3116 3294.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 3116 3315.53 L 3106.66 3287.53 L 3116 3294.53 L 3125.33 3287.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="3008.48" y="3040" width="215" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 52px; height: 1px; padding-top: 780px; margin-left: 753px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">include</font></div></div></div></foreignObject><text x="779" y="784" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">include</text></switch></g><path d="M 3593.71 3200 L 3593.71 3240.57 Q 3593.71 3280.57 3593.76 3287.55 L 3593.82 3294.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 3593.97 3315.53 L 3579.77 3287.63 L 3593.82 3294.53 L 3607.76 3287.43 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="3507" y="3040" width="174" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 42px; height: 1px; padding-top: 780px; margin-left: 878px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">lib</font></div></div></div></foreignObject><text x="899" y="784" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">lib</text></switch></g><rect x="2956" y="3320" width="320" height="100" rx="15" ry="15" fill="#f5f5f5" stroke="#666666" stroke-width="4" 
pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 843px; margin-left: 740px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">hellolib.h</div></div></div></foreignObject><text x="779" y="846" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">hellolib.h</text></switch></g><rect x="3348" y="3350" width="480" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 848px; margin-left: 838px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">libhello.so.1</div></div></div></foreignObject><text x="897" y="851" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">libhello.so.1</text></switch></g><rect x="3348" y="3450" width="480" height="100" rx="15" ry="15" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 875px; margin-left: 838px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">libhello.so.1.0</div></div></div></foreignObject><text x="897" y="879" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">libhello.so.1.0</text></switch></g><rect x="3320" y="3320" width="548" height="250" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><path d="M 4428 3600 Q 4428 3760.57 4978.29 3760.57 Q 5528.57 3760.57 5528.14 3640.42" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5528.05 3613.42 L 5546.18 3649.35 L 5528.14 3640.42 L 5510.18 3649.48 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" 
pointer-events="all"/><rect x="4068" y="2640" width="480" height="960" rx="72" ry="72" fill="#eeeeee" stroke="#36393d" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 780px; margin-left: 1018px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Everything in <b>image</b> folder that is present in <b>SYSROOT_DIRS</b> will be copied here.</div></div></div></foreignObject><text x="1077" y="784" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">Everything in image...</text></switch></g><path d="M 3678 3600 Q 3678.29 3760.57 3993.14 3760.57 Q 4308 3760.57 4308 3640.42" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 4308 3613.42 L 4326 3649.42 L 4308 3640.42 L 4290 3649.42 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><path d="M 2428 3600 Q 2428 3760.57 2941.14 3760.57 Q 3454.29 3760.57 3454.4 3639.46" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 3454.43 3612.46 L 3472.39 3648.47 L 3454.4 3639.46 L 3436.39 3648.44 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="2748" y="3680" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 935px; margin-left: 732px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_install</font></div></div></div></foreignObject><text x="732" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_install</text></switch></g><rect x="2220" y="3350" width="456" height="110" rx="16.5" ry="16.5" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 851px; 
margin-left: 556px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">libhello.so.1.0</div></div></div></foreignObject><text x="612" y="855" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">libhello.so.1.0</text></switch></g><rect x="1668" y="3180" width="440" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 810px; margin-left: 472px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_configure<br /></font></div></div></div></foreignObject><text x="472" y="814" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_configure&#xa;</text></switch></g><rect x="2212" y="3200" width="456" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 810px; margin-left: 554px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">hellolib.h</div></div></div></foreignObject><text x="610" y="814" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">hellolib.h</text></switch></g><rect x="1788" y="3000" width="320" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 765px; margin-left: 487px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font 
data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_patch<br /></font></div></div></div></foreignObject><text x="487" y="769" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_patch&#xa;</text></switch></g><rect x="2494" y="1620" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 420px; margin-left: 669px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_unpack<br /></font></div></div></div></foreignObject><text x="669" y="424" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_unpack&#xa;</text></switch></g><ellipse cx="2434" cy="1680" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 420px; margin-left: 595px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">6</b></font></div></div></div></foreignObject><text x="609" y="424" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">6</text></switch></g><ellipse cx="1728" cy="3060" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 765px; margin-left: 418px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" 
color="#ffffff" size="1"><b style="font-size: 15px;">7</b></font></div></div></div></foreignObject><text x="432" y="769" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">7</text></switch></g><ellipse cx="1608" cy="3240" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 810px; margin-left: 388px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">8</b></font></div></div></div></foreignObject><text x="402" y="814" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">8</text></switch></g><rect x="1748" y="3310" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 843px; margin-left: 482px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_compile<br /></font></div></div></div></foreignObject><text x="482" y="846" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_compile&#xa;</text></switch></g><ellipse cx="1688" cy="3370" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 843px; margin-left: 408px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 
15px;">9</b></font></div></div></div></foreignObject><text x="422" y="846" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">9</text></switch></g><ellipse cx="2688" cy="3740" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 935px; margin-left: 658px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">10</b></font></div></div></div></foreignObject><text x="672" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">10</text></switch></g><rect x="3728" y="3680" width="640" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 935px; margin-left: 1012px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_populate_sysroot</font></div></div></div></foreignObject><text x="1012" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_populate_sysroot</text></switch></g><ellipse cx="3668" cy="3740" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 935px; margin-left: 903px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 
15px;">11</b></font></div></div></div></foreignObject><text x="917" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">11</text></switch></g><path d="M 7028 3280 Q 7028 3760.57 7372 3760.57 Q 7716 3760.57 7715.97 3640.42" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7715.96 3613.42 L 7733.97 3649.41 L 7715.97 3640.42 L 7697.97 3649.42 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="6748" y="2640" width="560" height="640" rx="84" ry="84" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><path d="M 7028 2320 Q 7028 2320 7028 2614.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7028 2635.53 L 7014 2607.53 L 7028 2614.53 L 7042 2607.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="6928" y="2160" width="200" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 48px; height: 1px; padding-top: 560px; margin-left: 1733px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">git</font></div></div></div></foreignObject><text x="1757" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">git</text></switch></g><rect x="6800" y="2720" width="456" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 690px; margin-left: 1701px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Makefile</div></div></div></foreignObject><text x="1757" y="694" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">Makefile</text></switch></g><rect x="6800" y="2840" width="456" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" 
style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 720px; margin-left: 1701px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">LICENSE</div></div></div></foreignObject><text x="1757" y="724" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">LICENSE</text></switch></g><path d="M 6800 3000 Q 6588 3000.57 6588 3088 Q 6588 3175.43 6759.58 3175.08" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 6786.58 3175.03 L 6750.62 3193.1 L 6759.58 3175.08 L 6750.55 3157.1 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="6800" y="2960" width="456" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 750px; margin-left: 1701px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">sayhello.c</div></div></div></foreignObject><text x="1757" y="754" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sayhello.c</text></switch></g><rect x="6800" y="3120" width="456" height="110" rx="16.5" ry="16.5" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 112px; height: 1px; padding-top: 794px; margin-left: 1701px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">sayhello</div></div></div></foreignObject><text x="1757" y="797" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sayhello</text></switch></g><path d="M 7835.96 3600 Q 7828 3760.57 8090.86 3760.57 Q 8353.71 3760.57 8353.94 3640.42" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 8353.99 3613.42 L 8371.93 3649.45 L 8353.94 3640.42 L 8335.93 3649.38 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect 
x="7475.96" y="2640" width="480" height="960" rx="72" ry="72" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><path d="M 7715.96 2320 L 7715.96 2614.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7715.96 2635.53 L 7706.63 2607.53 L 7715.96 2614.53 L 7725.29 2607.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="7589.96" y="2160" width="252" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 61px; height: 1px; padding-top: 560px; margin-left: 1898px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">image</font></div></div></div></foreignObject><text x="1929" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">image</text></switch></g><path d="M 7716 2880 L 7716 2940 Q 7716 2980 7716.01 3017.26 L 7716.01 3054.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7716.02 3075.53 L 7706.68 3047.53 L 7716.01 3054.53 L 7725.35 3047.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="7629" y="2720" width="174" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 42px; height: 1px; padding-top: 700px; margin-left: 1908px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">usr</font></div></div></div></foreignObject><text x="1929" y="704" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">usr</text></switch></g><path d="M 7716.02 3240 L 7716 3394.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7716 3415.53 L 7706.67 3387.53 L 7716 3394.53 L 7725.34 3387.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="7608.52" y="3080" width="215" height="160" rx="24" ry="24" 
fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 52px; height: 1px; padding-top: 790px; margin-left: 1903px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">bin</font></div></div></div></foreignObject><text x="1929" y="794" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">bin</text></switch></g><rect x="1268" y="2160" width="400" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 560px; margin-left: 318px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">libhello-0.1</font></div></div></div></foreignObject><text x="367" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">libhello-0.1</text></switch></g><path d="M 1241 2160 Q 1241.71 2220 1245.14 2050.29 Q 1248.57 1880.57 1785.14 1880.57 Q 2321.71 1880.57 2321.08 2127.06" fill="none" stroke="#000000" stroke-width="8" stroke-miterlimit="10" stroke-dasharray="8 16" pointer-events="stroke"/><path d="M 2321.02 2151.06 L 2310.44 2119.03 L 2321.08 2127.06 L 2331.77 2119.08 Z" fill="#000000" stroke="#000000" stroke-width="8" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 472px; margin-left: 435px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); border-color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); border: 1px solid rgb(0, 0, 0); white-space: nowrap;"><font 
data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" style="font-size: 14px;">S = "${WORKDIR}/git"</font></div></div></div></foreignObject><text x="435" y="475" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="11px" text-anchor="middle">S = "${WORKDIR}/git"</text></switch></g><rect x="3203" y="1620" width="252" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 61px; height: 1px; padding-top: 418px; margin-left: 802px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">WORKDIR</font></div></div></div></foreignObject><text x="832" y="421" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">WORKDIR</text></switch></g><rect x="2923" y="2160" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 553px; margin-left: 732px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">D</font></div></div></div></foreignObject><text x="744" y="556" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">D</text></switch></g><rect x="2268" y="2160" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 553px; margin-left: 568px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font 
data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">S</font></div></div></div></foreignObject><text x="580" y="556" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">S</text></switch></g><rect x="2162" y="2160" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 553px; margin-left: 542px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">B</font></div></div></div></foreignObject><text x="554" y="556" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">B</text></switch></g><rect x="1188" y="2160" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 553px; margin-left: 298px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">S</font></div></div></div></foreignObject><text x="310" y="556" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">S</text></switch></g><rect x="1835" y="2160" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 553px; margin-left: 460px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">T</font></div></div></div></foreignObject><text x="472" 
y="556" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">T</text></switch></g><rect x="3628" y="3160" width="200" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 48px; height: 1px; padding-top: 803px; margin-left: 908px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">libdir</font></div></div></div></foreignObject><text x="932" y="806" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">libdir</text></switch></g><rect x="3135" y="3160" width="320" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 803px; margin-left: 785px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">includedir</font></div></div></div></foreignObject><text x="824" y="806" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">includedir</text></switch></g><rect x="5603.48" y="2280" width="464.52" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 114px; height: 1px; padding-top: 583px; margin-left: 1402px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">RECIPE_SYSROOT</font></div></div></div></foreignObject><text x="1459" y="586" fill="#ffffff" font-family="Liberation Sans" font-size="12px" 
text-anchor="middle" font-weight="bold">RECIPE_SYSROOT</text></switch></g><path d="M 7226.86 2260 Q 7226.86 2110.29 7197.14 2110.29 Q 7167.43 2110.29 7167.49 1985.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 7167.5 1964.47 L 7176.82 1992.48 L 7167.49 1985.47 L 7158.15 1992.47 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="7174" y="2260" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 578px; margin-left: 1795px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">S</font></div></div></div></foreignObject><text x="1807" y="581" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">S</text></switch></g><rect x="7068" y="2260" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 578px; margin-left: 1768px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">B</font></div></div></div></foreignObject><text x="1780" y="581" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">B</text></switch></g><rect x="7803" y="2260" width="106" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 24px; height: 1px; padding-top: 578px; margin-left: 1952px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; 
pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">D</font></div></div></div></foreignObject><text x="1964" y="581" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">D</text></switch></g><rect x="5788" y="1580" width="252" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 61px; height: 1px; padding-top: 408px; margin-left: 1448px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">WORKDIR</font></div></div></div></foreignObject><text x="1479" y="411" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">WORKDIR</text></switch></g><rect x="4640" y="3680" width="800" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 935px; margin-left: 1260px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_prepare_recipe_sysroot</font></div></div></div></foreignObject><text x="1260" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_prepare_recipe_sysroot</text></switch></g><rect x="7536" y="3420" width="360" height="110" rx="16.5" ry="16.5" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 88px; height: 1px; padding-top: 869px; margin-left: 1885px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: 
normal;">sayhello</div></div></div></foreignObject><text x="1929" y="872" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sayhello</text></switch></g><rect x="4988" y="2640" width="1080" height="960" rx="144" ry="144" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><path d="M 5236 2880 Q 5236 2960.57 5235.99 3014.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5235.98 3035.53 L 5226.65 3007.53 L 5235.99 3014.53 L 5245.32 3007.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5236 2880 Q 5236 2960.57 5474.86 2960.57 Q 5713.71 2960.57 5713.91 3014.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5713.98 3035.53 L 5704.55 3007.56 L 5713.91 3014.53 L 5723.22 3007.49 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5149" y="2720" width="174" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 42px; height: 1px; padding-top: 700px; margin-left: 1288px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">usr</font></div></div></div></foreignObject><text x="1309" y="704" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">usr</text></switch></g><path d="M 5235.98 3200 L 5236 3294.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5236 3315.53 L 5226.66 3287.53 L 5236 3294.53 L 5245.33 3287.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5128.48" y="3040" width="215" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 52px; height: 1px; padding-top: 780px; margin-left: 1283px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font 
data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">include</font></div></div></div></foreignObject><text x="1309" y="784" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">include</text></switch></g><path d="M 5713.71 3200 L 5713.71 3240.57 Q 5713.71 3280.57 5713.76 3287.55 L 5713.82 3294.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5713.97 3315.53 L 5704.43 3287.6 L 5713.82 3294.53 L 5723.1 3287.46 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5627" y="3040" width="174" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 42px; height: 1px; padding-top: 780px; margin-left: 1408px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">lib</font></div></div></div></foreignObject><text x="1429" y="784" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">lib</text></switch></g><rect x="5076" y="3320" width="320" height="100" rx="15" ry="15" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 843px; margin-left: 1270px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">hellolib.h</div></div></div></foreignObject><text x="1309" y="846" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">hellolib.h</text></switch></g><rect x="5468" y="3350" width="480" height="80" rx="12" ry="12" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 848px; margin-left: 1368px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 
12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">libhello.so.1</div></div></div></foreignObject><text x="1427" y="851" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">libhello.so.1</text></switch></g><rect x="5468" y="3450" width="480" height="100" rx="15" ry="15" fill="#f5f5f5" stroke="#666666" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 875px; margin-left: 1368px;"><div data-drawio-colors="color: #333333; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(51, 51, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">libhello.so.1.0</div></div></div></foreignObject><text x="1427" y="879" fill="#333333" font-family="Liberation Sans" font-size="12px" text-anchor="middle">libhello.so.1.0</text></switch></g><rect x="5440" y="3320" width="548" height="250" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><rect x="3880" y="1680" width="1560" height="280" fill="none" stroke="#36393d" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 455px; margin-left: 1165px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" style="font-size: 15px;">This also contains other files from other <br />dependencies. 
Default dependencies are:<br />basically <b style=""><u>gcc</u></b>, <b style=""><u>compilerlibs</u></b> and <b style=""><u style="">libc</u></b></font></div></div></div></foreignObject><text x="1165" y="459" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This also contains other files from other...</text></switch></g><rect x="4368" y="2280" width="510" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 126px; height: 1px; padding-top: 583px; margin-left: 1093px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">SYSROOT_DESTDIR</font></div></div></div></foreignObject><text x="1156" y="586" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">SYSROOT_DESTDIR</text></switch></g><path d="M 330 960 L 1121.58 960" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1148.58 960 L 1112.58 978 L 1121.58 960 L 1112.58 942 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><path d="M 330 1160 L 1121.58 1160" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1148.58 1160 L 1112.58 1178 L 1121.58 1160 L 1112.58 1142 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="2" y="860" width="328" height="400" rx="49.2" ry="49.2" fill="#000000" stroke="#23445d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 80px; height: 1px; padding-top: 265px; margin-left: 1px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font color="#fcfcfc">Github</font></div></div></div></foreignObject><text x="41" y="269" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">Github</text></switch></g><rect x="535" y="900" width="320" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div 
xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 240px; margin-left: 174px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_fetch<br /></font></div></div></div></foreignObject><text x="174" y="244" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_fetch&#xa;</text></switch></g><ellipse cx="475" cy="960" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 240px; margin-left: 105px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">1</b></font></div></div></div></foreignObject><text x="119" y="244" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">1</text></switch></g><rect x="535" y="1100" width="320" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 290px; margin-left: 174px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_fetch<br /></font></div></div></div></foreignObject><text x="174" y="294" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_fetch&#xa;</text></switch></g><ellipse cx="475" cy="1160" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe 
center; width: 28px; height: 1px; padding-top: 290px; margin-left: 105px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">5</b></font></div></div></div></foreignObject><text x="119" y="294" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">5</text></switch></g><rect x="6228" y="2980" width="440" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 760px; margin-left: 1612px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_configure<br /></font></div></div></div></foreignObject><text x="1612" y="764" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_configure&#xa;</text></switch></g><ellipse cx="6168" cy="3040" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 760px; margin-left: 1528px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">12</b></font></div></div></div></foreignObject><text x="1542" y="764" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">12</text></switch></g><rect x="6308" y="3110" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 793px; margin-left: 
1622px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_compile<br /></font></div></div></div></foreignObject><text x="1622" y="796" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_compile&#xa;</text></switch></g><ellipse cx="6248" cy="3170" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 793px; margin-left: 1548px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">13</b></font></div></div></div></foreignObject><text x="1562" y="796" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">13</text></switch></g><rect x="2508" y="900" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 240px; margin-left: 672px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_unpack<br /></font></div></div></div></foreignObject><text x="672" y="244" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_unpack&#xa;</text></switch></g><ellipse cx="2448" cy="960" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 240px; margin-left: 598px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; 
text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">2</b></font></div></div></div></foreignObject><text x="612" y="244" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">2</text></switch></g><path d="M 5848.57 520 Q 5848.57 600.57 5849.05 594.61" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5847.36 615.54 L 5835.66 586.51 L 5849.05 594.61 L 5863.57 588.76 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5848.57 520 Q 5848.57 560.57 5593.14 560.57 Q 5337.71 560.57 5337.31 594.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 5337.05 615.53 L 5323.39 587.36 L 5337.31 594.53 L 5351.39 587.7 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 5848.57 520 Q 5848.57 560.57 6108.57 560.57 Q 6368.57 560.57 6368.24 594.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 6368.04 615.53 L 6354.31 587.39 L 6368.24 594.53 L 6382.31 587.66 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5708" y="360" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 110px; margin-left: 1428px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">conf</font></div></div></div></foreignObject><text x="1462" y="114" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">conf</text></switch></g><path d="M 5149 660 Q 5108 660 5108 510.29 Q 5108 360.57 5252 360.57 Q 5396 360.57 5396 205.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 5396 184.47 L 5405.33 212.47 L 5396 205.47 L 5386.67 212.47 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5149" y="620" width="376" height="80" rx="12" ry="12" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div 
xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 92px; height: 1px; padding-top: 165px; margin-left: 1288px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">local.conf</div></div></div></foreignObject><text x="1334" y="169" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">local.conf</text></switch></g><path d="M 5847.43 700 Q 5847.43 860 6048 860 Q 6248.57 860 6248.09 994.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 6248.02 1015.53 L 6238.78 987.49 L 6248.09 994.53 L 6257.45 987.56 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="5627" y="620" width="440" height="80" rx="12" ry="12" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 108px; height: 1px; padding-top: 165px; margin-left: 1408px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">bblayers.conf</div></div></div></foreignObject><text x="1462" y="169" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">bblayers.conf</text></switch></g><rect x="6707" y="1360" width="921" height="600" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 228px; height: 1px; padding-top: 415px; margin-left: 1678px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;"><b><u>S</u></b> defaults generally to <b><u>${WORKDIR}/${BPN}-${PV}</u></b><br />In <b>git</b> recipes change it to <b><u>${WORKDIR}/git</u></b></font></div></div></div></foreignObject><text x="1792" y="419" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">S defaults generally to ${WORKDIR}/${B...</text></switch></g><rect x="6228" y="2700" width="440" height="160" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g 
transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 695px; margin-left: 1612px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_patch<br /><i>(No patches)</i><br /></font></div></div></div></foreignObject><text x="1612" y="699" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_patch...</text></switch></g><ellipse cx="6168" cy="2780" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 695px; margin-left: 1528px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">3</b></font></div></div></div></foreignObject><text x="1542" y="699" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">3</text></switch></g><ellipse cx="4580" cy="3740" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 935px; margin-left: 1131px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">4</b></font></div></div></div></foreignObject><text x="1145" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">4</text></switch></g><rect x="6927" y="3560" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 
-0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 905px; margin-left: 1777px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_install<br /></font></div></div></div></foreignObject><text x="1777" y="909" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_install&#xa;</text></switch></g><ellipse cx="6868" cy="3620" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 905px; margin-left: 1703px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">14</b></font></div></div></div></foreignObject><text x="1717" y="909" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">14</text></switch></g><path d="M 8354.02 2320 L 8354.02 2614.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 8354.02 2635.53 L 8344.69 2607.53 L 8354.02 2614.53 L 8363.35 2607.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="8228" y="2160" width="252" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 61px; height: 1px; padding-top: 560px; margin-left: 2058px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">package</font></div></div></div></foreignObject><text x="2089" y="564" fill="rgb(0, 0, 
0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">package</text></switch></g><rect x="8441.04" y="2260" width="146.96" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 35px; height: 1px; padding-top: 578px; margin-left: 2111px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">PKGD</font></div></div></div></foreignObject><text x="2129" y="581" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">PKGD</text></switch></g><path d="M 8461.77 3600 Q 8461.14 3760.57 8784.57 3760.57 Q 9108 3760.57 9108 3640.42" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9108 3613.42 L 9126 3649.42 L 9108 3640.42 L 9090 3649.42 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="8138.52" y="2640" width="431" height="960" rx="64.65" ry="64.65" fill="#eeeeee" stroke="#36393d" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 106px; height: 1px; padding-top: 780px; margin-left: 2036px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">A copy of <b>${D}<br /></b>excluding<br /><b>/sysroot-only</b></div></div></div></foreignObject><text x="2089" y="784" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">A copy of ${D}...</text></switch></g><rect x="7960.96" y="3680" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 935px; margin-left: 2035px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; 
pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_package<br /></font></div></div></div></foreignObject><text x="2035" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_package&#xa;</text></switch></g><ellipse cx="7901.96" cy="3740" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 935px; margin-left: 1961px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">15</b></font></div></div></div></foreignObject><text x="1975" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">15</text></switch></g><path d="M 9108 2320 L 9108 2614.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9108 2635.53 L 9098.67 2607.53 L 9108 2614.53 L 9117.33 2607.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="8868" y="2160" width="480" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 560px; margin-left: 2218px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">packages-split</font></div></div></div></foreignObject><text x="2277" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">packages-split</text></switch></g><rect x="9308" y="2260" width="240" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 58px; height: 1px; padding-top: 578px; margin-left: 
2328px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">PKGDEST</font></div></div></div></foreignObject><text x="2357" y="581" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">PKGDEST</text></switch></g><path d="M 9108 2840 L 9108 2895.1" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9108 2916.1 L 9098.67 2888.1 L 9108 2895.1 L 9117.33 2888.1 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="8982" y="2680" width="252" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 61px; height: 1px; padding-top: 690px; margin-left: 2247px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">sayhello</font></div></div></div></foreignObject><text x="2277" y="694" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sayhello</text></switch></g><path d="M 9108 3080 L 9108 3120.57 Q 9108 3160.57 9108 3162.55 L 9108 3164.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9107.98 3185.53 L 9098.67 3157.52 L 9108 3164.53 L 9117.34 3157.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="9021" y="2920" width="174" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 42px; height: 1px; padding-top: 750px; margin-left: 2256px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">usr</font></div></div></div></foreignObject><text x="2277" y="754" fill="rgb(0, 0, 0)" font-family="Liberation Sans" 
font-size="12px" text-anchor="middle">usr</text></switch></g><path d="M 9107.98 3350 L 9107.97 3414.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9107.96 3435.53 L 9098.63 3407.53 L 9107.97 3414.53 L 9117.3 3407.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="9000.48" y="3190" width="215" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 52px; height: 1px; padding-top: 818px; margin-left: 2251px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">bin</font></div></div></div></foreignObject><text x="2277" y="821" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">bin</text></switch></g><rect x="7689.48" y="1360" width="1287" height="595" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 320px; height: 1px; padding-top: 414px; margin-left: 1923px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><span style="font-size: 15px;">Folders created here are present in <b><u>PACKAGES</u></b> variable, BitBake knows what and where to put things using the <b><u>FILES</u></b> variable, example: <b><u>FILES:${PN}</u></b> files will go to <b><u>${PN}</u></b> folder which is in <b><u>PACKAGES</u></b></span></div></div></div></foreignObject><text x="2083" y="418" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">Folders created here are present in PACKAGES variable...</text></switch></g><rect x="8640.48" y="3680" width="360" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 935px; margin-left: 2205px;"><div data-drawio-colors="color: rgb(0, 0, 
0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_package<br /></font></div></div></div></foreignObject><text x="2205" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_package&#xa;</text></switch></g><ellipse cx="8581.48" cy="3740" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 935px; margin-left: 2131px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">16</b></font></div></div></div></foreignObject><text x="2145" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">16</text></switch></g><path d="M 9988 2320 L 9988 2674.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9988 2695.53 L 9978.67 2667.53 L 9988 2674.53 L 9997.33 2667.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="9748" y="2160" width="480" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 560px; margin-left: 2438px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">deploy-<b><i>pkg</i></b></font></div></div></div></foreignObject><text x="2497" y="564" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">deploy-pkg</text></switch></g><path d="M 9988 2860 L 9988 3014.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 9988 3035.53 L 9978.67 3007.53 L 9988 3014.53 L 9997.33 3007.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect 
x="9748" y="2700" width="480" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 695px; margin-left: 2438px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter"><b>${PACKAGE_ARCH}</b></font></div></div></div></foreignObject><text x="2497" y="699" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">${PACKAGE_ARCH}</text></switch></g><rect x="9028" y="1360" width="1640" height="595" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 408px; height: 1px; padding-top: 414px; margin-left: 2258px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">This can be <b><u>rpms</u></b>, <b><u>debs</u></b> or <b><u>ipks</u></b>.<br />These are provided by<br /><b><u>package_rpm</u></b>, <b><u>package_deb</u></b> and <b><u>package_ipk</u></b> classes respectively, use <b><u>PACKAGE_CLASSES</u></b> for that as<br />content of <b><u>PACKAGE_CLASSES</u></b> will be appended<br />to <b><u>INHERIT</u></b><br /></font></div></div></div></foreignObject><text x="2462" y="418" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This can be rpms, debs or ipks....</text></switch></g><path d="M 10522 3105 L 10708 3105.59 Q 10748 3105.71 10748 3065.71 L 10748 1320.57 Q 10748 1280.57 10708 1280.57 L 7353.71 1280.57 Q 7313.71 1280.57 7313.8 1240.57 L 7313.92 1180.42" fill="none" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7313.97 1153.42 L 7331.9 1189.45 L 7313.92 1180.42 L 7295.9 1189.38 Z" fill="#000000" stroke="#000000" stroke-width="12" stroke-miterlimit="10" pointer-events="all"/><rect x="9454" y="3040" width="1068" height="130" rx="19.5" ry="19.5" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; 
align-items: unsafe center; justify-content: unsafe center; width: 265px; height: 1px; padding-top: 776px; margin-left: 2365px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">sayhello-0.1-r0.${PACKAGE_ARCH}.<i>pkg</i></div></div></div></foreignObject><text x="2497" y="780" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sayhello-0.1-r0.${PACKAGE_ARCH}.pkg</text></switch></g><rect x="10788" y="2640" width="1480" height="680" fill="none" stroke="#36393d" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 368px; height: 1px; padding-top: 745px; margin-left: 2698px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">This task also depends on <b><u>PACKAGE_CLASSES</u></b>,<br /><b><u><i>pkg</i></u></b> can be <b><u>rpm</u></b>, <b><u>deb</u></b> or <b><u>ipk</u></b> for <b><u>package_rpm</u></b>,<br /><b><u>package_deb</u></b> or <u style="font-weight: bold;">package_ipk</u> respectively.<br />The generated package generally named using:<br /><b><u>${PN}</u></b>, <b><u>${PR}</u></b>, <b><u>${PACKAGE_ARCH}</u></b> and <b><u><i>pkg</i></u></b><br /></font></div></div></div></foreignObject><text x="2882" y="749" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This task also depends on PACKAGE_CLASSES,...</text></switch></g><path d="M 7314 520 L 7314 654.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7314 675.53 L 7300 647.53 L 7314 654.53 L 7328 647.53 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 7454 440 Q 7454 440 7742.53 440" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 7763.53 440 L 7735.53 449.33 L 7742.53 440 L 7735.53 430.67 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="7174" y="360" width="280" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 68px; height: 1px; padding-top: 110px; margin-left: 1795px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: 
border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">deploy</font></div></div></div></foreignObject><text x="1829" y="114" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">deploy</text></switch></g><path d="M 7313.71 840 L 7313.71 880.57 Q 7313.71 920.57 7313.71 910.29 L 7313.71 905.14 Q 7313.71 900 7313.81 927.26 L 7313.91 954.53" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 7313.98 975.53 L 7299.88 947.58 L 7313.91 954.53 L 7327.88 947.48 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="7054.48" y="680" width="519" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 128px; height: 1px; padding-top: 190px; margin-left: 1765px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><b>${DEPLOY_DIR_<i>pkg</i>}</b></div></div></div></foreignObject><text x="1828" y="194" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">${DEPLOY_DIR_pkg}</text></switch></g><rect x="9488" y="3680" width="600" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 935px; margin-left: 2447px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_package_write_<i>pkg</i><br /></font></div></div></div></foreignObject><text x="2447" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_package_write_pkg&#xa;</text></switch></g><ellipse cx="9408" cy="3740" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: 
left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 935px; margin-left: 2338px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">17</b></font></div></div></div></foreignObject><text x="2352" y="939" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">17</text></switch></g><path d="M 10048 3740 Q 11528.57 3740 11528.03 3345.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 11528.01 3324.47 L 11537.38 3352.46 L 11528.03 3345.47 L 11518.71 3352.48 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="7074" y="980" width="480" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 265px; margin-left: 1770px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">${PACKAGE_ARCH}</div></div></div></foreignObject><text x="1829" y="269" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">${PACKAGE_ARCH}</text></switch></g><rect x="7768" y="672.52" width="1660" height="167.48" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 413px; height: 1px; padding-top: 189px; margin-left: 1943px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">For packages, this can be <b><u>IPK</u></b>, <b><u>RPM</u></b> or <b><u>DEB</u></b> (<i>check step 17</i>)</font></div></div></div></foreignObject><text x="2150" y="193" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">For packages, this can be IPK, RPM or DEB (check step 
17)</text></switch></g><rect x="7369.48" y="480" width="320" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 133px; margin-left: 1843px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">DEPLOY_DIR</font></div></div></div></foreignObject><text x="1882" y="136" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">DEPLOY_DIR</text></switch></g><rect x="4988" y="80" width="240" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 58px; height: 1px; padding-top: 33px; margin-left: 1248px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">TMPDIR</font></div></div></div></foreignObject><text x="1277" y="36" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">TMPDIR</text></switch></g><rect x="1668" y="480" width="250.72" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 61px; height: 1px; padding-top: 133px; margin-left: 418px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">DL_DIR</font></div></div></div></foreignObject><text x="448" y="136" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">DL_DIR</text></switch></g><path d="M 7573.48 760 L 7630.29 760.34 Q 7670.29 760.57 7706.42 
758.98 L 7742.55 757.38" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 7763.53 756.46 L 7735.97 767.02 L 7742.55 757.38 L 7735.15 748.37 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="10488" y="2380" width="600" height="120" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 610px; margin-left: 2697px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">do_package_write_<i>pkg</i><br /></font></div></div></div></foreignObject><text x="2697" y="614" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">do_package_write_pkg&#xa;</text></switch></g><ellipse cx="10408" cy="2440" rx="60.00000000000001" ry="60.00000000000001" fill="#000000" stroke="#56517e" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 610px; margin-left: 2588px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter" color="#ffffff" size="1"><b style="font-size: 15px;">18</b></font></div></div></div></foreignObject><text x="2602" y="614" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">18</text></switch></g><path d="M 10268 2310.29 Q 10879.43 2310.29 10879.43 1999.43 Q 10879.43 1688.57 10878.68 1140.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 10878.65 1119.47 L 10888.02 1147.46 L 10878.68 1140.47 L 10869.35 1147.48 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="10148" y="2260" width="120" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><path d="M 10088 2200 Q 10088.57 2077.14 10173.14 2077.14 Q 10257.71 2077.14 10257.94 1980.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 10257.99 1959.47 L 10267.26 1987.49 L 10257.94 1980.47 L 10248.59 1987.45 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 
0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="9628" y="805" width="1667.52" height="310" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 415px; height: 1px; padding-top: 240px; margin-left: 2408px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">This can be <b><u>PKGWRITEDIRRPM</u></b>, <b><u>PKGWRITEDIRDEB</u></b> or <b><u>PKGWRITEDIRIPK</u></b> for <b><u>package_rpm</u></b>, <b><u>package_deb</u></b><br />or <b><u>package_ipk</u></b> respectively<br /></font></div></div></div></foreignObject><text x="2615" y="244" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This can be PKGWRITEDIRRPM, PKGWRITEDIRDEB or PKGWRITEDIRIPK for pack...</text></switch></g><rect x="628" y="2470" width="1313" height="410" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 326px; height: 1px; padding-top: 669px; margin-left: 158px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><span style="font-size: 15px;">License checking happens in <b><u>do_populate_lic</u></b> after <b><u>do_patch<br /></u></b>and before that a checksum check<br />happends on <b><u>LIC_FILES_CHKSUM</u></b> if the<br />license is not <b><u>CLOSED</u></b><br /></span></div></div></div></foreignObject><text x="321" y="672" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">License checking happens in do_populate_lic after do_pa...</text></switch></g><rect x="3528.48" y="1360" width="1739.52" height="280" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 433px; height: 1px; padding-top: 375px; margin-left: 883px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; 
font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><span style="font-size: 15px;">This variable is used to separate recipes<br />based on their target. This has value of<br /><b><u>${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}</u></b><br /></span></div></div></div></foreignObject><text x="1100" y="379" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This variable is used to separate recipes...</text></switch></g><path d="M 6588 660 Q 6863.43 660 6863.4 505.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 6863.39 484.47 L 6872.73 512.47 L 6863.4 505.47 L 6854.06 512.47 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="6148" y="620" width="440" height="80" rx="12" ry="12" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 108px; height: 1px; padding-top: 165px; margin-left: 1538px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">conf-notes.txt</div></div></div></foreignObject><text x="1592" y="169" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">conf-notes.txt</text></switch></g><path d="M 2580.02 520 Q 2580 660 3042.53 660" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 3063.53 660 L 3035.53 669.33 L 3042.53 660 L 3035.53 650.67 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><path d="M 2374 440 Q 2215.43 440.57 2215.04 205.47" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 2215.01 184.47 L 2224.39 212.46 L 2215.04 205.47 L 2205.72 212.49 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="2374" y="360" width="412.04" height="160" rx="24" ry="24" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 101px; height: 1px; padding-top: 110px; margin-left: 595px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; 
white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">sstate-cache</font></div></div></div></foreignObject><text x="645" y="114" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sstate-cache</text></switch></g><rect x="2643.92" y="480" width="332.04" height="100" rx="15" ry="15" fill="#000000" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 81px; height: 1px; padding-top: 133px; margin-left: 662px;"><div data-drawio-colors="color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(255, 255, 255); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: normal; overflow-wrap: normal;"><font data-font-src="https://fonts.googleapis.com/css?family=Architects+Daughter">SSTATE_DIR</font></div></div></div></foreignObject><text x="702" y="136" fill="#ffffff" font-family="Liberation Sans" font-size="12px" text-anchor="middle" font-weight="bold">SSTATE_DIR</text></switch></g><rect x="8927.96" y="3440" width="360" height="110" rx="16.5" ry="16.5" fill="#eeeeee" stroke="#36393d" stroke-width="4" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 88px; height: 1px; padding-top: 874px; margin-left: 2233px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">sayhello</div></div></div></foreignObject><text x="2277" y="877" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">sayhello</text></switch></g><path d="M 4660 1960 Q 4660 2300 4904.57 2300 Q 5149.14 2300 5148.94 2611.65" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" stroke-dasharray="12 12" pointer-events="stroke"/><path d="M 5148.92 2632.65 L 5139.61 2604.64 L 5148.94 2611.65 L 5158.27 2604.65 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-miterlimit="10" pointer-events="all"/><rect x="3068" y="480" width="1640" height="360" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 408px; height: 1px; padding-top: 165px; margin-left: 768px;"><div data-drawio-colors="color: rgb(0, 0, 0); " 
style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><span style="font-size: 15px;">This folder contains cache for recipes build output, this is used by BitBake, if the recipe checksum did not change it knows that the output to use is the same.<br /></span></div></div></div></foreignObject><text x="972" y="169" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This folder contains cache for recipes build output, this is used by...</text></switch></g><rect x="1395" y="0" width="1640" height="180" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 408px; height: 1px; padding-top: 23px; margin-left: 350px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><span style="font-size: 15px;"><font data-font-src="https://fonts.googleapis.com/css?family=Liberation+Sans">These directories can be shared accross builds to save disk space and build time</font><br /></span></div></div></div></foreignObject><text x="554" y="26" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">These directories can be shared accross builds to save disk space an...</text></switch></g><rect x="7768" y="350" width="1667.52" height="180" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 415px; height: 1px; padding-top: 110px; margin-left: 1943px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">This directory contains other output directories such as <b><u>images</u></b>, <b><u>sdk</u></b> and <b><u>licenses</u></b><br /></font></div></div></div></foreignObject><text x="2150" y="114" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This directory contains other output directories such as images, sdk...</text></switch></g><rect x="5908" y="1020" width="680" height="520" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject 
pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 168px; height: 1px; padding-top: 320px; margin-left: 1478px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">This file contains all <b>layers</b> that BitBake should consider when looking for metadata.<br /></font></div></div></div></foreignObject><text x="1562" y="324" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This file contains all layer...</text></switch></g><rect x="5396" y="20" width="1760" height="160" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 438px; height: 1px; padding-top: 25px; margin-left: 1350px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">This is base configuration file containing essential user config such as <b><u>MACHINE</u></b> and <b><u>DISTRO</u></b><br /></font></div></div></div></foreignObject><text x="1569" y="29" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">This is base configuration file containing essential user config such as...</text></switch></g><rect x="6140" y="320" width="964.52" height="160" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" stroke-dasharray="12 12" pointer-events="all"/><g transform="translate(-0.5 -0.5)scale(4)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 239px; height: 1px; padding-top: 100px; margin-left: 1536px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: &quot;Liberation Sans&quot;; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 15px;">The message to show after <b>source oe-init-build-env</b><br /></font></div></div></div></foreignObject><text x="1656" y="104" fill="rgb(0, 0, 0)" font-family="Liberation Sans" font-size="12px" text-anchor="middle">The message to show after source oe-init...</text></switch></g><rect x="10948" y="2400" width="120" height="80" fill="none" 
stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><rect x="10348" y="3065" width="80" height="80" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><rect x="9948" y="3700" width="100" height="80" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><rect x="10028" y="2200" width="80" height="80" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/><rect x="7454" y="725" width="80" height="80" fill="none" stroke="rgb(0, 0, 0)" stroke-width="4" pointer-events="all"/></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.drawio.com/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg> \ No newline at end of file
diff --git a/documentation/overview-manual/yp-intro.rst b/documentation/overview-manual/yp-intro.rst
index 86a8bf2b0d..50b7901b29 100644
--- a/documentation/overview-manual/yp-intro.rst
+++ b/documentation/overview-manual/yp-intro.rst
@@ -129,7 +129,7 @@ Here are features and advantages of the Yocto Project:
arbitrarily include packages.
- *License Manifest:* The Yocto Project provides a :ref:`license
- manifest <dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle>`
+ manifest <dev-manual/licenses:maintaining open source license compliance during your product's lifecycle>`
for review by people who need to track the use of open source
licenses (e.g. legal teams).
@@ -225,7 +225,7 @@ your Metadata, the easier it is to cope with future changes.
- Layers support the inclusion of technologies, hardware components,
and software components. The :ref:`Yocto Project
- Compatible <dev-manual/common-tasks:making sure your layer is compatible with yocto project>`
+ Compatible <dev-manual/layers:making sure your layer is compatible with yocto project>`
designation provides a minimum level of standardization that
contributes to a strong ecosystem. "YP Compatible" is applied to
appropriate products and software components such as BSPs, other
@@ -269,7 +269,7 @@ of the ``poky`` repository, you will see several layers: ``meta``,
layer.
For procedures on how to create layers, see the
-":ref:`dev-manual/common-tasks:understanding and creating layers`"
+":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual.
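The layer-creation procedure referenced here can be exercised directly with the ``bitbake-layers`` tool; a brief hedged sketch, using a made-up layer name::

   $ bitbake-layers create-layer ../meta-mylayer
   $ bitbake-layers add-layer ../meta-mylayer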
Components and Tools
@@ -340,6 +340,18 @@ the Yocto Project:
view information about builds. For information on Toaster, see the
:doc:`/toaster-manual/index`.
+- *VSCode IDE Extension:* The `Yocto Project BitBake
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+ extension for Visual Studio Code provides a rich set of features for working
+ with BitBake recipes. The extension provides syntax highlighting,
+ hover tips, and completion for BitBake files as well as embedded Python and
+ Bash languages. Additional views and commands allow you to efficiently
+ browse, build and edit recipes. It also provides SDK integration for
+ cross-compiling and debugging through ``devtool``.
+
+ Learn more about the VSCode Extension on the `extension's marketplace page
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__.
+
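The extension can also be installed from a terminal; a minimal sketch, assuming the ``code`` command-line launcher is available and reusing the extension identifier visible in the marketplace URL above::

   $ code --install-extension yocto-project.yocto-bitbake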
Production Tools
----------------
@@ -351,7 +363,7 @@ Yocto Project:
(BitBake and
OE-Core) automatically generates upgrades for recipes that are based
on new versions of the recipes published upstream. See
- :ref:`dev-manual/common-tasks:using the auto upgrade helper (auh)`
+ :ref:`dev-manual/upgrading-recipes:using the auto upgrade helper (auh)`
for how to set it up.
- *Recipe Reporting System:* The Recipe Reporting System tracks recipe
@@ -361,7 +373,7 @@ Yocto Project:
of the :oe_layerindex:`OpenEmbedded Layer Index <>`, which
is a website that indexes OpenEmbedded-Core layers.
-- *Patchwork:* `Patchwork <http://jk.ozlabs.org/projects/patchwork/>`__
+- *Patchwork:* `Patchwork <https://patchwork.yoctoproject.org/>`__
is a fork of a project originally started by
`OzLabs <https://ozlabs.org/>`__. The project is a web-based tracking
system designed to streamline the process of bringing contributions
@@ -584,20 +596,15 @@ Build Host runs, you have several choices.
":ref:`dev-manual/start:setting up to use cross platforms (crops)`"
section in the Yocto Project Development Tasks Manual.
-- *Windows Subsystem For Linux (WSLv2):* You may use Windows Subsystem
- For Linux v2 to set up a Build Host using Windows 10.
-
- .. note::
+- *Windows Subsystem For Linux (WSL 2):* You may use Windows Subsystem
+ For Linux version 2 to set up a Build Host using Windows 10 or later,
+ or Windows Server 2019 or later.
- The Yocto Project is not compatible with WSLv1, it is compatible
- but not officially supported nor validated with WSLv2, if you
- still decide to use WSL please upgrade to WSLv2.
-
- The Windows Subsystem For Linux allows Windows 10 to run a real Linux
+ The Windows Subsystem For Linux allows Windows to run a real Linux
kernel inside of a lightweight virtual machine (VM).
- For information on how to set up a Build Host with WSLv2, see the
- ":ref:`dev-manual/start:setting up to use windows subsystem for linux (wslv2)`"
+ For information on how to set up a Build Host with WSL 2, see the
+ ":ref:`dev-manual/start:setting up to use windows subsystem for linux (wsl 2)`"
section in the Yocto Project Development Tasks Manual.
- *Toaster:* Regardless of what your Build Host is running, you can use
@@ -610,6 +617,14 @@ Build Host runs, you have several choices.
For information about and how to use Toaster, see the
:doc:`/toaster-manual/index`.
+- *Using the VSCode Extension:* You can use the `Yocto Project BitBake
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+ extension for Visual Studio Code to start your BitBake builds through a
+ graphical user interface.
+
+ Learn more about the VSCode Extension on the `extension's marketplace page
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__.
+
Reference Embedded Distribution (Poky)
======================================
@@ -722,7 +737,7 @@ workflow:
.. image:: figures/YP-flow-diagram.png
:align: center
-Following is a brief summary of the "workflow":
+Here is a brief summary of the "workflow":
1. Developers specify architecture, policies, patches and configuration
details.
@@ -781,7 +796,7 @@ helpful for getting started:
Yocto Project.
For more detailed information on layers, see the
- ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+ ":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual. For a
discussion specifically on BSP Layers, see the
":ref:`bsp-guide/bsp:bsp layers`" section in the Yocto
@@ -842,7 +857,7 @@ helpful for getting started:
distribution.
Another point worth noting is that historically within the Yocto
- Project, recipes were referred to as packages - thus, the existence
+ Project, recipes were referred to as packages --- thus, the existence
of several BitBake variables that are seemingly mis-named, (e.g.
:term:`PR`,
:term:`PV`, and
diff --git a/documentation/poky.yaml.in b/documentation/poky.yaml.in
index 1e1d6c83ed..b9cdd844e4 100644
--- a/documentation/poky.yaml.in
+++ b/documentation/poky.yaml.in
@@ -13,7 +13,8 @@ YOCTO_RELEASE_DL_URL : "&YOCTO_DL_URL;/releases/yocto/yocto-&DISTRO;"
UBUNTU_HOST_PACKAGES_ESSENTIAL : "gawk wget git diffstat unzip texinfo gcc \
build-essential chrpath socat cpio python3 python3-pip python3-pexpect \
xz-utils debianutils iputils-ping python3-git python3-jinja2 libegl1-mesa libsdl1.2-dev \
- pylint3 xterm python3-subunit mesa-common-dev zstd liblz4-tool"
+ python3-subunit mesa-common-dev zstd liblz4-tool file locales
+ \n\ $ sudo locale-gen en_US.UTF-8"
FEDORA_HOST_PACKAGES_ESSENTIAL : "gawk make wget tar bzip2 gzip python3 unzip perl patch \
diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath \
ccache perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue perl-bignum socat \
@@ -22,20 +23,14 @@ FEDORA_HOST_PACKAGES_ESSENTIAL : "gawk make wget tar bzip2 gzip python3 unzip pe
perl-File-Copy perl-locale zstd lz4"
OPENSUSE_HOST_PACKAGES_ESSENTIAL : "python gcc gcc-c++ git chrpath make wget python-xml \
diffstat makeinfo python-curses patch socat python3 python3-curses tar python3-pip \
- python3-pexpect xz which python3-Jinja2 Mesa-libEGL1 libSDL-devel xterm rpcgen Mesa-dri-devel \
- zstd lz4
- \n\ $ sudo pip3 install GitPython"
-CENTOS7_HOST_PACKAGES_ESSENTIAL : "-y epel-release
- \n\ $ sudo yum makecache
- \n\ $ sudo yum install gawk make wget tar bzip2 gzip python3 unzip perl patch \
- diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath socat \
- perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue python3-pip xz \
- which SDL-devel xterm mesa-libGL-devel zstd lz4
- \n\ $ sudo pip3 install GitPython jinja2"
-CENTOS8_HOST_PACKAGES_ESSENTIAL : "-y epel-release
- \n\ $ sudo dnf config-manager --set-enabled PowerTools
- \n\ $ sudo dnf makecache
- \n\ $ sudo dnf install gawk make wget tar bzip2 gzip python3 unzip perl patch \
+ python3-pexpect xz which python3-Jinja2 Mesa-libEGL1 libSDL-devel rpcgen Mesa-dri-devel \
+ zstd lz4 bzip2 gzip hostname
+ \n\ $ sudo pip3 install GitPython"
+ALMALINUX_HOST_PACKAGES_ESSENTIAL : "-y epel-release
+ \n\ $ sudo yum install dnf-plugins-core
+ \n\ $ sudo dnf config-manager --set-enabled crb
+ \n\ $ sudo dnf makecache
+ \n\ $ sudo dnf install gawk make wget tar bzip2 gzip python3 unzip perl patch \
diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath ccache \
socat perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue python3-pip \
python3-GitPython python3-jinja2 python3-pexpect xz which SDL-devel xterm \
@@ -44,4 +39,11 @@ PIP3_HOST_PACKAGES_DOC : "$ sudo pip3 install sphinx sphinx_rtd_theme pyyaml"
MIN_PYTHON_VERSION : "3.6.0"
MIN_TAR_VERSION : "1.28"
MIN_GIT_VERSION : "1.8.3.1"
-MIN_GCC_VERSION : "5.0"
+MIN_GCC_VERSION : "7.5"
+MIN_MAKE_VERSION : "4.0"
+# Disk space (Gbytes) needed to generate qemux86-64 core-image-sato on Ubuntu 22.04 (x86-64), rounded up from 87
+MIN_DISK_SPACE : "90"
+# Disk space (Gbytes) needed to generate qemux86-64 core-image-sato on Ubuntu 22.04 (x86-64) with "rm_work", rounded up from 38
+MIN_DISK_SPACE_RM_WORK : "40"
+# RAM (Gbytes) needed to generate qemux86-64 core-image-sato on Ubuntu 22.04 (x86-64) on a 4 core system
+MIN_RAM : "8"
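A quick way to compare a candidate build host against these minimums is with standard version and resource queries; a hedged sketch, with the thresholds taken from the values above::

   $ gcc --version       # MIN_GCC_VERSION: 7.5
   $ make --version      # MIN_MAKE_VERSION: 4.0
   $ python3 --version   # MIN_PYTHON_VERSION: 3.6.0
   $ tar --version       # MIN_TAR_VERSION: 1.28
   $ git --version       # MIN_GIT_VERSION: 1.8.3.1
   $ df -h .             # MIN_DISK_SPACE: about 90 Gbytes free (about 40 with rm_work)
   $ free -g             # MIN_RAM: about 8 Gbytes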
diff --git a/documentation/profile-manual/intro.rst b/documentation/profile-manual/intro.rst
index 9c8fa3dbfa..86310cf318 100644
--- a/documentation/profile-manual/intro.rst
+++ b/documentation/profile-manual/intro.rst
@@ -7,43 +7,45 @@ Yocto Project Profiling and Tracing Manual
Introduction
============
-Yocto bundles a number of tracing and profiling tools - this 'HOWTO'
+The Yocto Project bundles a number of tracing and profiling tools --- this manual
describes their basic usage and shows by example how to make use of them
-to examine application and system behavior.
+to analyze application and system behavior.
-The tools presented are for the most part completely open-ended and have
+The tools presented are, for the most part, completely open-ended and have
quite good and/or extensive documentation of their own which can be used
to solve just about any problem you might come across in Linux. Each
section that describes a particular tool has links to that tool's
documentation and website.
-The purpose of this 'HOWTO' is to present a set of common and generally
+The purpose of this manual is to present a set of common and generally
useful tracing and profiling idioms along with their application (as
appropriate) to each tool, in the context of a general-purpose
'drill-down' methodology that can be applied to solving a large number
-(90%?) of problems. For help with more advanced usages and problems,
-please see the documentation and/or websites listed for each tool.
+of problems. For help with more advanced usages and problems,
+refer to the documentation and/or websites provided for each tool.
-The final section of this 'HOWTO' is a collection of real-world examples
-which we'll be continually adding to as we solve more problems using the
-tools - feel free to add your own examples to the list!
+The final section of this manual is a collection of real-world examples
+which we'll be continually updating as we solve more problems using the
+tools --- feel free to suggest additions to what you read here.
General Setup
=============
-Most of the tools are available only in 'sdk' images or in images built
-after adding 'tools-profile' to your local.conf. So, in order to be able
-to access all of the tools described here, please first build and boot
-an 'sdk' image e.g. ::
+Most of the tools are available only in ``sdk`` images or in images built
+after adding ``tools-profile`` to your ``local.conf`` file. So, in order to be able
+to access all of the tools described here, you can build and boot
+an ``sdk`` image, perhaps one of::
$ bitbake core-image-sato-sdk
+ $ bitbake core-image-weston-sdk
+ $ bitbake core-image-rt-sdk
-or alternatively by adding 'tools-profile' to the EXTRA_IMAGE_FEATURES line in
-your local.conf::
+Alternatively, you can add ``tools-profile`` to the :term:`EXTRA_IMAGE_FEATURES` line in
+your ``local.conf`` file::
EXTRA_IMAGE_FEATURES = "debug-tweaks tools-profile"
-If you use the 'tools-profile' method, you don't need to build an sdk image -
+If you use the ``tools-profile`` method, you don't need to build an ``sdk`` image ---
the tracing and profiling tools will be included in non-sdk images as well e.g.::
$ bitbake core-image-sato
@@ -64,12 +66,12 @@ the tracing and profiling tools will be included in non-sdk images as well e.g.:
If you've already built a stripped image, you can generate debug
packages (xxx-dbg) which you can manually install as needed.
-To generate debug info for packages, you can add dbg-pkgs to
-EXTRA_IMAGE_FEATURES in local.conf. For example::
+To generate debug info for packages, you can add ``dbg-pkgs`` to
+:term:`EXTRA_IMAGE_FEATURES` in ``local.conf``. For example::
EXTRA_IMAGE_FEATURES = "debug-tweaks tools-profile dbg-pkgs"
-Additionally, in order to generate the right type of debuginfo, we also need to
+Additionally, in order to generate the right type of debug info, we also need to
set :term:`PACKAGE_DEBUG_SPLIT_STYLE` in the ``local.conf`` file::
PACKAGE_DEBUG_SPLIT_STYLE = 'debug-file-directory'
diff --git a/documentation/profile-manual/usage.rst b/documentation/profile-manual/usage.rst
index fb1553d70d..2ff12f434e 100644
--- a/documentation/profile-manual/usage.rst
+++ b/documentation/profile-manual/usage.rst
@@ -13,11 +13,11 @@ tools.
perf
====
-The 'perf' tool is the profiling and tracing tool that comes bundled
+The perf tool is the profiling and tracing tool that comes bundled
with the Linux kernel.
Don't let the fact that it's part of the kernel fool you into thinking
-that it's only for tracing and profiling the kernel - you can indeed use
+that it's only for tracing and profiling the kernel --- you can indeed use
it to trace and profile just the kernel, but you can also use it to
profile specific applications separately (with or without kernel
context), and you can also use it to trace and profile the kernel and
@@ -26,22 +26,22 @@ of what's going on.
In many ways, perf aims to be a superset of all the tracing and
profiling tools available in Linux today, including all the other tools
-covered in this HOWTO. The past couple of years have seen perf subsume a
+covered in this How-to. The past couple of years have seen perf subsume a
lot of the functionality of those other tools and, at the same time,
those other tools have removed large portions of their previous
functionality and replaced it with calls to the equivalent functionality
now implemented by the perf subsystem. Extrapolation suggests that at
-some point those other tools will simply become completely redundant and
+some point those other tools will become completely redundant and
go away; until then, we'll cover those other tools in these pages and in
many cases show how the same things can be accomplished in perf and the
other tools when it seems useful to do so.
The coverage below details some of the most common ways you'll likely
want to apply the tool; full documentation can be found either within
-the tool itself or in the man pages at
+the tool itself or in the manual pages at
`perf(1) <https://linux.die.net/man/1/perf>`__.
-Perf Setup
+perf Setup
----------
For this section, we'll assume you've already performed the basic setup
@@ -54,14 +54,14 @@ image built with the following in your ``local.conf`` file::
perf runs on the target system for the most part. You can archive
profile data and copy it to the host for analysis, but for the rest of
-this document we assume you've ssh'ed to the host and will be running
-the perf commands on the target.
+this document we assume you're connected to the host through SSH and will be
+running the perf commands on the target.
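
If you do decide to analyze on the host instead, a transfer of the recorded data
might look something like the following sketch, once a ``perf.data`` file has been
recorded as shown below. Here, ``perf archive`` (which bundles the object files
needed to resolve symbols off-target) may or may not be shipped in your perf
package, and ``trz@empanada`` is simply the illustrative host name used later in
this manual::

   root@crownbay:~# perf archive
   root@crownbay:~# scp perf.data perf.data.tar.bz2 trz@empanada:
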
-Basic Perf Usage
+Basic perf Usage
----------------
The perf tool is pretty much self-documenting. To remind yourself of the
-available commands, simply type 'perf', which will show you basic usage
+available commands, just type ``perf``, which will show you basic usage
along with the available perf subcommands::
root@crownbay:~# perf
@@ -97,19 +97,19 @@ along with the available perf subcommands::
Using perf to do Basic Profiling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-As a simple test case, we'll profile the 'wget' of a fairly large file,
+As a simple test case, we'll profile the ``wget`` of a fairly large file,
which is a minimally interesting case because it has both file and
network I/O aspects, and at least in the case of standard Yocto images,
it's implemented as part of BusyBox, so the methods we use to analyze it
-can be used in a very similar way to the whole host of supported BusyBox
-applets in Yocto. ::
+can be used in a similar way to the whole host of supported BusyBox
+applets in Yocto::
root@crownbay:~# rm linux-2.6.19.2.tar.bz2; \
wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
The quickest and easiest way to get some basic overall data about what's
-going on for a particular workload is to profile it using 'perf stat'.
-'perf stat' basically profiles using a few default counters and displays
+going on for a particular workload is to profile it using ``perf stat``.
+This command basically profiles using a few default counters and displays
the summed counts at the end of the run::
root@crownbay:~# perf stat wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
@@ -131,13 +131,13 @@ the summed counts at the end of the run::
59.836627620 seconds time elapsed
-Many times such a simple-minded test doesn't yield much of
-interest, but sometimes it does (see Real-world Yocto bug (slow
-loop-mounted write speed)).
+Such a simple-minded test doesn't always yield much of interest, but sometimes
+it does (see the :yocto_bugs:`Slow write speed on live images with denzil
+</show_bug.cgi?id=3049>` bug report).
-Also, note that 'perf stat' isn't restricted to a fixed set of counters
-- basically any event listed in the output of 'perf list' can be tallied
-by 'perf stat'. For example, suppose we wanted to see a summary of all
+Also, note that ``perf stat`` isn't restricted to a fixed set of counters
+--- basically any event listed in the output of ``perf list`` can be tallied
+by ``perf stat``. For example, suppose we wanted to see a summary of all
the events related to kernel memory allocation/freeing along with cache
hits and misses::
@@ -164,22 +164,22 @@ hits and misses::
44.831023415 seconds time elapsed
-So 'perf stat' gives us a nice easy
+As you can see, ``perf stat`` gives us a nice easy
way to get a quick overview of what might be happening for a set of
events, but normally we'd need a little more detail in order to
understand what's going on in a way that we can act on in a useful way.
-To dive down into a next level of detail, we can use 'perf record'/'perf
-report' which will collect profiling data and present it to use using an
-interactive text-based UI (or simply as text if we specify ``--stdio`` to
-'perf report').
+To dive down into the next level of detail, we can use ``perf record`` /
+``perf report``, which will collect profiling data and present it to us using an
+interactive text-based UI (or just as text if we specify ``--stdio`` to
+``perf report``).
-As our first attempt at profiling this workload, we'll simply run 'perf
-record', handing it the workload we want to profile (everything after
-'perf record' and any perf options we hand it - here none - will be
+As our first attempt at profiling this workload, we'll just run ``perf
+record``, handing it the workload we want to profile (everything after
+``perf record`` and any perf options we hand it, here none, will be
executed in a new shell). perf collects samples until the process exits
-and records them in a file named 'perf.data' in the current working
-directory. ::
+and records them in a file named ``perf.data`` in the current working
+directory::
root@crownbay:~# perf record wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
@@ -189,7 +189,7 @@ directory. ::
[ perf record: Captured and wrote 0.176 MB perf.data (~7700 samples) ]
To see the results in a
-'text-based UI' (tui), simply run 'perf report', which will read the
+"text-based UI" (tui), just run ``perf report``, which will read the
perf.data file in the current working directory and display the results
in an interactive UI::
@@ -198,26 +198,26 @@ in an interactive UI::
.. image:: figures/perf-wget-flat-stripped.png
:align: center
-The above screenshot displays a 'flat' profile, one entry for each
-'bucket' corresponding to the functions that were profiled during the
+The above screenshot displays a "flat" profile, one entry for each
+"bucket" corresponding to the functions that were profiled during the
profiling run, ordered from the most popular to the least (perf has
options to sort in various orders and keys as well as display entries
-only above a certain threshold and so on - see the perf documentation
-for details). Note that this includes both userspace functions (entries
-containing a [.]) and kernel functions accounted to the process (entries
-containing a [k]). (perf has command-line modifiers that can be used to
-restrict the profiling to kernel or userspace, among others).
-
-Notice also that the above report shows an entry for 'busybox', which is
-the executable that implements 'wget' in Yocto, but that instead of a
+only above a certain threshold and so on --- see the perf documentation
+for details). Note that this includes both user space functions (entries
+containing a ``[.]``) and kernel functions accounted to the process (entries
+containing a ``[k]``). perf has command-line modifiers that can be used to
+restrict the profiling to kernel or user space, among others.
+
+Notice also that the above report shows an entry for ``busybox``, which is
+the executable that implements ``wget`` in Yocto, but that instead of a
useful function name in that entry, it displays a not-so-friendly hex
value instead. The steps below will show how to fix that problem.
Before we do that, however, let's try running a different profile, one
which shows something a little more interesting. The only difference
-between the new profile and the previous one is that we'll add the -g
+between the new profile and the previous one is that we'll add the ``-g``
option, which will record not just the address of a sampled function,
-but the entire callchain to the sampled function as well::
+but the entire call chain to the sampled function as well::
root@crownbay:~# perf record -g wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
Connecting to downloads.yoctoproject.org (140.211.169.59:80)
@@ -231,44 +231,44 @@ but the entire callchain to the sampled function as well::
.. image:: figures/perf-wget-g-copy-to-user-expanded-stripped.png
:align: center
-Using the callgraph view, we can actually see not only which functions
+Using the call graph view, we can actually see not only which functions
took the most time, but we can also see a summary of how those functions
were called and learn something about how the program interacts with the
kernel in the process.
-Notice that each entry in the above screenshot now contains a '+' on the
-left-hand side. This means that we can expand the entry and drill down
-into the callchains that feed into that entry. Pressing 'enter' on any
-one of them will expand the callchain (you can also press 'E' to expand
-them all at the same time or 'C' to collapse them all).
+Notice that each entry in the above screenshot now contains a ``+`` on the
+left side. This means that we can expand the entry and drill down
+into the call chains that feed into that entry. Pressing ``Enter`` on any
+one of them will expand the call chain (you can also press ``E`` to expand
+them all at the same time or ``C`` to collapse them all).
In the screenshot above, we've toggled the ``__copy_to_user_ll()`` entry
-and several subnodes all the way down. This lets us see which callchains
+and several subnodes all the way down. This lets us see which call chains
contributed to the profiled ``__copy_to_user_ll()`` function which
contributed 1.77% to the total profile.
-As a bit of background explanation for these callchains, think about
-what happens at a high level when you run wget to get a file out on the
+As a bit of background explanation for these call chains, think about
+what happens at a high level when you run ``wget`` to get a file out on the
network. Basically what happens is that the data comes into the kernel
-via the network connection (socket) and is passed to the userspace
-program 'wget' (which is actually a part of BusyBox, but that's not
+via the network connection (socket) and is passed to the user space
+program ``wget`` (which is actually a part of BusyBox, but that's not
important for now), which takes the buffers the kernel passes to it and
writes it to a disk file to save it.
The part of this process that we're looking at in the above call stacks
is the part where the kernel passes the data it has read from the socket
-down to wget i.e. a copy-to-user.
+down to ``wget``, i.e. a ``copy-to-user``.
Notice also that here there's also a case where the hex value is
-displayed in the callstack, here in the expanded ``sys_clock_gettime()``
-function. Later we'll see it resolve to a userspace function call in
-busybox.
+displayed in the call stack, here in the expanded ``sys_clock_gettime()``
+function. Later we'll see it resolve to a user space function call in
+BusyBox.
.. image:: figures/perf-wget-g-copy-from-user-expanded-stripped.png
:align: center
-The above screenshot shows the other half of the journey for the data -
-from the wget program's userspace buffers to disk. To get the buffers to
+The above screenshot shows the other half of the journey for the data ---
+from the ``wget`` program's user space buffers to disk. To get the buffers to
disk, the wget program issues a ``write(2)``, which does a ``copy-from-user`` to
the kernel, which then takes care via some circuitous path (probably
also present somewhere in the profile data), to get it safely to disk.
@@ -278,8 +278,8 @@ of how to extract useful information out of it, let's get back to the
task at hand and see if we can get some basic idea about where the time
is spent in the program we're profiling, wget. Remember that wget is
actually implemented as an applet in BusyBox, so while the process name
-is 'wget', the executable we're actually interested in is BusyBox. So
-let's expand the first entry containing BusyBox:
+is ``wget``, the executable we're actually interested in is ``busybox``.
+Therefore, let's expand the first entry containing BusyBox:
.. image:: figures/perf-wget-busybox-expanded-stripped.png
:align: center
@@ -289,7 +289,7 @@ hex value instead of a symbol as with most of the kernel entries.
Expanding the BusyBox entry doesn't make it any better.
The problem is that perf can't find the symbol information for the
-busybox binary, which is actually stripped out by the Yocto build
+``busybox`` binary, which is actually stripped out by the Yocto build
system.
One way around that is to put the following in your ``local.conf`` file
@@ -299,39 +299,38 @@ when you build the image::
However, we already have an image with the binaries stripped, so
what can we do to get perf to resolve the symbols? Basically we need to
-install the debuginfo for the BusyBox package.
+install the debugging information for the BusyBox package.
To generate the debug info for the packages in the image, we can add
``dbg-pkgs`` to :term:`EXTRA_IMAGE_FEATURES` in ``local.conf``. For example::
EXTRA_IMAGE_FEATURES = "debug-tweaks tools-profile dbg-pkgs"
-Additionally, in order to generate the type of debuginfo that perf
-understands, we also need to set
-:term:`PACKAGE_DEBUG_SPLIT_STYLE`
+Additionally, in order to generate the type of debugging information that perf
+understands, we also need to set :term:`PACKAGE_DEBUG_SPLIT_STYLE`
in the ``local.conf`` file::
PACKAGE_DEBUG_SPLIT_STYLE = 'debug-file-directory'
-Once we've done that, we can install the
-debuginfo for BusyBox. The debug packages once built can be found in
-``build/tmp/deploy/rpm/*`` on the host system. Find the busybox-dbg-...rpm
-file and copy it to the target. For example::
+Once we've done that, we can install the debugging information for BusyBox. The
+debug packages once built can be found in ``build/tmp/deploy/rpm/*``
+on the host system. Find the ``busybox-dbg-...rpm`` file and copy it
+to the target. For example::
[trz@empanada core2]$ scp /home/trz/yocto/crownbay-tracing-dbg/build/tmp/deploy/rpm/core2_32/busybox-dbg-1.20.2-r2.core2_32.rpm root@192.168.1.31:
busybox-dbg-1.20.2-r2.core2_32.rpm 100% 1826KB 1.8MB/s 00:01
-Now install the debug rpm on the target::
+Now install the debug RPM on the target::
root@crownbay:~# rpm -i busybox-dbg-1.20.2-r2.core2_32.rpm
-Now that the debuginfo is installed, we see that the BusyBox entries now display
+Now that the debugging information is installed, we see that the BusyBox entries display
their functions symbolically:
.. image:: figures/perf-wget-busybox-debuginfo.png
:align: center
-If we expand one of the entries and press 'enter' on a leaf node, we're
+If we expand one of the entries and press ``Enter`` on a leaf node, we're
presented with a menu of actions we can take to get more information
related to that entry:
@@ -340,16 +339,16 @@ related to that entry:
One of these actions allows us to show a view that displays a
busybox-centric view of the profiled functions (in this case we've also
-expanded all the nodes using the 'E' key):
+expanded all the nodes using the ``E`` key):
.. image:: figures/perf-wget-busybox-dso-zoom.png
:align: center
-Finally, we can see that now that the BusyBox debuginfo is installed,
+Finally, we can see that now that the BusyBox debugging information is installed,
the previously unresolved symbol in the ``sys_clock_gettime()`` entry
mentioned earlier is now resolved, and shows that the
-sys_clock_gettime system call that was the source of 6.75% of the
-copy-to-user overhead was initiated by the ``handle_input()`` BusyBox
+``sys_clock_gettime`` system call that was the source of 6.75% of the
+``copy-to-user`` overhead was initiated by the ``handle_input()`` BusyBox
function:
.. image:: figures/perf-wget-g-copy-to-user-expanded-debuginfo.png
@@ -357,14 +356,14 @@ function:
At the lowest level of detail, we can dive down to the assembly level
and see which instructions caused the most overhead in a function.
-Pressing 'enter' on the 'udhcpc_main' function, we're again presented
+Pressing ``Enter`` on the ``udhcpc_main`` function, we're again presented
with a menu:
.. image:: figures/perf-wget-busybox-annotate-menu.png
:align: center
-Selecting 'Annotate udhcpc_main', we get a detailed listing of
-percentages by instruction for the udhcpc_main function. From the
+Selecting ``Annotate udhcpc_main``, we get a detailed listing of
+percentages by instruction for the ``udhcpc_main`` function. From the
display, we can see that over 50% of the time spent in this function is
taken up by a couple tests and the move of a constant (1) to a register:
@@ -372,17 +371,17 @@ taken up by a couple tests and the move of a constant (1) to a register:
:align: center
As a segue into tracing, let's try another profile using a different
-counter, something other than the default 'cycles'.
+counter, something other than the default ``cycles``.
The tracing and profiling infrastructure in Linux has become unified in
a way that allows us to use the same tool with a completely different
set of counters, not just the standard hardware counters that
-traditional tools have had to restrict themselves to (of course the
-traditional tools can also make use of the expanded possibilities now
+traditional tools have had to restrict themselves to (the
+traditional tools can now also make use of the expanded possibilities
available to them, and in some cases have, as mentioned previously).
We can get a list of the available events that can be used to profile a
-workload via 'perf list'::
+workload via ``perf list``::
root@crownbay:~# perf list
@@ -518,14 +517,14 @@ workload via 'perf list'::
.. admonition:: Tying it Together
These are exactly the same set of events defined by the trace event
- subsystem and exposed by ftrace/tracecmd/kernelshark as files in
- /sys/kernel/debug/tracing/events, by SystemTap as
+ subsystem and exposed by ftrace / trace-cmd / KernelShark as files in
+ ``/sys/kernel/debug/tracing/events``, by SystemTap as
kernel.trace("tracepoint_name") and (partially) accessed by LTTng.
Only a subset of these would be of interest to us when looking at this
workload, so let's choose the most likely subsystems (identified by the
-string before the colon in the Tracepoint events) and do a 'perf stat'
-run using only those wildcarded subsystems::
+string before the colon in the ``Tracepoint`` events) and do a ``perf stat``
+run using only those subsystem wildcards::
root@crownbay:~# perf stat -e skb:* -e net:* -e napi:* -e sched:* -e workqueue:* -e irq:* -e syscalls:* wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
Performance counter stats for 'wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2':
@@ -596,8 +595,8 @@ and tell perf to do a profile using it as the sampling event::
The screenshot above shows the results of running a profile using
sched:sched_switch tracepoint, which shows the relative costs of various
-paths to sched_wakeup (note that sched_wakeup is the name of the
-tracepoint - it's actually defined just inside ttwu_do_wakeup(), which
+paths to ``sched_wakeup`` (note that ``sched_wakeup`` is the name of the
+tracepoint --- it's actually defined just inside ``ttwu_do_wakeup()``, which
accounts for the function name actually displayed in the profile:
.. code-block:: c
@@ -615,15 +614,15 @@ accounts for the function name actually displayed in the profile:
}
A couple of the more interesting
-callchains are expanded and displayed above, basically some network
-receive paths that presumably end up waking up wget (busybox) when
+call chains are expanded and displayed above, basically some network
+receive paths that presumably end up waking up ``wget`` (BusyBox) when
network data is ready.
Note that because tracepoints are normally used for tracing, the default
-sampling period for tracepoints is 1 i.e. for tracepoints perf will
-sample on every event occurrence (this can be changed using the -c
+sampling period for tracepoints is ``1``, i.e. perf will
+sample on every event occurrence (this can be changed using the ``-c``
option). This is in contrast to hardware counters such as for example
-the default 'cycles' hardware counter used for normal profiling, where
+the default ``cycles`` hardware counter used for normal profiling, where
sampling periods are much higher (in the thousands) because profiling
should have as low an overhead as possible and sampling on every cycle
would be prohibitively expensive.
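
For instance, if sampling on every occurrence of a busy tracepoint proved too
heavy, the period could be raised explicitly. The following is only a sketch,
and the period of 100 is an arbitrary illustrative value::

   root@crownbay:~# perf record -g -c 100 -e sched:sched_switch wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
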
@@ -634,10 +633,10 @@ Using perf to do Basic Tracing
Profiling is a great tool for solving many problems or for getting a
high-level view of what's going on with a workload or across the system.
It is however by definition an approximation, as suggested by the most
-prominent word associated with it, 'sampling'. On the one hand, it
+prominent word associated with it, "sampling". On the one hand, it
allows a representative picture of what's going on in the system to be
-cheaply taken, but on the other hand, that cheapness limits its utility
-when that data suggests a need to 'dive down' more deeply to discover
+cheaply taken, but, on the other hand, that cheapness limits its utility
+when that data suggests a need to "dive down" more deeply to discover
what's really going on. In such cases, the only way to see what's really
going on is to be able to look at (or summarize more intelligently) the
individual steps that go into the higher-level behavior exposed by the
@@ -650,7 +649,7 @@ applicable to our workload::
-e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write
wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
-We can look at the raw trace output using 'perf script' with no
+We can look at the raw trace output using ``perf script`` with no
arguments::
root@crownbay:~# perf script
@@ -681,7 +680,7 @@ arguments::
This gives us a detailed timestamped sequence of events that occurred within the
workload with respect to those events.
-In many ways, profiling can be viewed as a subset of tracing -
+In many ways, profiling can be viewed as a subset of tracing ---
theoretically, if you have a set of trace events that's sufficient to
capture all the important aspects of a workload, you can derive any of
the results or views that a profiling run can.
@@ -701,23 +700,23 @@ an infinite variety of ways.
Another way to look at it is that there are only so many ways that the
'primitive' counters can be used on their own to generate interesting
output; to get anything more complicated than simple counts requires
-some amount of additional logic, which is typically very specific to the
+some amount of additional logic, which is typically specific to the
problem at hand. For example, if we wanted to make use of a 'counter'
that maps to the value of the time difference between when a process was
scheduled to run on a processor and the time it actually ran, we
wouldn't expect such a counter to exist on its own, but we could derive
-one called say 'wakeup_latency' and use it to extract a useful view of
+one called say ``wakeup_latency`` and use it to extract a useful view of
that metric from trace data. Likewise, we really can't figure out from
standard profiling tools how much data every process on the system reads
and writes, along with how many of those reads and writes fail
completely. If we have sufficient trace data, however, we could with the
right tools easily extract and present that information, but we'd need
-something other than pre-canned profiling tools to do that.
+something other than ready-made profiling tools to do that.
Luckily, there is a general-purpose way to handle such needs, called
-'programming languages'. Making programming languages easily available
+"programming languages". Making programming languages easily available
to apply to such problems given the specific format of data is called a
-'programming language binding' for that data and language. Perf supports
+"programming language binding" for that data and language. perf supports
two programming language bindings, one for Python and one for Perl.
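
As a starting point, perf also ships with a number of ready-made report scripts
written against these bindings, implementing derived metrics much like the ones
discussed above (per-process syscall counts, wakeup latencies and so on). If the
scripting support is installed on your image, you can list them with::

   root@crownbay:~# perf script -l
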
.. admonition:: Tying it Together
@@ -727,21 +726,21 @@ two programming language bindings, one for Python and one for Perl.
DProbes dpcc compiler, an ANSI C compiler which targeted a low-level
assembly language running on an in-kernel interpreter on the target
system. This is exactly analogous to what Sun's DTrace did, except
- that DTrace invented its own language for the purpose. Systemtap,
+ that DTrace invented its own language for the purpose. SystemTap,
heavily inspired by DTrace, also created its own one-off language,
but rather than running the product on an in-kernel interpreter,
created an elaborate compiler-based machinery to translate its
language into kernel modules written in C.
-Now that we have the trace data in perf.data, we can use 'perf script
--g' to generate a skeleton script with handlers for the read/write
-entry/exit events we recorded::
+Now that we have the trace data in ``perf.data``, we can use ``perf script
+-g`` to generate a skeleton script with handlers for the read / write
+entry / exit events we recorded::
root@crownbay:~# perf script -g python
generated Python script: perf-script.py
-The skeleton script simply creates a python function for each event type in the
-perf.data file. The body of each function simply prints the event name along
+The skeleton script just creates a Python function for each event type in the
+``perf.data`` file. The body of each function just prints the event name along
with its parameters. For example:
.. code-block:: python
@@ -755,7 +754,7 @@ with its parameters. For example:
print "skbaddr=%u, len=%u, name=%s\n" % (skbaddr, len, name),
We can run that script directly to print all of the events contained in the
-perf.data file::
+``perf.data`` file::
root@crownbay:~# perf script -s perf-script.py
@@ -784,8 +783,8 @@ perf.data file::
syscalls__sys_exit_read 1 11624.859944032 1262 wget nr=3, ret=1024
That in itself isn't very useful; after all, we can accomplish pretty much the
-same thing by simply running 'perf script' without arguments in the same
-directory as the perf.data file.
+same thing by just running ``perf script`` without arguments in the same
+directory as the ``perf.data`` file.
We can however replace the print statements in the generated function
bodies with whatever we want, and thereby make it infinitely more
@@ -806,8 +805,8 @@ event. For example:
Each event handler function in the generated code
is modified to do this. For convenience, we define a common function
-called inc_counts() that each handler calls; inc_counts() simply tallies
-a count for each event using the 'counts' hash, which is a specialized
+called ``inc_counts()`` that each handler calls; ``inc_counts()`` just tallies
+a count for each event using the ``counts`` hash, which is a specialized
hash function that does Perl-like autovivification, a capability that's
extremely useful for the kinds of multi-level aggregation commonly used in
processing traces (see perf's documentation on the Python language
@@ -825,7 +824,7 @@ binding for details):
Finally, at the end of the trace processing run, we want to print the
result of all the per-event tallies. For that, we use the special
-'trace_end()' function:
+``trace_end()`` function:
.. code-block:: python
@@ -854,7 +853,7 @@ The end result is a summary of all the events recorded in the trace::
syscalls__sys_exit_write 8990
Note that this is
-pretty much exactly the same information we get from 'perf stat', which
+pretty much exactly the same information we get from ``perf stat``, which
goes a little way to support the idea mentioned previously that given
the right kind of trace data, higher-level profiling-type summaries can
be derived from it.
@@ -866,43 +865,43 @@ System-Wide Tracing and Profiling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The examples so far have focused on tracing a particular program or
-workload - in other words, every profiling run has specified the program
-to profile in the command-line e.g. 'perf record wget ...'.
+workload --- that is, every profiling run has specified the program
+to profile on the command line, e.g. ``perf record wget ...``.
It's also possible, and more interesting in many cases, to run a
system-wide profile or trace while running the workload in a separate
shell.
-To do system-wide profiling or tracing, you typically use the -a flag to
-'perf record'.
+To do system-wide profiling or tracing, you typically use the ``-a`` flag to
+``perf record``.
To demonstrate this, open up one window and start the profile using the
--a flag (press Ctrl-C to stop tracing)::
+``-a`` flag (press ``Ctrl-C`` to stop tracing)::
root@crownbay:~# perf record -g -a
^C[ perf record: Woken up 6 times to write data ]
[ perf record: Captured and wrote 1.400 MB perf.data (~61172 samples) ]
-In another window, run the wget test::
+In another window, run the ``wget`` test::
root@crownbay:~# wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2
Connecting to downloads.yoctoproject.org (140.211.169.59:80)
linux-2.6.19.2.tar.b 100% \|*******************************\| 41727k 0:00:00 ETA
-Here we see entries not only for our wget load, but for
+Here we see entries not only for our ``wget`` load, but for
other processes running on the system as well:
.. image:: figures/perf-systemwide.png
:align: center
-In the snapshot above, we can see callchains that originate in libc, and
-a callchain from Xorg that demonstrates that we're using a proprietary X
-driver in userspace (notice the presence of 'PVR' and some other
-unresolvable symbols in the expanded Xorg callchain).
+In the snapshot above, we can see call chains that originate in ``libc``, and
+a call chain from ``Xorg`` that demonstrates that we're using a proprietary X
+driver in user space (notice the presence of ``PVR`` and some other
+unresolvable symbols in the expanded ``Xorg`` call chain).
-Note also that we have both kernel and userspace entries in the above
-snapshot. We can also tell perf to focus on userspace but providing a
-modifier, in this case 'u', to the 'cycles' hardware counter when we
+Note also that we have both kernel and user space entries in the above
+snapshot. We can also tell perf to focus on user space by providing a
+modifier, in this case ``u``, to the ``cycles`` hardware counter when we
record a profile::
root@crownbay:~# perf record -g -a -e cycles:u
@@ -912,24 +911,24 @@ record a profile::
.. image:: figures/perf-report-cycles-u.png
:align: center
-Notice in the screenshot above, we see only userspace entries ([.])
+Notice that in the screenshot above, we see only user space entries (``[.]``).
-Finally, we can press 'enter' on a leaf node and select the 'Zoom into
-DSO' menu item to show only entries associated with a specific DSO. In
-the screenshot below, we've zoomed into the 'libc' DSO which shows all
-the entries associated with the libc-xxx.so DSO.
+Finally, we can press ``Enter`` on a leaf node and select the ``Zoom into
+DSO`` menu item to show only entries associated with a specific DSO. In
+the screenshot below, we've zoomed into the ``libc`` DSO which shows all
+the entries associated with the ``libc-xxx.so`` DSO.
.. image:: figures/perf-systemwide-libc.png
:align: center
-We can also use the system-wide -a switch to do system-wide tracing.
+We can also use the system-wide ``-a`` switch to do system-wide tracing.
Here we'll trace a couple of scheduler events::
root@crownbay:~# perf record -a -e sched:sched_switch -e sched:sched_wakeup
^C[ perf record: Woken up 38 times to write data ]
[ perf record: Captured and wrote 9.780 MB perf.data (~427299 samples) ]
-We can look at the raw output using 'perf script' with no arguments::
+We can look at the raw output using ``perf script`` with no arguments::
root@crownbay:~# perf script
@@ -947,11 +946,11 @@ We can look at the raw output using 'perf script' with no arguments::
Filtering
^^^^^^^^^
-Notice that there are a lot of events that don't really have anything to
-do with what we're interested in, namely events that schedule 'perf'
+Notice that there are many events that don't really have anything to
+do with what we're interested in, namely events that schedule ``perf``
itself in and out or that wake perf up. We can get rid of those by using
-the '--filter' option - for each event we specify using -e, we can add a
---filter after that to filter out trace events that contain fields with
+the ``--filter`` option --- for each event we specify using ``-e``, we can add a
+``--filter`` after that to filter out trace events that contain fields with
specific values::
root@crownbay:~# perf record -a -e sched:sched_switch --filter 'next_comm != perf && prev_comm != perf' -e sched:sched_wakeup --filter 'comm != perf'
@@ -977,16 +976,16 @@ specific values::
kworker/0:3 1209 [000] 7932.326214: sched_switch: prev_comm=kworker/0:3 prev_pid=1209 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120
In this case, we've filtered out all events that have
-'perf' in their 'comm' or 'comm_prev' or 'comm_next' fields. Notice that
+``perf`` in their ``comm``, ``prev_comm`` or ``next_comm`` fields. Notice that
there are still events recorded for perf, but notice that those events
-don't have values of 'perf' for the filtered fields. To completely
+don't have values of ``perf`` for the filtered fields. To completely
filter out anything from perf will require a bit more work, but for the
purpose of demonstrating how to use filters, it's close enough.
.. admonition:: Tying it Together
These are exactly the same set of event filters defined by the trace
- event subsystem. See the ftrace/tracecmd/kernelshark section for more
+ event subsystem. See the ftrace / trace-cmd / KernelShark section for more
discussion about these event filters.
.. admonition:: Tying it Together
@@ -996,14 +995,14 @@ purpose of demonstrating how to use filters, it's close enough.
indispensable part of the perf design as it relates to tracing.
kernel-based event filters provide a mechanism to precisely throttle
the event stream that appears in user space, where it makes sense to
- provide bindings to real programming languages for postprocessing the
+ provide bindings to real programming languages for post-processing the
event stream. This architecture allows for the intelligent and
flexible partitioning of processing between the kernel and user
space. Contrast this with other tools such as SystemTap, which does
all of its processing in the kernel and as such requires a special
project-defined language in order to accommodate that design, or
- LTTng, where everything is sent to userspace and as such requires a
- super-efficient kernel-to-userspace transport mechanism in order to
+ LTTng, where everything is sent to user space and as such requires a
+ super-efficient kernel-to-user space transport mechanism in order to
function properly. While perf certainly can benefit from for instance
advances in the design of the transport, it doesn't fundamentally
depend on them. Basically, if you find that your perf tracing
@@ -1014,9 +1013,9 @@ Using Dynamic Tracepoints
~~~~~~~~~~~~~~~~~~~~~~~~~
perf isn't restricted to the fixed set of static tracepoints listed by
-'perf list'. Users can also add their own 'dynamic' tracepoints anywhere
-in the kernel. For instance, suppose we want to define our own
-tracepoint on do_fork(). We can do that using the 'perf probe' perf
+``perf list``. Users can also add their own "dynamic" tracepoints anywhere
+in the kernel. For example, suppose we want to define our own
+tracepoint on ``do_fork()``. We can do that using the ``perf probe`` perf
subcommand::
root@crownbay:~# perf probe do_fork
@@ -1028,8 +1027,8 @@ subcommand::
perf record -e probe:do_fork -aR sleep 1
Adding a new tracepoint via
-'perf probe' results in an event with all the expected files and format
-in /sys/kernel/debug/tracing/events, just the same as for static
+``perf probe`` results in an event with all the expected files and format
+in ``/sys/kernel/debug/tracing/events``, just the same as for static
tracepoints (as discussed in more detail in the trace events subsystem
section)::
@@ -1045,13 +1044,13 @@ section::
name: do_fork
ID: 944
format:
- field:unsigned short common_type; offset:0; size:2; signed:0;
- field:unsigned char common_flags; offset:2; size:1; signed:0;
- field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
- field:int common_pid; offset:4; size:4; signed:1;
- field:int common_padding; offset:8; size:4; signed:1;
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:int common_padding; offset:8; size:4; signed:1;
- field:unsigned long __probe_ip; offset:12; size:4; signed:0;
+ field:unsigned long __probe_ip; offset:12; size:4; signed:0;
print fmt: "(%lx)", REC->__probe_ip
@@ -1062,7 +1061,7 @@ existence::
probe:do_fork (on do_fork)
probe:schedule (on schedule)
-Let's record system-wide ('sleep 30' is a
+Let's record system-wide (``sleep 30`` is a
trick for recording system-wide while basically doing nothing, and then waking
up after 30 seconds)::
@@ -1070,7 +1069,7 @@ up after 30 seconds)::
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.087 MB perf.data (~3812 samples) ]
-Using 'perf script' we can see each do_fork event that fired::
+Using ``perf script`` we can see each ``do_fork`` event that fired::
root@crownbay:~# perf script
@@ -1111,8 +1110,8 @@ Using 'perf script' we can see each do_fork event that fired::
matchbox-deskto 1311 [001] 34237.114106: do_fork: (c1028460)
gaku 1312 [000] 34237.202388: do_fork: (c1028460)
-And using 'perf report' on the same file, we can see the
-callgraphs from starting a few programs during those 30 seconds:
+And using ``perf report`` on the same file, we can see the
+call graphs from starting a few programs during those 30 seconds:
.. image:: figures/perf-probe-do_fork-profile.png
:align: center
@@ -1120,63 +1119,63 @@ callgraphs from starting a few programs during those 30 seconds:
.. admonition:: Tying it Together
The trace events subsystem accommodate static and dynamic tracepoints
- in exactly the same way - there's no difference as far as the
+ in exactly the same way --- there's no difference as far as the
infrastructure is concerned. See the ftrace section for more details
on the trace event subsystem.
.. admonition:: Tying it Together
- Dynamic tracepoints are implemented under the covers by kprobes and
- uprobes. kprobes and uprobes are also used by and in fact are the
+ Dynamic tracepoints are implemented under the covers by Kprobes and
+ Uprobes. Kprobes and Uprobes are also used by and in fact are the
main focus of SystemTap.
-Perf Documentation
+perf Documentation
------------------
-Online versions of the man pages for the commands discussed in this
+Online versions of the manual pages for the commands discussed in this
section can be found here:
-- The `'perf stat' manpage <https://linux.die.net/man/1/perf-stat>`__.
+- The `'perf stat' manual page <https://linux.die.net/man/1/perf-stat>`__.
- The `'perf record'
- manpage <https://linux.die.net/man/1/perf-record>`__.
+ manual page <https://linux.die.net/man/1/perf-record>`__.
- The `'perf report'
- manpage <https://linux.die.net/man/1/perf-report>`__.
+ manual page <https://linux.die.net/man/1/perf-report>`__.
-- The `'perf probe' manpage <https://linux.die.net/man/1/perf-probe>`__.
+- The `'perf probe' manual page <https://linux.die.net/man/1/perf-probe>`__.
- The `'perf script'
- manpage <https://linux.die.net/man/1/perf-script>`__.
+ manual page <https://linux.die.net/man/1/perf-script>`__.
- Documentation on using the `'perf script' python
binding <https://linux.die.net/man/1/perf-script-python>`__.
-- The top-level `perf(1) manpage <https://linux.die.net/man/1/perf>`__.
+- The top-level `perf(1) manual page <https://linux.die.net/man/1/perf>`__.
-Normally, you should be able to invoke the man pages via perf itself
-e.g. 'perf help' or 'perf help record'.
+Normally, you should be able to open the manual pages via perf itself,
+e.g. ``perf help`` or ``perf help record``.
-To have the perf manpages installed on your target, modify your
+To have the perf manual pages installed on your target, modify your
configuration as follows::
IMAGE_INSTALL:append = " perf perf-doc"
DISTRO_FEATURES:append = " api-documentation"
-The man pages in text form, along with some other files, such as a set
-of examples, can also be found in the 'perf' directory of the kernel tree::
+The manual pages in text form, along with some other files, such as a set
+of examples, can also be found in the ``perf`` directory of the kernel tree::
tools/perf/Documentation
There's also a nice perf tutorial on the perf
-wiki that goes into more detail than we do here in certain areas: `Perf
+wiki that goes into more detail than we do here in certain areas: `perf
Tutorial <https://perf.wiki.kernel.org/index.php/Tutorial>`__
ftrace
======
-'ftrace' literally refers to the 'ftrace function tracer' but in reality
-this encompasses a number of related tracers along with the
+"ftrace" literally refers to the "ftrace function tracer" but in reality
+this encompasses several related tracers along with the
infrastructure that they all make use of.
ftrace Setup
@@ -1185,20 +1184,20 @@ ftrace Setup
For this section, we'll assume you've already performed the basic setup
outlined in the ":ref:`profile-manual/intro:General Setup`" section.
-ftrace, trace-cmd, and kernelshark run on the target system, and are
-ready to go out-of-the-box - no additional setup is necessary. For the
-rest of this section we assume you've ssh'ed to the host and will be
-running ftrace on the target. kernelshark is a GUI application and if
-you use the '-X' option to ssh you can have the kernelshark GUI run on
+ftrace, trace-cmd, and KernelShark run on the target system, and are
+ready to go out-of-the-box --- no additional setup is necessary. For the
+rest of this section we assume you're connected to the host through SSH and
+will be running ftrace on the target. KernelShark is a GUI application, and if
+you use the ``-X`` option to ``ssh``, you can have the KernelShark GUI run on
the target but display remotely on the host if you want.
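
A hypothetical session doing just that might look like the following, assuming
the target is reachable under the ``sugarbay`` name used in the examples below
and that X11 forwarding is permitted on both ends::

   $ ssh -X root@sugarbay
   root@sugarbay:~# kernelshark
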
Basic ftrace usage
------------------
-'ftrace' essentially refers to everything included in the /tracing
+"ftrace" essentially refers to everything included in the ``/tracing``
directory of the mounted debugfs filesystem (Yocto follows the standard
-convention and mounts it at /sys/kernel/debug). Here's a listing of all
-the files found in /sys/kernel/debug/tracing on a Yocto system::
+convention and mounts it at ``/sys/kernel/debug``). All the files found in
+``/sys/kernel/debug/tracing`` on a Yocto system are::
root@sugarbay:/sys/kernel/debug/tracing# ls
README kprobe_events trace
@@ -1214,7 +1213,7 @@ the files found in /sys/kernel/debug/tracing on a Yocto system::
free_buffer set_graph_function
The files listed above are used for various purposes
-- some relate directly to the tracers themselves, others are used to set
+--- some relate directly to the tracers themselves, others are used to set
tracing options, and yet others actually contain the tracing output when
a tracer is in effect. Some of the functions can be guessed from their
names, others need explanation; in any case, we'll cover some of the
@@ -1223,30 +1222,30 @@ the ftrace documentation.
We'll start by looking at some of the available built-in tracers.
-cat'ing the 'available_tracers' file lists the set of available tracers::
+The ``available_tracers`` file lists the set of available tracers::
root@sugarbay:/sys/kernel/debug/tracing# cat available_tracers
blk function_graph function nop
-The 'current_tracer' file contains the tracer currently in effect::
+The ``current_tracer`` file contains the tracer currently in effect::
root@sugarbay:/sys/kernel/debug/tracing# cat current_tracer
nop
-The above listing of current_tracer shows that the
-'nop' tracer is in effect, which is just another way of saying that
+The above listing of ``current_tracer`` shows that the
+``nop`` tracer is in effect, which is just another way of saying that
there's actually no tracer currently in effect.
-echo'ing one of the available_tracers into current_tracer makes the
+Writing one of the available tracers into ``current_tracer`` makes the
specified tracer the current tracer::
root@sugarbay:/sys/kernel/debug/tracing# echo function > current_tracer
root@sugarbay:/sys/kernel/debug/tracing# cat current_tracer
function
-The above sets the current tracer to be the 'function tracer'. This tracer
+The above sets the current tracer to be the ``function`` tracer. This tracer
traces every function call in the kernel and makes it available as the
-contents of the 'trace' file. Reading the 'trace' file lists the
+contents of the ``trace`` file. Reading the ``trace`` file lists the
currently buffered function calls that have been traced by the function
tracer::
@@ -1293,7 +1292,7 @@ tracer::
.
Each line in the trace above shows what was happening in the kernel on a given
-cpu, to the level of detail of function calls. Each entry shows the function
+CPU, to the level of detail of function calls. Each entry shows the function
called, followed by its caller (after the arrow).
The function tracer gives you an extremely detailed idea of what the
@@ -1303,11 +1302,11 @@ great way to learn about how the kernel code works in a dynamic sense.
.. admonition:: Tying it Together
The ftrace function tracer is also available from within perf, as the
- ftrace:function tracepoint.
+ ``ftrace:function`` tracepoint.
It is a little more difficult to follow the call chains than it needs to
-be - luckily there's a variant of the function tracer that displays the
-callchains explicitly, called the 'function_graph' tracer::
+be --- luckily there's a variant of the function tracer that displays the
+call chains explicitly, called the ``function_graph`` tracer::
root@sugarbay:/sys/kernel/debug/tracing# echo function_graph > current_tracer
root@sugarbay:/sys/kernel/debug/tracing# cat trace | less
@@ -1422,11 +1421,11 @@ callchains explicitly, called the 'function_graph' tracer::
3) + 13.784 us | }
3) | sys_ioctl() {
-As you can see, the function_graph display is much easier
+As you can see, the ``function_graph`` display is much easier
to follow. Also note that in addition to the function calls and
associated braces, other events such as scheduler events are displayed
in context. In fact, you can freely include any tracepoint available in
-the trace events subsystem described in the next section by simply
+the trace events subsystem described in the next section by just
enabling those events, and they'll appear in context in the function
graph display. Quite a powerful tool for understanding kernel dynamics.
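
For example, to interleave scheduler switch events into the function graph
output shown above, you could enable just that tracepoint and re-read the trace.
This is a sketch using the event-enabling mechanism described in the next
section::

   root@sugarbay:/sys/kernel/debug/tracing# echo 1 > events/sched/sched_switch/enable
   root@sugarbay:/sys/kernel/debug/tracing# cat trace | less
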
@@ -1440,9 +1439,9 @@ The 'trace events' Subsystem
----------------------------
One especially important directory contained within the
-/sys/kernel/debug/tracing directory is the 'events' subdirectory, which
+``/sys/kernel/debug/tracing`` directory is the ``events`` subdirectory, which
contains representations of every tracepoint in the system. Listing out
-the contents of the 'events' subdirectory, we see mainly another set of
+the contents of the ``events`` subdirectory, we see mainly another set of
subdirectories::
root@sugarbay:/sys/kernel/debug/tracing# cd events
@@ -1490,9 +1489,9 @@ subdirectories::
drwxr-xr-x 26 root root 0 Nov 14 23:19 writeback
Each one of these subdirectories
-corresponds to a 'subsystem' and contains yet again more subdirectories,
+corresponds to a "subsystem" and contains yet again more subdirectories,
each one of those finally corresponding to a tracepoint. For example,
-here are the contents of the 'kmem' subsystem::
+here are the contents of the ``kmem`` subsystem::
root@sugarbay:/sys/kernel/debug/tracing/events# cd kmem
root@sugarbay:/sys/kernel/debug/tracing/events/kmem# ls -al
@@ -1514,7 +1513,7 @@ here are the contents of the 'kmem' subsystem::
drwxr-xr-x 2 root root 0 Nov 14 23:19 mm_page_pcpu_drain
Let's see what's inside the subdirectory for a
-specific tracepoint, in this case the one for kmalloc::
+specific tracepoint, in this case the one for ``kmalloc``::
root@sugarbay:/sys/kernel/debug/tracing/events/kmem# cd kmalloc
root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# ls -al
@@ -1525,28 +1524,28 @@ specific tracepoint, in this case the one for kmalloc::
-r--r--r-- 1 root root 0 Nov 14 23:19 format
-r--r--r-- 1 root root 0 Nov 14 23:19 id
-The 'format' file for the
+The ``format`` file for the
tracepoint describes the event in memory, which is used by the various
tracing tools that now make use of these tracepoints to parse the event
-and make sense of it, along with a 'print fmt' field that allows tools
-like ftrace to display the event as text. Here's what the format of the
-kmalloc event looks like::
+and make sense of it, along with a ``print fmt`` field that allows tools
+like ftrace to display the event as text. The format of the
+``kmalloc`` event looks like::
root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# cat format
name: kmalloc
ID: 313
format:
- field:unsigned short common_type; offset:0; size:2; signed:0;
- field:unsigned char common_flags; offset:2; size:1; signed:0;
- field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
- field:int common_pid; offset:4; size:4; signed:1;
- field:int common_padding; offset:8; size:4; signed:1;
-
- field:unsigned long call_site; offset:16; size:8; signed:0;
- field:const void * ptr; offset:24; size:8; signed:0;
- field:size_t bytes_req; offset:32; size:8; signed:0;
- field:size_t bytes_alloc; offset:40; size:8; signed:0;
- field:gfp_t gfp_flags; offset:48; size:4; signed:0;
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:int common_padding; offset:8; size:4; signed:1;
+
+ field:unsigned long call_site; offset:16; size:8; signed:0;
+ field:const void * ptr; offset:24; size:8; signed:0;
+ field:size_t bytes_req; offset:32; size:8; signed:0;
+ field:size_t bytes_alloc; offset:40; size:8; signed:0;
+ field:gfp_t gfp_flags; offset:48; size:4; signed:0;
print fmt: "call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", REC->call_site, REC->ptr, REC->bytes_req, REC->bytes_alloc,
(REC->gfp_flags) ? __print_flags(REC->gfp_flags, "|", {(unsigned long)(((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | ((
@@ -1565,11 +1564,11 @@ kmalloc event looks like::
long)(( gfp_t)0x08u), "GFP_MOVABLE"}, {(unsigned long)(( gfp_t)0), "GFP_NOTRACK"}, {(unsigned long)(( gfp_t)0x400000u), "GFP_NO_KSWAPD"},
{(unsigned long)(( gfp_t)0x800000u), "GFP_OTHER_NODE"} ) : "GFP_NOWAIT"
-The 'enable' file
+The ``enable`` file
in the tracepoint directory is what allows the user (or tools such as
-trace-cmd) to actually turn the tracepoint on and off. When enabled, the
-corresponding tracepoint will start appearing in the ftrace 'trace' file
-described previously. For example, this turns on the kmalloc tracepoint::
+``trace-cmd``) to actually turn the tracepoint on and off. When enabled, the
+corresponding tracepoint will start appearing in the ftrace ``trace`` file
+described previously. For example, this turns on the ``kmalloc`` tracepoint::
root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# echo 1 > enable
@@ -1581,8 +1580,8 @@ events in the output buffer::
root@sugarbay:/sys/kernel/debug/tracing# echo nop > current_tracer
root@sugarbay:/sys/kernel/debug/tracing# echo 1 > tracing_on
-Now, if we look at the 'trace' file, we see nothing
-but the kmalloc events we just turned on::
+Now, if we look at the ``trace`` file, we see nothing
+but the ``kmalloc`` events we just turned on::
root@sugarbay:/sys/kernel/debug/tracing# cat trace | less
# tracer: nop
@@ -1628,17 +1627,17 @@ but the kmalloc events we just turned on::
<idle>-0 [000] ..s3 18156.400660: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d554800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC
matchbox-termin-1361 [001] ...1 18156.552800: kmalloc: call_site=ffffffff81614050 ptr=ffff88006db34800 bytes_req=576 bytes_alloc=1024 gfp_flags=GFP_KERNEL|GFP_REPEAT
-To again disable the kmalloc event, we need to send 0 to the enable file::
+To again disable the ``kmalloc`` event, we need to send ``0`` to the ``enable`` file::
root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# echo 0 > enable
You can enable any number of events or complete subsystems (by
-using the 'enable' file in the subsystem directory) and get an
+using the ``enable`` file in the subsystem directory) and get an
arbitrarily fine-grained idea of what's going on in the system by
enabling as many of the appropriate tracepoints as applicable.
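+
+For example, assuming the ``kmem`` subsystem shown earlier, writing ``1`` to
+its top-level ``enable`` file turns on every ``kmem`` tracepoint at once::
+
+   root@sugarbay:/sys/kernel/debug/tracing/events/kmem# echo 1 > enable
+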
-A number of the tools described in this HOWTO do just that, including
-trace-cmd and kernelshark in the next section.
+Several tools described in this How-to do just that, including
+``trace-cmd`` and KernelShark in the next section.
.. admonition:: Tying it Together
@@ -1646,80 +1645,79 @@ trace-cmd and kernelshark in the next section.
ftrace, but by many of the other tools covered in this document and
they form a central point of integration for the various tracers
available in Linux. They form a central part of the instrumentation
- for the following tools: perf, lttng, ftrace, blktrace and SystemTap
+ for the following tools: perf, LTTng, ftrace, blktrace and SystemTap
.. admonition:: Tying it Together
Eventually all the special-purpose tracers currently available in
- /sys/kernel/debug/tracing will be removed and replaced with
- equivalent tracers based on the 'trace events' subsystem.
+ ``/sys/kernel/debug/tracing`` will be removed and replaced with
+ equivalent tracers based on the "trace events" subsystem.
-trace-cmd/kernelshark
----------------------
+trace-cmd / KernelShark
+-----------------------
-trace-cmd is essentially an extensive command-line 'wrapper' interface
+trace-cmd is essentially an extensive command-line "wrapper" interface
that hides the details of all the individual files in
-/sys/kernel/debug/tracing, allowing users to specify specific particular
-events within the /sys/kernel/debug/tracing/events/ subdirectory and to
+``/sys/kernel/debug/tracing``, allowing users to specify particular
+events within the ``/sys/kernel/debug/tracing/events/`` subdirectory and to
collect traces and avoid having to deal with those details directly.
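+
+For instance, the ``kmalloc`` tracepoint we enabled by hand above can be
+recorded and then viewed with a pair of commands along these lines (a sketch
+of typical trace-cmd usage rather than a complete walkthrough)::
+
+   root@sugarbay:~# trace-cmd record -e kmem:kmalloc
+   root@sugarbay:~# trace-cmd report
+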
-As yet another layer on top of that, kernelshark provides a GUI that
+As yet another layer on top of that, KernelShark provides a GUI that
allows users to start and stop traces and specify sets of events using
an intuitive interface, and view the output as both trace events and as
-a per-CPU graphical display. It directly uses 'trace-cmd' as the
+a per-CPU graphical display. It directly uses trace-cmd as the
plumbing that accomplishes all that underneath the covers (and actually
displays the trace-cmd command it uses, as we'll see).
-To start a trace using kernelshark, first start kernelshark::
+To start a trace using KernelShark, first start this tool::
root@sugarbay:~# kernelshark
-Then bring up the 'Capture' dialog by
-choosing from the kernelshark menu::
+Then open up the ``Capture`` dialog by choosing from the KernelShark menu::
Capture | Record
That will display the following dialog, which allows you to choose one or more
-events (or even one or more complete subsystems) to trace:
+events (or even entire subsystems) to trace:
.. image:: figures/kernelshark-choose-events.png
:align: center
Note that these are exactly the same sets of events described in the
previous trace events subsystem section, and in fact is where trace-cmd
-gets them for kernelshark.
+gets them for KernelShark.
In the above screenshot, we've decided to explore the graphics subsystem
a bit and so have chosen to trace all the tracepoints contained within
-the 'i915' and 'drm' subsystems.
+the ``i915`` and ``drm`` subsystems.
-After doing that, we can start and stop the trace using the 'Run' and
-'Stop' button on the lower right corner of the dialog (the same button
+After doing that, we can start and stop the trace using the ``Run`` and
+``Stop`` button on the lower right corner of the dialog (the same button
will turn into the 'Stop' button after the trace has started):
.. image:: figures/kernelshark-output-display.png
:align: center
-Notice that the right-hand pane shows the exact trace-cmd command-line
+Notice that the right pane shows the exact trace-cmd command line
that's used to run the trace, along with the results of the trace-cmd
run.
-Once the 'Stop' button is pressed, the graphical view magically fills up
-with a colorful per-cpu display of the trace data, along with the
+Once the ``Stop`` button is pressed, the graphical view magically fills up
+with a colorful per-CPU display of the trace data, along with the
detailed event listing below that:
.. image:: figures/kernelshark-i915-display.png
:align: center
-Here's another example, this time a display resulting from tracing 'all
-events':
+Here's another example, this time a display resulting from tracing ``all
+events``:
.. image:: figures/kernelshark-all.png
:align: center
The tool is pretty self-explanatory, but for more detailed information
-on navigating through the data, see the `kernelshark
-website <https://rostedt.homelinux.com/kernelshark/>`__.
+on navigating through the data, see the `KernelShark
+website <https://kernelshark.org/Documentation.html>`__.
ftrace Documentation
--------------------
@@ -1734,41 +1732,41 @@ Documentation directory::
Documentation/trace/events.txt
-There is a nice series of articles on using ftrace and trace-cmd at LWN:
+A nice series of articles on using ftrace and trace-cmd is available at LWN:
-- `Debugging the kernel using Ftrace - part
+- `Debugging the kernel using ftrace - part
1 <https://lwn.net/Articles/365835/>`__
-- `Debugging the kernel using Ftrace - part
+- `Debugging the kernel using ftrace - part
2 <https://lwn.net/Articles/366796/>`__
-- `Secrets of the Ftrace function
+- `Secrets of the ftrace function
tracer <https://lwn.net/Articles/370423/>`__
- `trace-cmd: A front-end for
- Ftrace <https://lwn.net/Articles/410200/>`__
+ ftrace <https://lwn.net/Articles/410200/>`__
-There's more detailed documentation kernelshark usage here:
-`KernelShark <https://rostedt.homelinux.com/kernelshark/>`__
+See also `KernelShark's documentation <https://kernelshark.org/Documentation.html>`__
+for further usage details.
-An amusing yet useful README (a tracing mini-HOWTO) can be found in
+An amusing yet useful README (a tracing mini-How-to) can be found in
``/sys/kernel/debug/tracing/README``.
-systemtap
+SystemTap
=========
SystemTap is a system-wide script-based tracing and profiling tool.
SystemTap scripts are C-like programs that are executed in the kernel to
-gather/print/aggregate data extracted from the context they end up being
-invoked under.
+gather / print / aggregate data extracted from the context they end up being
+called under.
For example, this probe from the `SystemTap
-tutorial <https://sourceware.org/systemtap/tutorial/>`__ simply prints a
-line every time any process on the system open()s a file. For each line,
+tutorial <https://sourceware.org/systemtap/tutorial/>`__ just prints a
+line every time any process on the system runs ``open()`` on a file. For each line,
it prints the executable name of the program that opened the file, along
-with its PID, and the name of the file it opened (or tried to open),
-which it extracts from the open syscall's argstr.
+with its PID, and the name of the file it opened (or tried to open), which it
+extracts from the argument string (``argstr``) of the ``open`` system call.
.. code-block:: none
@@ -1783,48 +1781,48 @@ which it extracts from the open syscall's argstr.
}
Normally, to execute this
-probe, you'd simply install systemtap on the system you want to probe,
+probe, you'd just install SystemTap on the system you want to probe,
and directly run the probe on that system e.g. assuming the name of the
-file containing the above text is trace_open.stp::
+file containing the above text is ``trace_open.stp``::
# stap trace_open.stp
-What systemtap does under the covers to run this probe is 1) parse and
-convert the probe to an equivalent 'C' form, 2) compile the 'C' form
+What SystemTap does under the covers to run this probe is 1) parse and
+convert the probe to an equivalent "C" form, 2) compile the "C" form
into a kernel module, 3) insert the module into the kernel, which arms
it, and 4) collect the data generated by the probe and display it to the
user.
-In order to accomplish steps 1 and 2, the 'stap' program needs access to
+In order to accomplish steps 1 and 2, the ``stap`` program needs access to
the kernel build system that produced the kernel that the probed system
-is running. In the case of a typical embedded system (the 'target'), the
+is running. In the case of a typical embedded system (the "target"), the
kernel build system unfortunately isn't typically part of the image
-running on the target. It is normally available on the 'host' system
+running on the target. It is normally available on the "host" system
that produced the target image however; in such cases, steps 1 and 2 are
executed on the host system, and steps 3 and 4 are executed on the
-target system, using only the systemtap 'runtime'.
+target system, using only the SystemTap "runtime".
-The systemtap support in Yocto assumes that only steps 3 and 4 are run
+The SystemTap support in Yocto assumes that only steps 3 and 4 are run
on the target; it is possible to do everything on the target, but this
section assumes only the typical embedded use-case.
-So basically what you need to do in order to run a systemtap script on
+Therefore, what you need to do in order to run a SystemTap script on
the target is to 1) on the host system, compile the probe into a kernel
module that makes sense to the target, 2) copy the module onto the
target system and 3) insert the module into the target kernel, which
arms it, and 4) collect the data generated by the probe and display it
to the user.
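+
+Done by hand, that host/target split looks roughly like the following sketch
+(the paths and the ``trace_open`` module name are only illustrative, extra
+cross-compilation options may be needed, and the ``crosstap`` script described
+in the next section takes care of these details for you)::
+
+   # on the host: steps 1 and 2, producing a module for the target kernel
+   $ stap -r /path/to/target/kernel/build -p4 -m trace_open trace_open.stp
+
+   # on the target: steps 3 and 4, inserting the module and collecting output
+   root@target:~# staprun trace_open.ko
+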
-systemtap Setup
+SystemTap Setup
---------------
-Those are a lot of steps and a lot of details, but fortunately Yocto
-includes a script called 'crosstap' that will take care of those
-details, allowing you to simply execute a systemtap script on the remote
+Those are quite a few steps and details, but fortunately Yocto
+includes a script called ``crosstap`` that will take care of those
+details, allowing you to just execute a SystemTap script on the remote
target, with arguments if necessary.
In order to do this from a remote host, however, you need to have access
-to the build for the image you booted. The 'crosstap' script provides
+to the build for the image you booted. The ``crosstap`` script provides
details on how to do this if you run the script on the host without
having done a build::
@@ -1833,29 +1831,35 @@ having done a build::
Error: No target kernel build found.
Did you forget to create a local build of your image?
- 'crosstap' requires a local sdk build of the target system
- (or a build that includes 'tools-profile') in order to build
- kernel modules that can probe the target system.
-
- Practically speaking, that means you need to do the following:
- - If you're running a pre-built image, download the release
- and/or BSP tarballs used to build the image.
- - If you're working from git sources, just clone the metadata
- and BSP layers needed to build the image you'll be booting.
- - Make sure you're properly set up to build a new image (see
- the BSP README and/or the widely available basic documentation
- that discusses how to build images).
- - Build an -sdk version of the image e.g.:
- $ bitbake core-image-sato-sdk
- OR
- - Build a non-sdk image but include the profiling tools:
- [ edit local.conf and add 'tools-profile' to the end of
- the EXTRA_IMAGE_FEATURES variable ]
- $ bitbake core-image-sato
+'crosstap' requires a local SDK build of the target system
+(or a build that includes 'tools-profile') in order to build
+kernel modules that can probe the target system.
+
+Practically speaking, that means you need to do the following:
+
+- If you're running a pre-built image, download the release
+ and/or BSP tarballs used to build the image.
+
+- If you're working from git sources, just clone the metadata
+ and BSP layers needed to build the image you'll be booting.
+
+- Make sure you're properly set up to build a new image (see
+ the BSP README and/or the widely available basic documentation
+ that discusses how to build images).
+
+- Build an ``-sdk`` version of the image e.g.::
+
+ $ bitbake core-image-sato-sdk
+
+- Or build a non-SDK image but include the profiling tools
+ (edit ``local.conf`` and add ``tools-profile`` to the end of
+ :term:`EXTRA_IMAGE_FEATURES` variable)::
+
+ $ bitbake core-image-sato
Once you've built the image on the host system, you're ready to
- boot it (or the equivalent pre-built image) and use 'crosstap'
- to probe it (you need to source the environment as usual first):
+ boot it (or the equivalent pre-built image) and use ``crosstap``
+ to probe it (you need to source the environment as usual first)::
$ source oe-init-build-env
$ cd ~/my/systemtap/scripts
@@ -1863,29 +1867,27 @@ having done a build::
.. note::
- SystemTap, which uses 'crosstap', assumes you can establish an ssh
+ SystemTap, which uses ``crosstap``, assumes you can establish an SSH
connection to the remote target. Please refer to the crosstap wiki
- page for details on verifying ssh connections at
- . Also, the ability to ssh into the target system is not enabled by
- default in \*-minimal images.
+ page for details on verifying SSH connections. Also, the ability to SSH
+ into the target system is not enabled by default in ``*-minimal`` images.
-So essentially what you need to
-do is build an SDK image or image with 'tools-profile' as detailed in
-the ":ref:`profile-manual/intro:General Setup`" section of this
-manual, and boot the resulting target image.
+Therefore, what you need to do is build an SDK image or image with
+``tools-profile`` as detailed in the ":ref:`profile-manual/intro:General Setup`"
+section of this manual, and boot the resulting target image.
.. note::
- If you have a build directory containing multiple machines, you need
- to have the MACHINE you're connecting to selected in local.conf, and
- the kernel in that machine's build directory must match the kernel on
- the booted system exactly, or you'll get the above 'crosstap' message
- when you try to invoke a script.
+ If you have a :term:`Build Directory` containing multiple machines, you need
+ to have the :term:`MACHINE` you're connecting to selected in ``local.conf``, and
+ the kernel in that machine's :term:`Build Directory` must match the kernel on
+ the booted system exactly, or you'll get the above ``crosstap`` message
+ when you try to call a script.
Running a Script on a Target
----------------------------
-Once you've done that, you should be able to run a systemtap script on
+Once you've done that, you should be able to run a SystemTap script on
the target::
$ cd /path/to/yocto
@@ -1903,8 +1905,8 @@ the target::
You can also run generated QEMU images with a command like 'runqemu qemux86-64'
-Once you've done that, you can cd to whatever
-directory contains your scripts and use 'crosstap' to run the script::
+Once you've done that, you can ``cd`` to whatever
+directory contains your scripts and use ``crosstap`` to run the script::
$ cd /path/to/my/systemap/script
$ crosstap root@192.168.7.2 trace_open.stp
@@ -1914,13 +1916,12 @@ If you get an error connecting to the target e.g.::
$ crosstap root@192.168.7.2 trace_open.stp
error establishing ssh connection on remote 'root@192.168.7.2'
-Try ssh'ing to the target and see what happens::
+Try connecting to the target through SSH and see what happens::
$ ssh root@192.168.7.2
-A lot of the time, connection
-problems are due specifying a wrong IP address or having a 'host key
-verification error'.
+Connection problems are often due to specifying a wrong IP address or having a ``host key
+verification error``.
If everything worked as planned, you should see something like this
(enter the password when prompted, or press enter if it's set up to use
@@ -1933,7 +1934,7 @@ no password):
matchbox-termin(1036) open ("/tmp/vte3FS2LW", O_RDWR|O_CREAT|O_EXCL|O_LARGEFILE, 0600)
matchbox-termin(1036) open ("/tmp/vteJMC7LW", O_RDWR|O_CREAT|O_EXCL|O_LARGEFILE, 0600)
-systemtap Documentation
+SystemTap Documentation
-----------------------
The SystemTap language reference can be found here: `SystemTap Language
@@ -1946,7 +1947,7 @@ page <https://sourceware.org/systemtap/documentation.html>`__
Sysprof
=======
-Sysprof is a very easy to use system-wide profiler that consists of a
+Sysprof is an easy to use system-wide profiler that consists of a
single window with three panes and a few buttons which allow you to
start, stop, and view the profile from one place.
@@ -1956,18 +1957,18 @@ Sysprof Setup
For this section, we'll assume you've already performed the basic setup
outlined in the ":ref:`profile-manual/intro:General Setup`" section.
-Sysprof is a GUI-based application that runs on the target system. For
-the rest of this document we assume you've ssh'ed to the host and will
-be running Sysprof on the target (you can use the '-X' option to ssh and
+Sysprof is a GUI-based application that runs on the target system. For the rest
+of this document we assume you're connected to the host through SSH and will be
+running Sysprof on the target (you can use the ``-X`` option to ``ssh`` and
have the Sysprof GUI run on the target but display remotely on the host
if you want).
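+
+For example, a sketch of the latter, where the target IP address is only an
+illustration::
+
+   $ ssh -X root@192.168.7.2 sysprof
+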
Basic Sysprof Usage
-------------------
-To start profiling the system, you simply press the 'Start' button. To
+To start profiling the system, you just press the ``Start`` button. To
stop profiling and to start viewing the profile data in one easy step,
-press the 'Profile' button.
+press the ``Profile`` button.
Once you've pressed the profile button, the three panes will fill up
with profiling data:
@@ -1978,11 +1979,11 @@ with profiling data:
The left pane shows a list of functions and processes. Selecting one of
those expands that function in the right pane, showing all its callees.
Note that this caller-oriented display is essentially the inverse of
-perf's default callee-oriented callchain display.
+perf's default callee-oriented call chain display.
In the screenshot above, we're focusing on ``__copy_to_user_ll()`` and
-looking up the callchain we can see that one of the callers of
-``__copy_to_user_ll`` is sys_read() and the complete callpath between them.
+looking up the call chain we can see that one of the callers of
+``__copy_to_user_ll`` is ``sys_read()`` and the complete call path between them.
Notice that this is essentially a portion of the same information we saw
in the perf display shown in the perf section of this page.
@@ -1990,7 +1991,7 @@ in the perf display shown in the perf section of this page.
:align: center
Similarly, the above is a snapshot of the Sysprof display of a
-copy-from-user callchain.
+``copy-from-user`` call chain.
Finally, looking at the third Sysprof pane in the lower left, we can see
a list of all the callers of a particular function selected in the top
@@ -2005,18 +2006,17 @@ to the selected function, and so on.
.. admonition:: Tying it Together
- If you like sysprof's 'caller-oriented' display, you may be able to
- approximate it in other tools as well. For example, 'perf report' has
- the -g (--call-graph) option that you can experiment with; one of the
- options is 'caller' for an inverted caller-based callgraph display.
+ If you like Sysprof's ``caller-oriented`` display, you may be able to
+ approximate it in other tools as well. For example, ``perf report`` has
+ the ``-g`` (``--call-graph``) option that you can experiment with; one of the
+ options is ``caller`` for an inverted caller-based call graph display.
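+
+   For instance, a sketch of one such invocation (the exact ``-g`` argument
+   syntax varies between perf versions)::
+
+      $ perf report -g graph,0.5,caller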
Sysprof Documentation
---------------------
There doesn't seem to be any documentation for Sysprof, but maybe that's
-because it's pretty self-explanatory. The Sysprof website, however, is
-here: `Sysprof, System-wide Performance Profiler for
-Linux <http://sysprof.com/>`__
+because it's pretty self-explanatory. The Sysprof website, however, is here:
+`Sysprof, System-wide Performance Profiler for Linux <http://sysprof.com/>`__
LTTng (Linux Trace Toolkit, next generation)
============================================
@@ -2026,20 +2026,20 @@ LTTng Setup
For this section, we'll assume you've already performed the basic setup
outlined in the ":ref:`profile-manual/intro:General Setup`" section.
-LTTng is run on the target system by ssh'ing to it.
+LTTng is run on the target system by connecting to it through SSH.
Collecting and Viewing Traces
-----------------------------
Once you've applied the above commits and built and booted your image
-(you need to build the core-image-sato-sdk image or use one of the other
+(you need to build the ``core-image-sato-sdk`` image or use one of the other
methods described in the ":ref:`profile-manual/intro:General Setup`" section), you're ready to start
tracing.
Collecting and viewing a trace on the target (inside a shell)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-First, from the host, ssh to the target::
+First, from the host, connect to the target through SSH::
$ ssh -l root 192.168.1.47
The authenticity of host '192.168.1.47 (192.168.1.47)' can't be established.
@@ -2116,31 +2116,31 @@ You can now view the trace in text form on the target::
.
You can now safely destroy the trace
-session (note that this doesn't delete the trace - it's still there in
-~/lttng-traces)::
+session (note that this doesn't delete the trace --- it's still there in
+``~/lttng-traces``)::
root@crownbay:~# lttng destroy
Session auto-20121015-232120 destroyed at /home/root
Note that the trace is saved in a directory of the same name as returned by
-'lttng create', under the ~/lttng-traces directory (note that you can change this by
-supplying your own name to 'lttng create')::
+``lttng create``, under the ``~/lttng-traces`` directory (note that you can change this by
+supplying your own name to ``lttng create``)::
root@crownbay:~# ls -al ~/lttng-traces
drwxrwx--- 3 root root 1024 Oct 15 23:21 .
drwxr-xr-x 5 root root 1024 Oct 15 23:57 ..
drwxrwx--- 3 root root 1024 Oct 15 23:21 auto-20121015-232120
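+
+For example, supplying a session name of your own (``mysession`` is only an
+illustration) makes the trace land under a matching ``mysession-<timestamp>``
+directory instead::
+
+   root@crownbay:~# lttng create mysession
+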
-Collecting and viewing a userspace trace on the target (inside a shell)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Collecting and viewing a user space trace on the target (inside a shell)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For LTTng userspace tracing, you need to have a properly instrumented
-userspace program. For this example, we'll use the 'hello' test program
-generated by the lttng-ust build.
+For LTTng user space tracing, you need to have a properly instrumented
+user space program. For this example, we'll use the ``hello`` test program
+generated by the ``lttng-ust`` build.
-The 'hello' test program isn't installed on the root filesystem by the lttng-ust
-build, so we need to copy it over manually. First cd into the build
-directory that contains the hello executable::
+The ``hello`` test program isn't installed on the root filesystem by the ``lttng-ust``
+build, so we need to copy it over manually. First ``cd`` into the build
+directory that contains the ``hello`` executable::
$ cd build/tmp/work/core2_32-poky-linux/lttng-ust/2.0.5-r0/git/tests/hello/.libs
@@ -2148,10 +2148,10 @@ Copy that over to the target machine::
$ scp hello root@192.168.1.20:
-You now have the instrumented lttng 'hello world' test program on the
+You now have the instrumented LTTng "hello world" test program on the
target, ready to test.
-First, from the host, ssh to the target::
+First, from the host, connect to the target through SSH::
$ ssh -l root 192.168.1.47
The authenticity of host '192.168.1.47 (192.168.1.47)' can't be established.
@@ -2166,7 +2166,7 @@ Once on the target, use these steps to create a trace::
Session auto-20190303-021943 created.
Traces will be written in /home/root/lttng-traces/auto-20190303-021943
-Enable the events you want to trace (in this case all userspace events)::
+Enable the events you want to trace (in this case all user space events)::
root@crownbay:~# lttng enable-event --userspace --all
All UST events are enabled in channel channel0
@@ -2176,7 +2176,7 @@ Start the trace::
root@crownbay:~# lttng start
Tracing started for session auto-20190303-021943
-Run the instrumented hello world program::
+Run the instrumented "hello world" program::
root@crownbay:~# ./hello
Hello, World!
@@ -2200,7 +2200,7 @@ You can now view the trace in text form on the target::
.
You can now safely destroy the trace session (note that this doesn't delete the
-trace - it's still there in ~/lttng-traces)::
+trace --- it's still there in ``~/lttng-traces``)::
root@crownbay:~# lttng destroy
Session auto-20190303-021943 destroyed at /home/root
@@ -2238,27 +2238,27 @@ the entire blktrace and blkparse pipeline on the target, or you can run
blktrace in 'listen' mode on the target and have blktrace and blkparse
collect and analyze the data on the host (see the
":ref:`profile-manual/usage:Using blktrace Remotely`" section
-below). For the rest of this section we assume you've ssh'ed to the host and
-will be running blkrace on the target.
+below). For the rest of this section we assume you've connected to the host through SSH
+and will be running blktrace on the target.
Basic blktrace Usage
--------------------
-To record a trace, simply run the 'blktrace' command, giving it the name
+To record a trace, just run the ``blktrace`` command, giving it the name
of the block device you want to trace activity on::
root@crownbay:~# blktrace /dev/sdc
-In another shell, execute a workload you want to trace. ::
+In another shell, execute a workload you want to trace::
root@crownbay:/media/sdc# rm linux-2.6.19.2.tar.bz2; wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2; sync
Connecting to downloads.yoctoproject.org (140.211.169.59:80)
linux-2.6.19.2.tar.b 100% \|*******************************\| 41727k 0:00:00 ETA
-Press Ctrl-C in the blktrace shell to stop the trace. It
+Press ``Ctrl-C`` in the blktrace shell to stop the trace. It
will display how many events were logged, along with the per-cpu file
-sizes (blktrace records traces in per-cpu kernel buffers and simply
-dumps them to userspace for blkparse to merge and sort later). ::
+sizes (blktrace records traces in per-cpu kernel buffers and just
+dumps them to user space for blkparse to merge and sort later)::
^C=== sdc ===
CPU 0: 7082 events, 332 KiB data
@@ -2274,7 +2274,7 @@ with the device name as the first part of the filename::
-rw-r--r-- 1 root root 339938 Oct 27 22:40 sdc.blktrace.0
-rw-r--r-- 1 root root 75753 Oct 27 22:40 sdc.blktrace.1
-To view the trace events, simply invoke 'blkparse' in the directory
+To view the trace events, just call ``blkparse`` in the directory
containing the trace files, giving it the device name that forms the
first part of the filenames::
@@ -2333,29 +2333,29 @@ first part of the filenames::
8,32 1 0 58.516990819 0 m N cfq3551 put_queue
CPU0 (sdc):
- Reads Queued: 0, 0KiB Writes Queued: 331, 26,284KiB
- Read Dispatches: 0, 0KiB Write Dispatches: 485, 40,484KiB
- Reads Requeued: 0 Writes Requeued: 0
- Reads Completed: 0, 0KiB Writes Completed: 511, 41,000KiB
- Read Merges: 0, 0KiB Write Merges: 13, 160KiB
- Read depth: 0 Write depth: 2
- IO unplugs: 23 Timer unplugs: 0
+ Reads Queued: 0, 0KiB Writes Queued: 331, 26,284KiB
+ Read Dispatches: 0, 0KiB Write Dispatches: 485, 40,484KiB
+ Reads Requeued: 0 Writes Requeued: 0
+ Reads Completed: 0, 0KiB Writes Completed: 511, 41,000KiB
+ Read Merges: 0, 0KiB Write Merges: 13, 160KiB
+ Read depth: 0 Write depth: 2
+ IO unplugs: 23 Timer unplugs: 0
CPU1 (sdc):
- Reads Queued: 0, 0KiB Writes Queued: 249, 15,800KiB
- Read Dispatches: 0, 0KiB Write Dispatches: 42, 1,600KiB
- Reads Requeued: 0 Writes Requeued: 0
- Reads Completed: 0, 0KiB Writes Completed: 16, 1,084KiB
- Read Merges: 0, 0KiB Write Merges: 40, 276KiB
- Read depth: 0 Write depth: 2
- IO unplugs: 30 Timer unplugs: 1
+ Reads Queued: 0, 0KiB Writes Queued: 249, 15,800KiB
+ Read Dispatches: 0, 0KiB Write Dispatches: 42, 1,600KiB
+ Reads Requeued: 0 Writes Requeued: 0
+ Reads Completed: 0, 0KiB Writes Completed: 16, 1,084KiB
+ Read Merges: 0, 0KiB Write Merges: 40, 276KiB
+ Read depth: 0 Write depth: 2
+ IO unplugs: 30 Timer unplugs: 1
Total (sdc):
- Reads Queued: 0, 0KiB Writes Queued: 580, 42,084KiB
- Read Dispatches: 0, 0KiB Write Dispatches: 527, 42,084KiB
- Reads Requeued: 0 Writes Requeued: 0
- Reads Completed: 0, 0KiB Writes Completed: 527, 42,084KiB
- Read Merges: 0, 0KiB Write Merges: 53, 436KiB
- IO unplugs: 53 Timer unplugs: 1
+ Reads Queued: 0, 0KiB Writes Queued: 580, 42,084KiB
+ Read Dispatches: 0, 0KiB Write Dispatches: 527, 42,084KiB
+ Reads Requeued: 0 Writes Requeued: 0
+ Reads Completed: 0, 0KiB Writes Completed: 527, 42,084KiB
+ Read Merges: 0, 0KiB Write Merges: 53, 436KiB
+ IO unplugs: 53 Timer unplugs: 1
Throughput (R/W): 0KiB/s / 719KiB/s
Events (sdc): 6,592 entries
@@ -2366,15 +2366,15 @@ first part of the filenames::
The report shows each event that was
found in the blktrace data, along with a summary of the overall block
I/O traffic during the run. You can look at the
-`blkparse <https://linux.die.net/man/1/blkparse>`__ manpage to learn the
+`blkparse <https://linux.die.net/man/1/blkparse>`__ manual page to learn the
meaning of each field displayed in the trace listing.
Live Mode
~~~~~~~~~
blktrace and blkparse are designed from the ground up to be able to
-operate together in a 'pipe mode' where the stdout of blktrace can be
-fed directly into the stdin of blkparse::
+operate together in a "pipe mode" where the standard output of blktrace can be
+fed directly into the standard input of blkparse::
root@crownbay:~# blktrace /dev/sdc -o - | blkparse -i -
@@ -2401,30 +2401,31 @@ tracer writes to, blktrace provides a way to trace without perturbing
the traced device at all by providing native support for sending all
trace data over the network.
-To have blktrace operate in this mode, start blktrace on the target
-system being traced with the -l option, along with the device to trace::
+To have blktrace operate in this mode, start blktrace in server mode on the
+host system, which is going to store the captured data::
- root@crownbay:~# blktrace -l /dev/sdc
+ $ blktrace -l
server: waiting for connections...
-On the host system, use the -h option to connect to the target system,
-also passing it the device to trace::
+On the target system that is going to be traced, start blktrace in client
+mode with the -h option to connect to the host system, also passing it the
+device to trace::
- $ blktrace -d /dev/sdc -h 192.168.1.43
+ root@crownbay:~# blktrace -d /dev/sdc -h 192.168.1.43
blktrace: connecting to 192.168.1.43
blktrace: connected!
-On the target system, you should see this::
+On the host system, you should see this::
server: connection from 192.168.1.43
-In another shell, execute a workload you want to trace. ::
+In another shell, execute a workload you want to trace::
root@crownbay:/media/sdc# rm linux-2.6.19.2.tar.bz2; wget &YOCTO_DL_URL;/mirror/sources/linux-2.6.19.2.tar.bz2; sync
Connecting to downloads.yoctoproject.org (140.211.169.59:80)
linux-2.6.19.2.tar.b 100% \|*******************************\| 41727k 0:00:00 ETA
-When it's done, do a Ctrl-C on the host system to stop the
+When it's done, do a ``Ctrl-C`` on the target system to stop the
trace::
^C=== sdc ===
@@ -2432,7 +2433,7 @@ trace::
CPU 1: 4109 events, 193 KiB data
Total: 11800 events (dropped 0), 554 KiB data
-On the target system, you should also see a trace summary for the trace
+On the host system, you should also see a trace summary for the trace
just ended::
server: end of run for 192.168.1.43:sdc
@@ -2442,14 +2443,14 @@ just ended::
Total: 11800 events (dropped 0), 554 KiB data
The blktrace instance on the host will
-save the target output inside a hostname-timestamp directory::
+save the target output inside a ``<hostname>-<timestamp>`` directory::
$ ls -al
drwxr-xr-x 10 root root 1024 Oct 28 02:40 .
drwxr-sr-x 4 root root 1024 Oct 26 18:24 ..
drwxr-xr-x 2 root root 1024 Oct 28 02:40 192.168.1.43-2012-10-28-02:40:56
-cd into that directory to see the output files::
+``cd`` into that directory to see the output files::
$ ls -l
-rw-r--r-- 1 root root 369193 Oct 28 02:44 sdc.blktrace.0
@@ -2477,29 +2478,29 @@ And run blkparse on the host system using the device name::
8,32 1 0 177.266696560 0 m N cfq1267 put_queue
CPU0 (sdc):
- Reads Queued: 0, 0KiB Writes Queued: 270, 21,708KiB
- Read Dispatches: 59, 2,628KiB Write Dispatches: 495, 39,964KiB
- Reads Requeued: 0 Writes Requeued: 0
- Reads Completed: 90, 2,752KiB Writes Completed: 543, 41,596KiB
- Read Merges: 0, 0KiB Write Merges: 9, 344KiB
- Read depth: 2 Write depth: 2
- IO unplugs: 20 Timer unplugs: 1
+ Reads Queued: 0, 0KiB Writes Queued: 270, 21,708KiB
+ Read Dispatches: 59, 2,628KiB Write Dispatches: 495, 39,964KiB
+ Reads Requeued: 0 Writes Requeued: 0
+ Reads Completed: 90, 2,752KiB Writes Completed: 543, 41,596KiB
+ Read Merges: 0, 0KiB Write Merges: 9, 344KiB
+ Read depth: 2 Write depth: 2
+ IO unplugs: 20 Timer unplugs: 1
CPU1 (sdc):
- Reads Queued: 688, 2,752KiB Writes Queued: 381, 20,652KiB
- Read Dispatches: 31, 124KiB Write Dispatches: 59, 2,396KiB
- Reads Requeued: 0 Writes Requeued: 0
- Reads Completed: 0, 0KiB Writes Completed: 11, 764KiB
- Read Merges: 598, 2,392KiB Write Merges: 88, 448KiB
- Read depth: 2 Write depth: 2
- IO unplugs: 52 Timer unplugs: 0
+ Reads Queued: 688, 2,752KiB Writes Queued: 381, 20,652KiB
+ Read Dispatches: 31, 124KiB Write Dispatches: 59, 2,396KiB
+ Reads Requeued: 0 Writes Requeued: 0
+ Reads Completed: 0, 0KiB Writes Completed: 11, 764KiB
+ Read Merges: 598, 2,392KiB Write Merges: 88, 448KiB
+ Read depth: 2 Write depth: 2
+ IO unplugs: 52 Timer unplugs: 0
Total (sdc):
- Reads Queued: 688, 2,752KiB Writes Queued: 651, 42,360KiB
- Read Dispatches: 90, 2,752KiB Write Dispatches: 554, 42,360KiB
- Reads Requeued: 0 Writes Requeued: 0
- Reads Completed: 90, 2,752KiB Writes Completed: 554, 42,360KiB
- Read Merges: 598, 2,392KiB Write Merges: 97, 792KiB
- IO unplugs: 72 Timer unplugs: 1
+ Reads Queued: 688, 2,752KiB Writes Queued: 651, 42,360KiB
+ Read Dispatches: 90, 2,752KiB Write Dispatches: 554, 42,360KiB
+ Reads Requeued: 0 Writes Requeued: 0
+ Reads Completed: 90, 2,752KiB Writes Completed: 554, 42,360KiB
+ Read Merges: 598, 2,392KiB Write Merges: 97, 792KiB
+ IO unplugs: 72 Timer unplugs: 1
Throughput (R/W): 15KiB/s / 238KiB/s
Events (sdc): 9,301 entries
@@ -2514,16 +2515,16 @@ Tracing Block I/O via 'ftrace'
It's also possible to trace block I/O using only
:ref:`profile-manual/usage:The 'trace events' Subsystem`, which
can be useful for casual tracing if you don't want to bother dealing with the
-userspace tools.
+user space tools.
-To enable tracing for a given device, use /sys/block/xxx/trace/enable,
-where xxx is the device name. This for example enables tracing for
-/dev/sdc::
+To enable tracing for a given device, use ``/sys/block/xxx/trace/enable``,
+where ``xxx`` is the device name. For example, this enables tracing for
+``/dev/sdc``::
root@crownbay:/sys/kernel/debug/tracing# echo 1 > /sys/block/sdc/trace/enable
Once you've selected the device(s) you want
-to trace, selecting the 'blk' tracer will turn the blk tracer on::
+to trace, selecting the ``blk`` tracer will turn the blk tracer on::
root@crownbay:/sys/kernel/debug/tracing# cat available_tracers
blk function_graph function nop
@@ -2534,8 +2535,8 @@ Execute the workload you're interested in::
root@crownbay:/sys/kernel/debug/tracing# cat /media/sdc/testfile.txt
-And look at the output (note here that we're using 'trace_pipe' instead of
-trace to capture this trace - this allows us to wait around on the pipe
+And look at the output (note here that we're using ``trace_pipe`` instead of
+trace to capture this trace --- this allows us to wait around on the pipe
for data to appear)::
root@crownbay:/sys/kernel/debug/tracing# cat trace_pipe
@@ -2562,7 +2563,7 @@ And this turns off tracing for the specified device::
blktrace Documentation
----------------------
-Online versions of the man pages for the commands discussed in this
+Online versions of the manual pages for the commands discussed in this
section can be found here:
- https://linux.die.net/man/8/blktrace
@@ -2571,8 +2572,8 @@ section can be found here:
- https://linux.die.net/man/8/btrace
-The above manpages, along with manpages for the other blktrace utilities
-(btt, blkiomon, etc) can be found in the /doc directory of the blktrace
-tools git repo::
+The above manual pages, along with those for the other blktrace utilities
+(``btt``, ``blkiomon``, etc.), can be found in the ``/doc`` directory of the blktrace
+tools git repository::
$ git clone git://git.kernel.dk/blktrace.git
diff --git a/documentation/ref-manual/classes.rst b/documentation/ref-manual/classes.rst
index a6dafe8f90..43bdc8bd25 100644
--- a/documentation/ref-manual/classes.rst
+++ b/documentation/ref-manual/classes.rst
@@ -28,10 +28,10 @@ information.
.. _ref-classes-allarch:
-``allarch.bbclass``
-===================
+``allarch``
+===========
-The ``allarch`` class is inherited by recipes that do not produce
+The :ref:`ref-classes-allarch` class is inherited by recipes that do not produce
architecture-specific output. The class disables functionality that is
normally needed for recipes that produce executable binaries (such as
building the cross-compiler and a C library as pre-requisites, and
@@ -43,42 +43,43 @@ splitting out of debug symbols during packaging).
produce packages that depend on tunings through use of the
:term:`RDEPENDS` and
:term:`TUNE_PKGARCH` variables, should never be
- configured for all architectures using ``allarch``. This is the case
+ configured for all architectures using :ref:`ref-classes-allarch`. This is the case
even if the recipes do not produce architecture-specific output.
Configuring such recipes for all architectures causes the
- ``do_package_write_*`` tasks to
+ :ref:`do_package_write_* <ref-tasks-package_write_deb>` tasks to
have different signatures for the machines with different tunings.
Additionally, unnecessary rebuilds occur every time an image for a
different :term:`MACHINE` is built even when the recipe never changes.
-By default, all recipes inherit the :ref:`base <ref-classes-base>` and
-:ref:`package <ref-classes-package>` classes, which enable
+By default, all recipes inherit the :ref:`ref-classes-base` and
+:ref:`ref-classes-package` classes, which enable
functionality needed for recipes that produce executable output. If your
recipe, for example, only produces packages that contain configuration
files, media files, or scripts (e.g. Python and Perl), then it should
-inherit the ``allarch`` class.
+inherit the :ref:`ref-classes-allarch` class.
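+
+In practice, that just means adding one line to such a recipe::
+
+   inherit allarch
+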
.. _ref-classes-archiver:
-``archiver.bbclass``
-====================
+``archiver``
+============
-The ``archiver`` class supports releasing source code and other
+The :ref:`ref-classes-archiver` class supports releasing source code and other
materials with the binaries.
-For more details on the source archiver, see the
-":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
+For more details on the source :ref:`ref-classes-archiver`, see the
+":ref:`dev-manual/licenses:maintaining open source license compliance during your product's lifecycle`"
section in the Yocto Project Development Tasks Manual. You can also see
the :term:`ARCHIVER_MODE` variable for information
about the variable flags (varflags) that help control archive creation.
.. _ref-classes-autotools:
-``autotools*.bbclass``
-======================
+``autotools*``
+==============
-The ``autotools*`` classes support Autotooled packages.
+The :ref:`autotools* <ref-classes-autotools>` classes support packages built with the
+:wikipedia:`GNU Autotools <GNU_Autotools>`.
The ``autoconf``, ``automake``, and ``libtool`` packages bring
standardization. This class defines a set of tasks (e.g. ``configure``,
@@ -86,16 +87,16 @@ standardization. This class defines a set of tasks (e.g. ``configure``,
should usually be enough to define a few standard variables and then
simply ``inherit autotools``. These classes can also work with software
that emulates Autotools. For more information, see the
-":ref:`dev-manual/common-tasks:autotooled package`" section
+":ref:`dev-manual/new-recipe:building an autotooled package`" section
in the Yocto Project Development Tasks Manual.
-By default, the ``autotools*`` classes use out-of-tree builds (i.e.
+By default, the :ref:`autotools* <ref-classes-autotools>` classes use out-of-tree builds (i.e.
``autotools.bbclass`` building with ``B != S``).
If the software being built by a recipe does not support using
out-of-tree builds, you should have the recipe inherit the
-``autotools-brokensep`` class. The ``autotools-brokensep`` class behaves
-the same as the ``autotools`` class but builds with :term:`B`
+:ref:`autotools-brokensep <ref-classes-autotools>` class. The :ref:`autotools-brokensep <ref-classes-autotools>` class behaves
+the same as the :ref:`ref-classes-autotools` class but builds with :term:`B`
== :term:`S`. This method is useful when out-of-tree build
support is either not present or is broken.
@@ -105,35 +106,34 @@ support is either not present or is broken.
all possible.
It's useful to have some idea of how the tasks defined by the
-``autotools*`` classes work and what they do behind the scenes.
+:ref:`autotools* <ref-classes-autotools>` classes work and what they do behind the scenes.
-- :ref:`ref-tasks-configure` - Regenerates the
+- :ref:`ref-tasks-configure` --- regenerates the
configure script (using ``autoreconf``) and then launches it with a
standard set of arguments used during cross-compilation. You can pass
additional parameters to ``configure`` through the :term:`EXTRA_OECONF`
or :term:`PACKAGECONFIG_CONFARGS`
variables.
-- :ref:`ref-tasks-compile` - Runs ``make`` with
+- :ref:`ref-tasks-compile` --- runs ``make`` with
arguments that specify the compiler and linker. You can pass
additional arguments through the :term:`EXTRA_OEMAKE` variable.
-- :ref:`ref-tasks-install` - Runs ``make install`` and
+- :ref:`ref-tasks-install` --- runs ``make install`` and
passes in ``${``\ :term:`D`\ ``}`` as ``DESTDIR``.
.. _ref-classes-base:
-``base.bbclass``
-================
+``base``
+========
-The ``base`` class is special in that every ``.bb`` file implicitly
+The :ref:`ref-classes-base` class is special in that every ``.bb`` file implicitly
inherits the class. This class contains definitions for standard basic
tasks such as fetching, unpacking, configuring (empty by default),
compiling (runs any ``Makefile`` present), installing (empty by default)
-and packaging (empty by default). These classes are often overridden or
-extended by other classes such as the
-:ref:`autotools <ref-classes-autotools>` class or the
-:ref:`package <ref-classes-package>` class.
+and packaging (empty by default). These tasks are often overridden or
+extended by other classes such as the :ref:`ref-classes-autotools` class or the
+:ref:`ref-classes-package` class.
The class also contains some commonly used functions such as
``oe_runmake``, which runs ``make`` with the arguments specified in
@@ -142,18 +142,18 @@ arguments passed directly to ``oe_runmake``.
.. _ref-classes-bash-completion:
-``bash-completion.bbclass``
-===========================
+``bash-completion``
+===================
Sets up packaging and dependencies appropriate for recipes that build
software that includes bash-completion data.
.. _ref-classes-bin-package:
-``bin_package.bbclass``
-=======================
+``bin_package``
+===============
-The ``bin_package`` class is a helper class for recipes that extract the
+The :ref:`ref-classes-bin-package` class is a helper class for recipes that extract the
contents of a binary package (e.g. an RPM) and install those contents
rather than building the binary from source. The binary package is
extracted and new packages in the configured output package format are
@@ -177,10 +177,10 @@ example use for this class.
.. _ref-classes-binconfig:
-``binconfig.bbclass``
-=====================
+``binconfig``
+=============
-The ``binconfig`` class helps to correct paths in shell scripts.
+The :ref:`ref-classes-binconfig` class helps to correct paths in shell scripts.
Before ``pkg-config`` had become widespread, libraries shipped shell
scripts to give information about the libraries and include paths needed
@@ -197,34 +197,33 @@ information.
.. _ref-classes-binconfig-disabled:
-``binconfig-disabled.bbclass``
-==============================
+``binconfig-disabled``
+======================
-An alternative version of the :ref:`binconfig <ref-classes-binconfig>`
+An alternative version of the :ref:`ref-classes-binconfig`
class, which disables binary configuration scripts by making them return
an error in favor of using ``pkg-config`` to query the information. The
-scripts to be disabled should be specified using the
-:term:`BINCONFIG` variable within the recipe inheriting
-the class.
+scripts to be disabled should be specified using the :term:`BINCONFIG`
+variable within the recipe inheriting the class.
.. _ref-classes-buildhistory:
-``buildhistory.bbclass``
-========================
+``buildhistory``
+================
-The ``buildhistory`` class records a history of build output metadata,
+The :ref:`ref-classes-buildhistory` class records a history of build output metadata,
which can be used to detect possible regressions as well as used for
analysis of the build output. For more information on using Build
History, see the
-":ref:`dev-manual/common-tasks:maintaining build output quality`"
+":ref:`dev-manual/build-quality:maintaining build output quality`"
section in the Yocto Project Development Tasks Manual.
.. _ref-classes-buildstats:
-``buildstats.bbclass``
-======================
+``buildstats``
+==============
-The ``buildstats`` class records performance statistics about each task
+The :ref:`ref-classes-buildstats` class records performance statistics about each task
executed during the build (e.g. elapsed time, CPU usage, and I/O usage).
When you use this class, the output goes into the
@@ -238,23 +237,59 @@ Collecting build statistics is enabled by default through the
:term:`USER_CLASSES` variable from your
``local.conf`` file. Consequently, you do not have to do anything to
enable the class. However, if you want to disable the class, simply
-remove "buildstats" from the :term:`USER_CLASSES` list.
+remove ":ref:`ref-classes-buildstats`" from the :term:`USER_CLASSES` list.
.. _ref-classes-buildstats-summary:
-``buildstats-summary.bbclass``
-==============================
+``buildstats-summary``
+======================
When inherited globally, prints statistics at the end of the build on
sstate re-use. In order to function, this class requires the
-:ref:`buildstats <ref-classes-buildstats>` class be enabled.
+:ref:`ref-classes-buildstats` class be enabled.
+
+.. _ref-classes-cargo:
+
+``cargo``
+=========
+
+The :ref:`ref-classes-cargo` class allows you to compile Rust language programs
+using `Cargo <https://doc.rust-lang.org/cargo/>`__. Cargo is Rust's package
+manager, which fetches package dependencies and builds your program.
+
+Using this class makes it very easy to build Rust programs. All you need
+is to use the :term:`SRC_URI` variable to point to a source repository
+which can be built by Cargo, typically one that was created by the
+``cargo new`` command, containing a ``Cargo.toml`` file, a ``Cargo.lock`` file and a ``src``
+subdirectory.
+
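+A minimal recipe sketch might look like the following (the project name, URL,
+revision and checksum are placeholders, and real recipes typically also list
+their crate dependencies in :term:`SRC_URI`)::
+
+   SUMMARY = "Hypothetical Rust hello-world program"
+   LICENSE = "MIT"
+   LIC_FILES_CHKSUM = "file://LICENSE;md5=<checksum>"
+
+   SRC_URI = "git://example.com/hello-rs.git;protocol=https;branch=main"
+   SRCREV = "<revision>"
+   S = "${WORKDIR}/git"
+
+   inherit cargo
+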
+You will find an example (that also shows how to handle possible git source dependencies) in the
+:oe_git:`zvariant_3.12.0.bb </openembedded-core/tree/meta-selftest/recipes-extended/zvariant/zvariant_3.12.0.bb>`
+recipe. Another example, with only crate dependencies, is the
+:oe_git:`uutils-coreutils </meta-openembedded/tree/meta-oe/recipes-core/uutils-coreutils>`
+recipe, which was generated by the `cargo-bitbake <https://crates.io/crates/cargo-bitbake>`__
+tool.
+
+This class inherits the :ref:`ref-classes-cargo_common` class.
+
+.. _ref-classes-cargo_common:
+
+``cargo_common``
+================
+
+The :ref:`ref-classes-cargo_common` class is an internal class
+that is not intended to be used directly.
+
+An exception is the ``rust`` recipe, which builds the Rust compiler and runtime
+library with Cargo but cannot use the :ref:`ref-classes-cargo`
+class. This is why this class was introduced.
.. _ref-classes-ccache:
-``ccache.bbclass``
-==================
+``ccache``
+==========
-The ``ccache`` class enables the C/C++ Compiler Cache for the build.
+The :ref:`ref-classes-ccache` class enables the C/C++ Compiler Cache for the build.
This class is used to give a minor performance boost during the build.
See https://ccache.samba.org/ for information on the C/C++ Compiler
@@ -268,84 +303,89 @@ this class is not recommended.
.. _ref-classes-chrpath:
-``chrpath.bbclass``
-===================
+``chrpath``
+===========
-The ``chrpath`` class is a wrapper around the "chrpath" utility, which
-is used during the build process for ``nativesdk``, ``cross``, and
-``cross-canadian`` recipes to change ``RPATH`` records within binaries
+The :ref:`ref-classes-chrpath` class is a wrapper around the "chrpath" utility, which
+is used during the build process for :ref:`ref-classes-nativesdk`, :ref:`ref-classes-cross`, and
+:ref:`ref-classes-cross-canadian` recipes to change ``RPATH`` records within binaries
in order to make them relocatable.
.. _ref-classes-cmake:
-``cmake.bbclass``
-=================
+``cmake``
+=========
-The ``cmake`` class allows for recipes that need to build software using
-the `CMake <https://cmake.org/overview/>`__ build system. You can use
-the :term:`EXTRA_OECMAKE` variable to specify
-additional configuration options to be passed using the ``cmake``
-command line.
+The :ref:`ref-classes-cmake` class allows recipes to build software using the
+`CMake <https://cmake.org/overview/>`__ build system. You can use the
+:term:`EXTRA_OECMAKE` variable to specify additional configuration options to
+pass to the ``cmake`` command line.
-On the occasion that you would be installing custom CMake toolchain
-files supplied by the application being built, you should install them
-to the preferred CMake Module directory: ``${D}${datadir}/cmake/``
-Modules during
-:ref:`ref-tasks-install`.
+By default, the :ref:`ref-classes-cmake` class uses
+`Ninja <https://ninja-build.org/>`__ instead of GNU make for building, which
+offers better build performance. If a recipe is broken with Ninja, then the
+recipe can set the :term:`OECMAKE_GENERATOR` variable to ``Unix Makefiles`` to
+use GNU make instead.
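+
+For example, such a recipe would simply contain::
+
+   OECMAKE_GENERATOR = "Unix Makefiles"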
+
+If you need to install custom CMake toolchain files supplied by the application
+being built, you should install them (during :ref:`ref-tasks-install`) to the
+preferred CMake Module directory: ``${D}${datadir}/cmake/modules/``.
.. _ref-classes-cml1:
-``cml1.bbclass``
-================
+``cml1``
+========
-The ``cml1`` class provides basic support for the Linux kernel style
-build configuration system.
+The :ref:`ref-classes-cml1` class provides basic support for the Linux kernel style
+build configuration system. "cml" stands for "Configuration Menu Language", which
+originates from the Linux kernel but is also used in other projects such as U-Boot
+and BusyBox. It could have been called "kconfig" too.
.. _ref-classes-compress_doc:
-``compress_doc.bbclass``
-========================
+``compress_doc``
+================
-Enables compression for man pages and info pages. This class is intended
+Enables compression for manual and info pages. This class is intended
to be inherited globally. The default compression mechanism is gz (gzip)
but you can select an alternative mechanism by setting the
:term:`DOC_COMPRESS` variable.
.. _ref-classes-copyleft_compliance:
-``copyleft_compliance.bbclass``
-===============================
+``copyleft_compliance``
+=======================
-The ``copyleft_compliance`` class preserves source code for the purposes
-of license compliance. This class is an alternative to the ``archiver``
+The :ref:`ref-classes-copyleft_compliance` class preserves source code for the purposes
+of license compliance. This class is an alternative to the :ref:`ref-classes-archiver`
class and is still used by some users even though it has been deprecated
-in favor of the :ref:`archiver <ref-classes-archiver>` class.
+in favor of the :ref:`ref-classes-archiver` class.
.. _ref-classes-copyleft_filter:
-``copyleft_filter.bbclass``
-===========================
+``copyleft_filter``
+===================
-A class used by the :ref:`archiver <ref-classes-archiver>` and
-:ref:`copyleft_compliance <ref-classes-copyleft_compliance>` classes
+A class used by the :ref:`ref-classes-archiver` and
+:ref:`ref-classes-copyleft_compliance` classes
for filtering licenses. The ``copyleft_filter`` class is an internal
class and is not intended to be used directly.
.. _ref-classes-core-image:
-``core-image.bbclass``
-======================
+``core-image``
+==============
-The ``core-image`` class provides common definitions for the
+The :ref:`ref-classes-core-image` class provides common definitions for the
``core-image-*`` image recipes, such as support for additional
:term:`IMAGE_FEATURES`.
.. _ref-classes-cpan:
-``cpan*.bbclass``
-=================
+``cpan*``
+=========
-The ``cpan*`` classes support Perl modules.
+The :ref:`cpan* <ref-classes-cpan>` classes support Perl modules.
Recipes for Perl modules are simple. These recipes usually only need to
point to the source's archive and then inherit the proper class file.
@@ -358,23 +398,49 @@ authors used.
- Modules that use ``Build.PL``-based build system require using
``cpan_build.bbclass`` in their recipes.
-Both build methods inherit the ``cpan-base`` class for basic Perl
+Both build methods inherit the :ref:`cpan-base <ref-classes-cpan>` class for basic Perl
support.
+.. _ref-classes-create-spdx:
+
+``create-spdx``
+===============
+
+The :ref:`ref-classes-create-spdx` class provides support for
+automatically creating :term:`SPDX` :term:`SBOM` documents based upon image
+and SDK contents.
+
+This class is meant to be inherited globally from a configuration file::
+
+ INHERIT += "create-spdx"
+
+The top-level :term:`SPDX` output file is generated in JSON format as an
+``IMAGE-MACHINE.spdx.json`` file in ``tmp/deploy/images/MACHINE/`` inside the
+:term:`Build Directory`. There are other related files in the same directory,
+as well as in ``tmp/deploy/spdx``.
+
+The exact behaviour of this class, and the amount of output can be controlled
+by the :term:`SPDX_PRETTY`, :term:`SPDX_ARCHIVE_PACKAGED`,
+:term:`SPDX_ARCHIVE_SOURCES` and :term:`SPDX_INCLUDE_SOURCES` variables.
+
+See the description of these variables and the
+":ref:`dev-manual/sbom:creating a software bill of materials`"
+section in the Yocto Project Development Manual for more details.
+
.. _ref-classes-cross:
-``cross.bbclass``
-=================
+``cross``
+=========
-The ``cross`` class provides support for the recipes that build the
+The :ref:`ref-classes-cross` class provides support for the recipes that build the
cross-compilation tools.
.. _ref-classes-cross-canadian:
-``cross-canadian.bbclass``
-==========================
+``cross-canadian``
+==================
-The ``cross-canadian`` class provides support for the recipes that build
+The :ref:`ref-classes-cross-canadian` class provides support for the recipes that build
the Canadian Cross-compilation tools for SDKs. See the
":ref:`overview-manual/concepts:cross-development toolchain generation`"
section in the Yocto Project Overview and Concepts Manual for more
@@ -382,10 +448,10 @@ discussion on these cross-compilation tools.
.. _ref-classes-crosssdk:
-``crosssdk.bbclass``
-====================
+``crosssdk``
+============
-The ``crosssdk`` class provides support for the recipes that build the
+The :ref:`ref-classes-crosssdk` class provides support for the recipes that build the
cross-compilation tools used for building SDKs. See the
":ref:`overview-manual/concepts:cross-development toolchain generation`"
section in the Yocto Project Overview and Concepts Manual for more
@@ -393,26 +459,74 @@ discussion on these cross-compilation tools.
.. _ref-classes-cve-check:
-``cve-check.bbclass``
-=====================
+``cve-check``
+=============
-The ``cve-check`` class looks for known CVEs (Common Vulnerabilities
-and Exposures) while building an image. This class is meant to be
+The :ref:`ref-classes-cve-check` class looks for known CVEs (Common Vulnerabilities
+and Exposures) while building with BitBake. This class is meant to be
inherited globally from a configuration file::
INHERIT += "cve-check"
+To filter out obsolete CVE database entries which are known not to impact software from Poky and OE-Core,
+add the following line to the build configuration file::
+
+ include cve-extra-exclusions.inc
+
You can also look for vulnerabilities in specific packages by passing
-``-c cve_check`` to BitBake. You will find details in the
-":ref:`dev-manual/common-tasks:checking for vulnerabilities`"
+``-c cve_check`` to BitBake.
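+
+For example, to check a single recipe (the recipe name here is just an
+illustration)::
+
+   $ bitbake -c cve_check myrecipe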
+
+After building the software with BitBake, CVE check output reports are available in ``tmp/deploy/cve``
+and image-specific summaries in ``tmp/deploy/images/*.cve`` or ``tmp/deploy/images/*.json`` files.
+
+When building, the CVE checker will emit build-time warnings for any detected
+issues which are in the state ``Unpatched``, meaning that the CVE issue seems to affect the software component
+and version being compiled and that no patches to address the issue are applied. Other states
+for detected CVE issues are: ``Patched``, meaning that a patch to address the issue is already
+applied, and ``Ignored``, meaning that the issue can be ignored.
+
+The ``Patched`` state of a CVE issue is detected from patch files with the format
+``CVE-ID.patch``, e.g. ``CVE-2019-20633.patch``, in the :term:`SRC_URI` and using
+CVE metadata of format ``CVE: CVE-ID`` in the commit message of the patch file.
+
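+For instance (a hypothetical recipe snippet), adding such a patch to the
+source list is enough for the checker to report the corresponding CVE as
+``Patched``::
+
+   SRC_URI += "file://CVE-2019-20633.patch"
+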
+If the recipe lists the ``CVE-ID`` in the :term:`CVE_CHECK_IGNORE` variable, then the CVE state is reported
+as ``Ignored``. Multiple CVEs can be listed, separated by spaces. Example::
+
+ CVE_CHECK_IGNORE += "CVE-2020-29509 CVE-2020-29511"
+
+If CVE check reports that a recipe contains false positives or false negatives, these may be
+fixed in recipes by adjusting the CVE product name and version using the :term:`CVE_PRODUCT` and :term:`CVE_VERSION` variables.
+:term:`CVE_PRODUCT` defaults to the plain recipe name :term:`BPN`, which can be adjusted to one or more CVE
+database vendor and product pairs using the syntax::
+
+ CVE_PRODUCT = "flex_project:flex"
+
+where ``flex_project`` is the CVE database vendor name and ``flex`` is the product name. Similarly,
+if the default recipe version :term:`PV` does not match the version numbers of the software component
+in upstream releases or the CVE database, then the :term:`CVE_VERSION` variable can be used to set the
+CVE database compatible version number, for example::
+
+ CVE_VERSION = "2.39"
+
+Any bugs or missing or incomplete information in the CVE database entries should be fixed in the CVE database
+via the `NVD feedback form <https://nvd.nist.gov/info/contact-form>`__.
+
+Users should note that security is a process, not a product, and thus CVE checking, analyzing the results,
+patching and updating the software should be done as a regular, ongoing process. The data and assumptions
+required for the CVE checker to reliably detect issues are frequently broken in various ways.
+Such breakage can only be detected by reviewing the details of the issues and iterating over the generated reports,
+and by following what happens in other Linux distributions and in the greater open source community.
+
+You will find some more details in the
+":ref:`dev-manual/vulnerabilities:checking for vulnerabilities`"
section in the Development Tasks Manual.
.. _ref-classes-debian:
-``debian.bbclass``
-==================
+``debian``
+==========
-The ``debian`` class renames output packages so that they follow the
+The :ref:`ref-classes-debian` class renames output packages so that they follow the
Debian naming policy (i.e. ``glibc`` becomes ``libc6`` and
``glibc-devel`` becomes ``libc6-dev``.) Renaming includes the library
name and version as part of the package name.
@@ -424,10 +538,10 @@ naming scheme.
.. _ref-classes-deploy:
-``deploy.bbclass``
-==================
+``deploy``
+==========
-The ``deploy`` class handles deploying files to the
+The :ref:`ref-classes-deploy` class handles deploying files to the
:term:`DEPLOY_DIR_IMAGE` directory. The main
function of this class is to allow the deploy step to be accelerated by
shared state. Recipes that inherit this class should define their own
@@ -438,25 +552,71 @@ add the task at the appropriate place, which is usually after
:ref:`ref-tasks-install`. The class then takes care of
staging the files from :term:`DEPLOYDIR` to :term:`DEPLOY_DIR_IMAGE`.
+.. _ref-classes-devicetree:
+
+``devicetree``
+==============
+
+The :ref:`ref-classes-devicetree` class allows building a recipe that compiles
+device tree source files that are not in the kernel tree.
+
+The compilation of out-of-tree device tree sources is the same as the kernel
+in-tree device tree compilation process. This includes the ability to include
+sources from the kernel such as SoC ``dtsi`` files as well as C header files,
+such as ``gpio.h``.
+
+The :ref:`ref-tasks-compile` task will compile two kinds of files:
+
+- Regular device tree sources with a ``.dts`` extension.
+
+- Device tree overlays, detected from the presence of the ``/plugin/;``
+ string in the file contents.
+
+This class deploys the generated device tree binaries into
+``${``\ :term:`DEPLOY_DIR_IMAGE`\ ``}/devicetree/``. This is similar to
+what the :ref:`ref-classes-kernel-devicetree` class does, with the added
+``devicetree`` subdirectory to avoid name clashes. Additionally, the device
+trees are populated into the sysroot, so that they can be accessed from
+other recipes.
+
+By default, all device tree sources located in :term:`DT_FILES_PATH` directory
+are compiled.
+
+Extra padding is appended to non-overlay device tree binaries. This padding
+can typically be used as extra space for adding properties at boot time.
+The padding size can be modified by setting :term:`DT_PADDING_SIZE`
+to the desired size, in bytes.
+
+See :oe_git:`devicetree.bbclass sources
+</openembedded-core/tree/meta/classes-recipe/devicetree.bbclass>`
+for further variables controlling this class.
+
+Here is an excerpt of an example ``recipes-kernel/linux/devicetree-acme.bb``
+recipe inheriting this class::
+
+ inherit devicetree
+ COMPATIBLE_MACHINE = "^mymachine$"
+ SRC_URI:mymachine = "file://mymachine.dts"
+
.. _ref-classes-devshell:
-``devshell.bbclass``
-====================
+``devshell``
+============
-The ``devshell`` class adds the ``do_devshell`` task. Distribution
-policy dictates whether to include this class. See the ":ref:`dev-manual/common-tasks:using a development shell`"
+The :ref:`ref-classes-devshell` class adds the :ref:`ref-tasks-devshell` task. Distribution
+policy dictates whether to include this class. See the ":ref:`dev-manual/development-shell:using a development shell`"
section in the Yocto Project Development Tasks Manual for more
-information about using ``devshell``.
+information about using :ref:`ref-classes-devshell`.
.. _ref-classes-devupstream:
-``devupstream.bbclass``
-=======================
+``devupstream``
+===============
-The ``devupstream`` class uses
+The :ref:`ref-classes-devupstream` class uses
:term:`BBCLASSEXTEND` to add a variant of the
recipe that fetches from an alternative URI (e.g. Git) instead of a
-tarball. Following is an example::
+tarball. Here is an example::
BBCLASSEXTEND = "devupstream:target"
SRC_URI:class-devupstream = "git://git.example.com/example;branch=main"
@@ -475,10 +635,10 @@ Any development-specific adjustments can be done by using the
The class
currently only supports creating a development variant of the target
-recipe, not ``native`` or ``nativesdk`` variants.
+recipe, not :ref:`ref-classes-native` or :ref:`ref-classes-nativesdk` variants.
The :term:`BBCLASSEXTEND` syntax (i.e. ``devupstream:target``) provides
-support for ``native`` and ``nativesdk`` variants. Consequently, this
+support for :ref:`ref-classes-native` and :ref:`ref-classes-nativesdk` variants. Consequently, this
functionality can be added in a future release.
Support for other version control systems such as Subversion is limited
@@ -487,10 +647,10 @@ due to BitBake's automatic fetch dependencies (e.g.
.. _ref-classes-externalsrc:
-``externalsrc.bbclass``
-=======================
+``externalsrc``
+===============
-The ``externalsrc`` class supports building software from source code
+The :ref:`ref-classes-externalsrc` class supports building software from source code
that is external to the OpenEmbedded build system. Building software
from an external source tree means that the build system's normal fetch,
unpack, and patch process is not used.
@@ -498,9 +658,8 @@ unpack, and patch process is not used.
By default, the OpenEmbedded build system uses the :term:`S`
and :term:`B` variables to locate unpacked recipe source code
and to build it, respectively. When your recipe inherits the
-``externalsrc`` class, you use the
-:term:`EXTERNALSRC` and
-:term:`EXTERNALSRC_BUILD` variables to
+:ref:`ref-classes-externalsrc` class, you use the
+:term:`EXTERNALSRC` and :term:`EXTERNALSRC_BUILD` variables to
ultimately define :term:`S` and :term:`B`.
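+
+A minimal sketch (with a hypothetical recipe name and source path) of using
+this class from ``local.conf`` could look like::
+
+   INHERIT += "externalsrc"
+   EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
+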
By default, this class expects the source code to support recipe builds
@@ -515,19 +674,18 @@ See these variables for more information:
:term:`WORKDIR`, :term:`BPN`, and
:term:`PV`,
-For more information on the ``externalsrc`` class, see the comments in
+For more information on the :ref:`ref-classes-externalsrc` class, see the comments in
``meta/classes/externalsrc.bbclass`` in the :term:`Source Directory`.
-For information on how to use the
-``externalsrc`` class, see the
-":ref:`dev-manual/common-tasks:building software from an external source`"
+For information on how to use the :ref:`ref-classes-externalsrc` class, see the
+":ref:`dev-manual/building:building software from an external source`"
section in the Yocto Project Development Tasks Manual.
.. _ref-classes-extrausers:
-``extrausers.bbclass``
-======================
+``extrausers``
+==============
-The ``extrausers`` class allows additional user and group configuration
+The :ref:`ref-classes-extrausers` class allows additional user and group configuration
to be applied at the image level. Inheriting this class either globally
or from an image recipe allows additional user and group operations to
be performed using the
@@ -535,13 +693,11 @@ be performed using the
.. note::
- The user and group operations added using the
- :ref:`extrausers <ref-classes-extrausers>`
+ The user and group operations added using the :ref:`ref-classes-extrausers`
class are not tied to a specific recipe outside of the recipe for the
image. Thus, the operations can be performed across the image as a
- whole. Use the
- :ref:`useradd <ref-classes-useradd>`
- class to add user and group configuration to a specific recipe.
+ whole. Use the :ref:`ref-classes-useradd` class to add user and group
+ configuration to a specific recipe.
Here is an example that uses this class in an image recipe::
@@ -579,19 +735,19 @@ Finally, here is an example that sets the root password::
.. note::
From a security perspective, hardcoding a default password is not
- generally a good idea or even legal in some jurisdictions. It is
- recommended that you do not do this if you are building a production
+ generally a good idea or even legal in some jurisdictions. It is
+ recommended that you do not do this if you are building a production
image.
.. _ref-classes-features_check:
-``features_check.bbclass``
-=================================
+``features_check``
+==================
-The ``features_check`` class allows individual recipes to check
-for required and conflicting
-:term:`DISTRO_FEATURES`, :term:`MACHINE_FEATURES` or :term:`COMBINED_FEATURES`.
+The :ref:`ref-classes-features_check` class allows individual recipes to check
+for required and conflicting :term:`DISTRO_FEATURES`, :term:`MACHINE_FEATURES`
+or :term:`COMBINED_FEATURES`.
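+
+For example, a recipe that needs X11 support could contain (an illustrative
+snippet)::
+
+   inherit features_check
+   REQUIRED_DISTRO_FEATURES = "x11"
+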
This class provides support for the following variables:
@@ -612,10 +768,10 @@ triggered.
.. _ref-classes-fontcache:
-``fontcache.bbclass``
-=====================
+``fontcache``
+=============
-The ``fontcache`` class generates the proper post-install and
+The :ref:`ref-classes-fontcache` class generates the proper post-install and
post-remove (postinst and postrm) scriptlets for font packages. These
scriptlets call ``fc-cache`` (part of ``Fontconfig``) to add the fonts
to the font information cache. Since the cache files are
@@ -628,20 +784,20 @@ packages containing the fonts.
.. _ref-classes-fs-uuid:
-``fs-uuid.bbclass``
-===================
+``fs-uuid``
+===========
-The ``fs-uuid`` class extracts UUID from
+The :ref:`ref-classes-fs-uuid` class extracts UUID from
``${``\ :term:`ROOTFS`\ ``}``, which must have been built
-by the time that this function gets called. The ``fs-uuid`` class only
+by the time that this function gets called. The :ref:`ref-classes-fs-uuid` class only
works on ``ext`` file systems and depends on ``tune2fs``.
.. _ref-classes-gconf:
-``gconf.bbclass``
-=================
+``gconf``
+=========
-The ``gconf`` class provides common functionality for recipes that need
+The :ref:`ref-classes-gconf` class provides common functionality for recipes that need
to install GConf schemas. The schemas will be put into a separate
package (``${``\ :term:`PN`\ ``}-gconf``) that is created
automatically when this class is inherited. This package uses the
@@ -650,29 +806,52 @@ register and unregister the schemas in the target image.
.. _ref-classes-gettext:
-``gettext.bbclass``
-===================
+``gettext``
+===========
-The ``gettext`` class provides support for building software that uses
-the GNU ``gettext`` internationalization and localization system. All
-recipes building software that use ``gettext`` should inherit this
+The :ref:`ref-classes-gettext` class provides support for building
+software that uses the GNU ``gettext`` internationalization and localization
+system. All recipes building software that use ``gettext`` should inherit this
class.
.. _ref-classes-gnomebase:
-``gnomebase.bbclass``
-=====================
+``gnomebase``
+=============
-The ``gnomebase`` class is the base class for recipes that build
+The :ref:`ref-classes-gnomebase` class is the base class for recipes that build
software from the GNOME stack. This class sets
:term:`SRC_URI` to download the source from the GNOME
mirrors as well as extending :term:`FILES` with the typical
GNOME installation paths.
+.. _ref-classes-go:
+
+``go``
+======
+
+The :ref:`ref-classes-go` class supports building Go programs. The behavior of
+this class is controlled by the mandatory :term:`GO_IMPORT` variable, and
+by the optional :term:`GO_INSTALL` and :term:`GO_INSTALL_FILTEROUT` ones.
+
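+A minimal recipe sketch (with a hypothetical import path) could look like::
+
+   inherit go
+   GO_IMPORT = "github.com/example/hello"
+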
+To build a Go program with the Yocto Project, you can use the
+:yocto_git:`go-helloworld_0.1.bb </poky/tree/meta/recipes-extended/go-examples/go-helloworld_0.1.bb>`
+recipe as an example.
+
+.. _ref-classes-go-mod:
+
+``go-mod``
+==========
+
+The :ref:`ref-classes-go-mod` class allows using Go modules, and inherits the
+:ref:`ref-classes-go` class.
+
+See the associated :term:`GO_WORKDIR` variable.
+
.. _ref-classes-gobject-introspection:
-``gobject-introspection.bbclass``
-=================================
+``gobject-introspection``
+=========================
Provides support for recipes building software that supports GObject
introspection. This functionality is only enabled if the
@@ -683,16 +862,17 @@ introspection. This functionality is only enabled if the
.. note::
- This functionality is backfilled by default and, if not applicable,
- should be disabled through :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED` or
+ This functionality is :ref:`backfilled <ref-features-backfill>` by default
+ and, if not applicable, should be disabled through
+ :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED` or
:term:`MACHINE_FEATURES_BACKFILL_CONSIDERED`, respectively.
.. _ref-classes-grub-efi:
-``grub-efi.bbclass``
-====================
+``grub-efi``
+============
-The ``grub-efi`` class provides ``grub-efi``-specific functions for
+The :ref:`ref-classes-grub-efi` class provides ``grub-efi``-specific functions for
building bootable images.
This class supports several variables:
@@ -721,10 +901,10 @@ This class supports several variables:
.. _ref-classes-gsettings:
-``gsettings.bbclass``
-=====================
+``gsettings``
+=============
-The ``gsettings`` class provides common functionality for recipes that
+The :ref:`ref-classes-gsettings` class provides common functionality for recipes that
need to install GSettings (glib) schemas. The schemas are assumed to be
part of the main package. Appropriate post-install and post-remove
(postinst/postrm) scriptlets are added to register and unregister the
@@ -732,18 +912,18 @@ schemas in the target image.
.. _ref-classes-gtk-doc:
-``gtk-doc.bbclass``
-===================
+``gtk-doc``
+===========
-The ``gtk-doc`` class is a helper class to pull in the appropriate
+The :ref:`ref-classes-gtk-doc` class is a helper class to pull in the appropriate
``gtk-doc`` dependencies and disable ``gtk-doc``.
.. _ref-classes-gtk-icon-cache:
-``gtk-icon-cache.bbclass``
-==========================
+``gtk-icon-cache``
+==================
-The ``gtk-icon-cache`` class generates the proper post-install and
+The :ref:`ref-classes-gtk-icon-cache` class generates the proper post-install and
post-remove (postinst/postrm) scriptlets for packages that use GTK+ and
install icons. These scriptlets call ``gtk-update-icon-cache`` to add
the fonts to GTK+'s icon cache. Since the cache files are
@@ -753,10 +933,10 @@ creation.
.. _ref-classes-gtk-immodules-cache:
-``gtk-immodules-cache.bbclass``
-===============================
+``gtk-immodules-cache``
+=======================
-The ``gtk-immodules-cache`` class generates the proper post-install and
+The :ref:`ref-classes-gtk-immodules-cache` class generates the proper post-install and
post-remove (postinst/postrm) scriptlets for packages that install GTK+
input method modules for virtual keyboards. These scriptlets call
``gtk-update-icon-cache`` to add the input method modules to the cache.
@@ -771,19 +951,19 @@ the packages containing the modules.
.. _ref-classes-gzipnative:
-``gzipnative.bbclass``
-======================
+``gzipnative``
+==============
-The ``gzipnative`` class enables the use of different native versions of
+The :ref:`ref-classes-gzipnative` class enables the use of different native versions of
``gzip`` and ``pigz`` rather than the versions of these tools from the
build host.
.. _ref-classes-icecc:
-``icecc.bbclass``
-=================
+``icecc``
+=========
-The ``icecc`` class supports
+The :ref:`ref-classes-icecc` class supports
`Icecream <https://github.com/icecc/icecream>`__, which facilitates
taking compile jobs and distributing them among remote machines.
@@ -791,7 +971,7 @@ The class stages directories with symlinks from ``gcc`` and ``g++`` to
``icecc``, for both native and cross compilers. Depending on each
configure or compile, the OpenEmbedded build system adds the directories
at the head of the ``PATH`` list and then sets the ``ICECC_CXX`` and
-``ICEC_CC`` variables, which are the paths to the ``g++`` and ``gcc``
+``ICECC_CC`` variables, which are the paths to the ``g++`` and ``gcc``
compilers, respectively.
For the cross compiler, the class creates a ``tar.gz`` file that
@@ -799,8 +979,8 @@ contains the Yocto Project toolchain and sets ``ICECC_VERSION``, which
is the version of the cross-compiler used in the cross-development
toolchain, accordingly.
-The class handles all three different compile stages (i.e native
-,cross-kernel and target) and creates the necessary environment
+The class handles all three different compile stages (i.e. native,
+cross-kernel and target) and creates the necessary environment
``tar.gz`` file to be used by the remote machines. The class also
supports SDK generation.
@@ -810,12 +990,13 @@ using ``which``. If :term:`ICECC_ENV_EXEC` is set
in your ``local.conf`` file, the variable should point to the
``icecc-create-env`` script provided by the user. If you do not point to
a user-provided script, the build system uses the default script
-provided by the recipe ``icecc-create-env-native.bb``.
+provided by the recipe :oe_git:`icecc-create-env_0.1.bb
+</openembedded-core/tree/meta/recipes-devtools/icecc-create-env/icecc-create-env_0.1.bb>`.
.. note::
This script is a modified version and not the one that comes with
- icecc.
+ ``icecream``.
If you do not want the Icecream distributed compile support to apply to
specific recipes or classes, you can ask them to be ignored by Icecream
@@ -830,13 +1011,13 @@ Additionally, you can list recipes using the
your ``local.conf`` file to force ``icecc`` to be enabled for recipes
using an empty :term:`PARALLEL_MAKE` variable.
-Inheriting the ``icecc`` class changes all sstate signatures.
+Inheriting the :ref:`ref-classes-icecc` class changes all sstate signatures.
Consequently, if a development team has a dedicated build system that
populates :term:`SSTATE_MIRRORS` and they want to
reuse sstate from :term:`SSTATE_MIRRORS`, then all developers and the build
-system need to either inherit the ``icecc`` class or nobody should.
+system need to either inherit the :ref:`ref-classes-icecc` class or nobody should.
-At the distribution level, you can inherit the ``icecc`` class to be
+At the distribution level, you can inherit the :ref:`ref-classes-icecc` class to be
sure that all builders start with the same sstate signatures. After
inheriting the class, you can then disable the feature by setting the
:term:`ICECC_DISABLED` variable to "1" as follows::
@@ -853,10 +1034,10 @@ individually as follows in your ``local.conf`` file::
.. _ref-classes-image:
-``image.bbclass``
-=================
+``image``
+=========
-The ``image`` class helps support creating images in different formats.
+The :ref:`ref-classes-image` class helps support creating images in different formats.
First, the root filesystem is created from packages using one of the
``rootfs*.bbclass`` files (depending on the package format used) and
then one or more image files are created.
@@ -868,7 +1049,7 @@ then one or more image files are created.
install into the image.
For information on customizing images, see the
-":ref:`dev-manual/common-tasks:customizing images`" section
+":ref:`dev-manual/customizing-images:customizing images`" section
in the Yocto Project Development Tasks Manual. For information on how
images are created, see the
":ref:`overview-manual/concepts:images`" section in the
@@ -876,25 +1057,25 @@ Yocto Project Overview and Concepts Manual.
.. _ref-classes-image-buildinfo:
-``image-buildinfo.bbclass``
-===========================
+``image-buildinfo``
+===================
-The ``image-buildinfo`` class writes information to the target
+The :ref:`ref-classes-image-buildinfo` class writes information to the target
filesystem on ``/etc/build``.
.. _ref-classes-image_types:
-``image_types.bbclass``
-=======================
+``image_types``
+===============
-The ``image_types`` class defines all of the standard image output types
+The :ref:`ref-classes-image_types` class defines all of the standard image output types
that you can enable through the
:term:`IMAGE_FSTYPES` variable. You can use this
class as a reference on how to add support for custom image output
types.
-By default, the :ref:`image <ref-classes-image>` class automatically
-enables the ``image_types`` class. The ``image`` class uses the
+By default, the :ref:`ref-classes-image` class automatically
+enables the :ref:`ref-classes-image_types` class. The :ref:`ref-classes-image` class uses the
``IMGCLASSES`` variable as follows::
IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
@@ -906,7 +1087,7 @@ enables the ``image_types`` class. The ``image`` class uses the
IMGCLASSES += "image-postinst-intercepts"
inherit ${IMGCLASSES}
-The ``image_types`` class also handles conversion and compression of images.
+The :ref:`ref-classes-image_types` class also handles conversion and compression of images.
.. note::
@@ -916,8 +1097,8 @@ The ``image_types`` class also handles conversion and compression of images.
.. _ref-classes-image-live:
-``image-live.bbclass``
-======================
+``image-live``
+==============
This class controls building "live" (i.e. HDDIMG and ISO) images. Live
images contain syslinux for legacy booting, as well as the bootloader
@@ -929,10 +1110,10 @@ Normally, you do not use this class directly. Instead, you add "live" to
.. _ref-classes-insane:
-``insane.bbclass``
-==================
+``insane``
+==========
-The ``insane`` class adds a step to the package generation process so
+The :ref:`ref-classes-insane` class adds a step to the package generation process so
that output quality assurance checks are generated by the OpenEmbedded
build system. A range of checks are performed that check the build's
output for common problems that show up during runtime. Distribution
@@ -960,8 +1141,8 @@ Please keep in mind that the QA checks
are meant to detect real or potential problems in the packaged
output. So exercise caution when disabling these checks.
-Here are the tests you can list with the :term:`WARN_QA` and
-:term:`ERROR_QA` variables:
+The tests you can list with the :term:`WARN_QA` and
+:term:`ERROR_QA` variables are:
- ``already-stripped:`` Checks that produced binaries have not
already been stripped prior to the build system extracting debug
@@ -978,8 +1159,8 @@ Here are the tests you can list with the :term:`WARN_QA` and
software, like bootloaders, might need to bypass this check.
- ``buildpaths:`` Checks for paths to locations on the build host
- inside the output files. Currently, this test triggers too many false
- positives and thus is not normally enabled.
+  inside the output files. Not only can these leak information about
+  the build environment, but they also hinder binary reproducibility.
- ``build-deps:`` Determines if a build-time dependency that is
specified through :term:`DEPENDS`, explicit
@@ -992,7 +1173,7 @@ Here are the tests you can list with the :term:`WARN_QA` and
the package is installed into the image during the
:ref:`ref-tasks-rootfs` task because the auto-detected
dependency was not satisfied. An example of this would be where the
- :ref:`update-rc.d <ref-classes-update-rc.d>` class automatically
+ :ref:`ref-classes-update-rc.d` class automatically
adds a dependency on the ``initscripts-functions`` package to
packages that install an initscript that refers to
``/etc/init.d/functions``. The recipe should really have an explicit
@@ -1001,6 +1182,11 @@ Here are the tests you can list with the :term:`WARN_QA` and
``initscripts`` recipe is actually built and thus the
``initscripts-functions`` package is made available.
+- ``configure-gettext:`` Checks that, if a recipe builds something
+  that uses automake and the automake files contain an ``AM_GNU_GETTEXT``
+  directive, the recipe also inherits the :ref:`ref-classes-gettext`
+  class to ensure that gettext is available during the build.
+
- ``compile-host-path:`` Checks the
:ref:`ref-tasks-compile` log for indications that
paths to locations on the build host were used. Using such paths
@@ -1079,12 +1265,12 @@ Here are the tests you can list with the :term:`WARN_QA` and
might result in host contamination of the build output.
- ``installed-vs-shipped:`` Reports when files have been installed
- within ``do_install`` but have not been included in any package by
+ within :ref:`ref-tasks-install` but have not been included in any package by
way of the :term:`FILES` variable. Files that do not
appear in any package cannot be present in an image later on in the
build process. Ideally, all installed files should be packaged or not
installed at all. These files can be deleted at the end of
- ``do_install`` if the files are not needed in any package.
+ :ref:`ref-tasks-install` if the files are not needed in any package.
- ``invalid-chars:`` Checks that the recipe metadata variables
:term:`DESCRIPTION`,
@@ -1117,11 +1303,39 @@ Here are the tests you can list with the :term:`WARN_QA` and
``/usr/libexec``. This check is not performed if the ``libexecdir``
variable has been set explicitly to ``/usr/libexec``.
+- ``mime:`` Checks that, if a package contains MIME type files (``.xml``
+  files in ``${datadir}/mime/packages``), the recipe also inherits
+  the :ref:`ref-classes-mime` class to ensure that these files get
+  properly installed.
+
+- ``mime-xdg:`` Checks that, if a package contains a ``.desktop`` file with a
+  ``MimeType`` key present, the recipe inherits the
+  :ref:`ref-classes-mime-xdg` class, which is required for that
+  entry to be activated.
+
+- ``missing-update-alternatives:`` Checks that, if a recipe sets the
+  :term:`ALTERNATIVE` variable, the recipe also inherits
+  :ref:`ref-classes-update-alternatives` so that the alternative will
+  be correctly set up.
+
- ``packages-list:`` Checks for the same package being listed
multiple times through the :term:`PACKAGES` variable
value. Installing the package in this manner can cause errors during
packaging.
+- ``patch-fuzz:`` Checks for fuzz in patch files that may allow
+ them to apply incorrectly if the underlying code changes.
+
+- ``patch-status-core:`` Checks that the Upstream-Status is specified
+ and valid in the headers of patches for recipes in the OE-Core layer.
+
+- ``patch-status-noncore:`` Checks that the Upstream-Status is specified
+ and valid in the headers of patches for recipes in layers other than
+ OE-Core.
+
+- ``perllocalpod:`` Checks for ``perllocal.pod`` being erroneously
+ installed and packaged by a recipe.
+
- ``perm-config:`` Reports lines in ``fs-perms.txt`` that have an
invalid format.
@@ -1175,12 +1389,20 @@ Here are the tests you can list with the :term:`WARN_QA` and
options are being passed to the linker commands and your binaries
have potential security issues.
+- ``shebang-size:`` Checks that the shebang line (``#!`` in the first line)
+  of a packaged script is no longer than 128 characters; a longer line can
+  cause an error at runtime depending on the operating system.
+
- ``split-strip:`` Reports that splitting or stripping debug symbols
from binaries has failed.
- ``staticdev:`` Checks for static library files (``*.a``) in
non-``staticdev`` packages.
+- ``src-uri-bad:`` Checks that the :term:`SRC_URI` value set by a recipe
+  does not contain a reference to ``${PN}`` (instead of the correct
+  ``${BPN}``) and does not refer to unstable GitHub archive tarballs.
+
- ``symlink-to-sysroot:`` Checks for symlinks in packages that point
into :term:`TMPDIR` on the host. Such symlinks will
work on the host, but are clearly invalid when running on the target.
@@ -1191,6 +1413,15 @@ Here are the tests you can list with the :term:`WARN_QA` and
":doc:`/ref-manual/qa-checks`" for more information regarding runtime performance
issues.
+- ``unhandled-features-check:`` Checks that, if one of the variables that
+  the :ref:`ref-classes-features_check` class supports (e.g.
+  :term:`REQUIRED_DISTRO_FEATURES`) is set by a recipe, the recipe
+  also inherits :ref:`ref-classes-features_check` in order for the
+  requirement to actually work.
+
+- ``unimplemented-ptest:`` Checks that ptests are implemented for upstream
+ tests.
+
- ``unlisted-pkg-lics:`` Checks that all declared licenses applying
for a package are also declared on the recipe level (i.e. any license
in ``LICENSE:*`` should appear in :term:`LICENSE`).
@@ -1200,19 +1431,23 @@ Here are the tests you can list with the :term:`WARN_QA` and
the linker (e.g. ``/lib`` and ``/usr/lib``). While these paths will
not cause any breakage, they do waste space and are unnecessary.
+- ``usrmerge:`` If ``usrmerge`` is in :term:`DISTRO_FEATURES`, this
+ check will ensure that no package installs files to root (``/bin``,
+ ``/sbin``, ``/lib``, ``/lib64``) directories.
+
- ``var-undefined:`` Reports when variables fundamental to packaging
(i.e. :term:`WORKDIR`,
:term:`DEPLOY_DIR`, :term:`D`,
:term:`PN`, and :term:`PKGD`) are undefined
during :ref:`ref-tasks-package`.
-- ``version-going-backwards:`` If Build History is enabled, reports
- when a package being written out has a lower version than the
- previously written package under the same name. If you are placing
- output packages into a feed and upgrading packages on a target system
- using that feed, the version of a package going backwards can result
- in the target system not correctly upgrading to the "new" version of
- the package.
+- ``version-going-backwards:`` If the :ref:`ref-classes-buildhistory`
+ class is enabled, reports when a package being written out has a lower
+ version than the previously written package under the same name. If
+ you are placing output packages into a feed and upgrading packages on
+ a target system using that feed, the version of a package going
+ backwards can result in the target system not correctly upgrading to
+ the "new" version of the package.
.. note::
@@ -1227,184 +1462,187 @@ Here are the tests you can list with the :term:`WARN_QA` and
automatically get these versions. Consequently, you should only need
to explicitly add dependencies to binary driver recipes.
-.. _ref-classes-insserv:
-
-``insserv.bbclass``
-===================
-
-The ``insserv`` class uses the ``insserv`` utility to update the order
-of symbolic links in ``/etc/rc?.d/`` within an image based on
-dependencies specified by LSB headers in the ``init.d`` scripts
-themselves.
-
.. _ref-classes-kernel:
-``kernel.bbclass``
-==================
+``kernel``
+==========
-The ``kernel`` class handles building Linux kernels. The class contains
+The :ref:`ref-classes-kernel` class handles building Linux kernels. The class contains
code to build all kernel trees. All needed headers are staged into the
:term:`STAGING_KERNEL_DIR` directory to allow out-of-tree module builds
-using the :ref:`module <ref-classes-module>` class.
-
-This means that each built kernel module is packaged separately and
-inter-module dependencies are created by parsing the ``modinfo`` output.
-If all modules are required, then installing the ``kernel-modules``
-package installs all packages with modules and various other kernel
-packages such as ``kernel-vmlinux``.
-
-The ``kernel`` class contains logic that allows you to embed an initial
-RAM filesystem (initramfs) image when you build the kernel image. For
-information on how to build an initramfs, see the
-":ref:`dev-manual/common-tasks:building an initial ram filesystem (initramfs) image`" section in
+using the :ref:`ref-classes-module` class.
+
+If a file named ``defconfig`` is listed in :term:`SRC_URI`, then by default
+:ref:`ref-tasks-configure` copies it as ``.config`` in the build directory,
+so it is automatically used as the kernel configuration for the build. This
+copy is not performed if ``.config`` already exists there, which allows
+recipes to produce a configuration by other means in
+``do_configure:prepend``.
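+
+For instance (a hypothetical recipe line), a ``defconfig`` shipped with the
+recipe just needs to be listed in the source URI::
+
+   SRC_URI += "file://defconfig"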
+
+Each built kernel module is packaged separately and inter-module
+dependencies are created by parsing the ``modinfo`` output. If all modules
+are required, then installing the ``kernel-modules`` package installs all
+packages with modules and various other kernel packages such as
+``kernel-vmlinux``.
+
+The :ref:`ref-classes-kernel` class contains logic that allows you to embed an initial
+RAM filesystem (:term:`Initramfs`) image when you build the kernel image. For
+information on how to build an :term:`Initramfs`, see the
+":ref:`dev-manual/building:building an initial ram filesystem (Initramfs) image`" section in
the Yocto Project Development Tasks Manual.
-Various other classes are used by the ``kernel`` and ``module`` classes
-internally including the :ref:`kernel-arch <ref-classes-kernel-arch>`,
-:ref:`module-base <ref-classes-module-base>`, and
-:ref:`linux-kernel-base <ref-classes-linux-kernel-base>` classes.
+Various other classes are used by the :ref:`ref-classes-kernel` and :ref:`ref-classes-module` classes
+internally including the :ref:`ref-classes-kernel-arch`, :ref:`ref-classes-module-base`, and
+:ref:`ref-classes-linux-kernel-base` classes.
.. _ref-classes-kernel-arch:
-``kernel-arch.bbclass``
-=======================
+``kernel-arch``
+===============
-The ``kernel-arch`` class sets the ``ARCH`` environment variable for
+The :ref:`ref-classes-kernel-arch` class sets the ``ARCH`` environment variable for
Linux kernel compilation (including modules).
.. _ref-classes-kernel-devicetree:
-``kernel-devicetree.bbclass``
-=============================
+``kernel-devicetree``
+=====================
-The ``kernel-devicetree`` class, which is inherited by the
-:ref:`kernel <ref-classes-kernel>` class, supports device tree
-generation.
+The :ref:`ref-classes-kernel-devicetree` class, which is inherited by the
+:ref:`ref-classes-kernel` class, supports device tree generation.
+
+Its behavior is mainly controlled by the following variables:
+
+- :term:`KERNEL_DEVICETREE_BUNDLE`: whether to bundle the kernel and device tree
+- :term:`KERNEL_DTBDEST`: directory where to install DTB files
+- :term:`KERNEL_DTBVENDORED`: whether to keep vendor subdirectories
+- :term:`KERNEL_DTC_FLAGS`: flags for ``dtc``, the Device Tree Compiler
+- :term:`KERNEL_PACKAGE_NAME`: base name of the kernel packages
.. _ref-classes-kernel-fitimage:
-``kernel-fitimage.bbclass``
-===========================
+``kernel-fitimage``
+===================
-The ``kernel-fitimage`` class provides support to pack a kernel image,
-device trees, a U-boot script, a Initramfs bundle and a RAM disk
+The :ref:`ref-classes-kernel-fitimage` class provides support to pack a kernel image,
+device trees, a U-boot script, an :term:`Initramfs` bundle and a RAM disk
into a single FIT image. In theory, a FIT image can support any number
-of kernels, U-boot scripts, Initramfs bundles, RAM disks and device-trees.
-However, ``kernel-fitimage`` currently only supports
-limited usescases: just one kernel image, an optional U-boot script,
-an optional Initramfs bundle, an optional RAM disk, and any number of
-device tree.
+of kernels, U-boot scripts, :term:`Initramfs` bundles, RAM disks and device-trees.
+However, :ref:`ref-classes-kernel-fitimage` currently only supports
+limited usecases: just one kernel image, an optional U-boot script,
+an optional :term:`Initramfs` bundle, an optional RAM disk, and any number of
+device trees.
To create a FIT image, it is required that :term:`KERNEL_CLASSES`
-is set to include "kernel-fitimage" and :term:`KERNEL_IMAGETYPE`
-is set to "fitImage".
+is set to include ":ref:`ref-classes-kernel-fitimage`" and one of :term:`KERNEL_IMAGETYPE`,
+:term:`KERNEL_ALT_IMAGETYPE` or :term:`KERNEL_IMAGETYPES` to include "fitImage".
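+
+For example, in a machine configuration file (an illustrative sketch)::
+
+   KERNEL_CLASSES += "kernel-fitimage"
+   KERNEL_IMAGETYPE = "fitImage"
+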
The options for the device tree compiler passed to ``mkimage -D``
when creating the FIT image are specified using the
:term:`UBOOT_MKIMAGE_DTCOPTS` variable.
Only a single kernel can be added to the FIT image created by
-``kernel-fitimage`` and the kernel image in FIT is mandatory. The
+:ref:`ref-classes-kernel-fitimage` and the kernel image in FIT is mandatory. The
address where the kernel image is to be loaded by U-Boot is
specified by :term:`UBOOT_LOADADDRESS` and the entrypoint by
:term:`UBOOT_ENTRYPOINT`.
Multiple device trees can be added to the FIT image created by
-``kernel-fitimage`` and the device tree is optional.
+:ref:`ref-classes-kernel-fitimage` and the device tree is optional.
The address where the device tree is to be loaded by U-Boot is
specified by :term:`UBOOT_DTBO_LOADADDRESS` for device tree overlays
and by :term:`UBOOT_DTB_LOADADDRESS` for device tree binaries.
Only a single RAM disk can be added to the FIT image created by
-``kernel-fitimage`` and the RAM disk in FIT is optional.
+:ref:`ref-classes-kernel-fitimage` and the RAM disk in FIT is optional.
The address where the RAM disk image is to be loaded by U-Boot
is specified by :term:`UBOOT_RD_LOADADDRESS` and the entrypoint by
-:term:`UBOOT_RD_ENTRYPOINT`. The ramdisk is added to FIT image when
-:term:`INITRAMFS_IMAGE` is specified and that :term:`INITRAMFS_IMAGE_BUNDLE`
-is set to 0.
+:term:`UBOOT_RD_ENTRYPOINT`. The ramdisk is added to the FIT image when
+:term:`INITRAMFS_IMAGE` is specified and requires that :term:`INITRAMFS_IMAGE_BUNDLE`
+is not set to 1.
-Only a single Initramfs bundle can be added to the FIT image created by
-``kernel-fitimage`` and the Initramfs bundle in FIT is optional.
-In case of Initramfs, the kernel is configured to be bundled with the root filesystem
+Only a single :term:`Initramfs` bundle can be added to the FIT image created by
+:ref:`ref-classes-kernel-fitimage` and the :term:`Initramfs` bundle in FIT is optional.
+In case of :term:`Initramfs`, the kernel is configured to be bundled with the root filesystem
in the same binary (example: zImage-initramfs-:term:`MACHINE`.bin).
-When the kernel is copied to RAM and executed, it unpacks the Initramfs root filesystem.
-The Initramfs bundle can be enabled when :term:`INITRAMFS_IMAGE`
-is specified and that :term:`INITRAMFS_IMAGE_BUNDLE` is set to 1.
-The address where the Initramfs bundle is to be loaded by U-boot is specified
+When the kernel is copied to RAM and executed, it unpacks the :term:`Initramfs` root filesystem.
+The :term:`Initramfs` bundle can be enabled when :term:`INITRAMFS_IMAGE`
+is specified and requires that :term:`INITRAMFS_IMAGE_BUNDLE` is set to 1.
+The address where the :term:`Initramfs` bundle is to be loaded by U-boot is specified
by :term:`UBOOT_LOADADDRESS` and the entrypoint by :term:`UBOOT_ENTRYPOINT`.
Only a single U-boot boot script can be added to the FIT image created by
-``kernel-fitimage`` and the boot script is optional.
+:ref:`ref-classes-kernel-fitimage` and the boot script is optional.
The boot script is specified in the ITS file as a text file containing
U-boot commands. When using a boot script the user should configure the
-U-boot ``do_install`` task to copy the script to sysroot.
-So the script can be included in the FIT image by the ``kernel-fitimage``
+U-boot :ref:`ref-tasks-install` task to copy the script to the sysroot,
+so that the script can be included in the FIT image by the :ref:`ref-classes-kernel-fitimage`
class. At run-time, U-boot CONFIG_BOOTCOMMAND define can be configured to
-load the boot script from the FIT image and executes it.
+load the boot script from the FIT image and execute it.
-The FIT image generated by ``kernel-fitimage`` class is signed when the
+The FIT image generated by the :ref:`ref-classes-kernel-fitimage` class is signed when the
variables :term:`UBOOT_SIGN_ENABLE`, :term:`UBOOT_MKIMAGE_DTCOPTS`,
:term:`UBOOT_SIGN_KEYDIR` and :term:`UBOOT_SIGN_KEYNAME` are set
appropriately. The default values used for :term:`FIT_HASH_ALG` and
-:term:`FIT_SIGN_ALG` in ``kernel-fitimage`` are "sha256" and
-"rsa2048" respectively. The keys for signing fitImage can be generated using
-the ``kernel-fitimage`` class when both :term:`FIT_GENERATE_KEYS` and
+:term:`FIT_SIGN_ALG` in :ref:`ref-classes-kernel-fitimage` are "sha256" and
+"rsa2048" respectively. The keys for signing the FIT image can be generated using
+the :ref:`ref-classes-kernel-fitimage` class when both :term:`FIT_GENERATE_KEYS` and
:term:`UBOOT_SIGN_ENABLE` are set to "1".
.. _ref-classes-kernel-grub:
-``kernel-grub.bbclass``
-=======================
+``kernel-grub``
+===============
-The ``kernel-grub`` class updates the boot area and the boot menu with
+The :ref:`ref-classes-kernel-grub` class updates the boot area and the boot menu with
the kernel as the priority boot mechanism while installing a RPM to
update the kernel on a deployed target.
.. _ref-classes-kernel-module-split:
-``kernel-module-split.bbclass``
-===============================
+``kernel-module-split``
+=======================
-The ``kernel-module-split`` class provides common functionality for
+The :ref:`ref-classes-kernel-module-split` class provides common functionality for
splitting Linux kernel modules into separate packages.
.. _ref-classes-kernel-uboot:
-``kernel-uboot.bbclass``
-========================
+``kernel-uboot``
+================
-The ``kernel-uboot`` class provides support for building from
+The :ref:`ref-classes-kernel-uboot` class provides support for building from
vmlinux-style kernel sources.
.. _ref-classes-kernel-uimage:
-``kernel-uimage.bbclass``
-=========================
+``kernel-uimage``
+=================
-The ``kernel-uimage`` class provides support to pack uImage.
+The :ref:`ref-classes-kernel-uimage` class provides support to pack uImage.
.. _ref-classes-kernel-yocto:
-``kernel-yocto.bbclass``
-========================
+``kernel-yocto``
+================
-The ``kernel-yocto`` class provides common functionality for building
+The :ref:`ref-classes-kernel-yocto` class provides common functionality for building
from linux-yocto style kernel source repositories.
.. _ref-classes-kernelsrc:
-``kernelsrc.bbclass``
-=====================
+``kernelsrc``
+=============
-The ``kernelsrc`` class sets the Linux kernel source and version.
+The :ref:`ref-classes-kernelsrc` class sets the Linux kernel source and version.
.. _ref-classes-lib_package:
-``lib_package.bbclass``
-=======================
+``lib_package``
+===============
-The ``lib_package`` class supports recipes that build libraries and
+The :ref:`ref-classes-lib_package` class supports recipes that build libraries and
produce executable binaries, where those binaries should not be
installed by default along with the library. Instead, the binaries are
added to a separate ``${``\ :term:`PN`\ ``}-bin`` package to
@@ -1412,40 +1650,40 @@ make their installation optional.
.. _ref-classes-libc*:
-``libc*.bbclass``
-=================
+``libc*``
+=========
-The ``libc*`` classes support recipes that build packages with ``libc``:
+The :ref:`ref-classes-libc*` classes support recipes that build packages with ``libc``:
-- The ``libc-common`` class provides common support for building with
+- The :ref:`libc-common <ref-classes-libc*>` class provides common support for building with
``libc``.
-- The ``libc-package`` class supports packaging up ``glibc`` and
+- The :ref:`libc-package <ref-classes-libc*>` class supports packaging up ``glibc`` and
``eglibc``.
.. _ref-classes-license:
-``license.bbclass``
-===================
+``license``
+===========
-The ``license`` class provides license manifest creation and license
+The :ref:`ref-classes-license` class provides license manifest creation and license
exclusion. This class is enabled by default using the default value for
the :term:`INHERIT_DISTRO` variable.
.. _ref-classes-linux-kernel-base:
-``linux-kernel-base.bbclass``
-=============================
+``linux-kernel-base``
+=====================
-The ``linux-kernel-base`` class provides common functionality for
+The :ref:`ref-classes-linux-kernel-base` class provides common functionality for
recipes that build out of the Linux kernel source tree. These builds
go beyond the kernel itself. For example, the Perf recipe also
inherits this class.
.. _ref-classes-linuxloader:
-``linuxloader.bbclass``
-=======================
+``linuxloader``
+===============
Provides the function ``linuxloader()``, which gives the value of the
dynamic loader/linker provided on the platform. This value is used by a
@@ -1453,71 +1691,101 @@ number of other classes.
.. _ref-classes-logging:
-``logging.bbclass``
-===================
+``logging``
+===========
-The ``logging`` class provides the standard shell functions used to log
+The :ref:`ref-classes-logging` class provides the standard shell functions used to log
messages for various BitBake severity levels (i.e. ``bbplain``,
``bbnote``, ``bbwarn``, ``bberror``, ``bbfatal``, and ``bbdebug``).
-This class is enabled by default since it is inherited by the ``base``
+This class is enabled by default since it is inherited by the :ref:`ref-classes-base`
class.
+.. _ref-classes-meson:
+
+``meson``
+=========
+
+The :ref:`ref-classes-meson` class allows creating recipes that build software
+using the `Meson <https://mesonbuild.com/>`__ build system. You can use the
+:term:`MESON_BUILDTYPE` and :term:`EXTRA_OEMESON`
+variables to specify additional configuration options to be passed using the
+``meson`` command line.
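+
+For instance, a recipe could pass extra Meson options this way (the option
+name below is only an illustration)::
+
+   inherit meson
+   EXTRA_OEMESON = "-Dgtk_doc=false"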
+
.. _ref-classes-metadata_scm:
-``metadata_scm.bbclass``
-========================
+``metadata_scm``
+================
-The ``metadata_scm`` class provides functionality for querying the
+The :ref:`ref-classes-metadata_scm` class provides functionality for querying the
branch and revision of a Source Code Manager (SCM) repository.
-The :ref:`base <ref-classes-base>` class uses this class to print the
-revisions of each layer before starting every build. The
-``metadata_scm`` class is enabled by default because it is inherited by
-the ``base`` class.
+The :ref:`ref-classes-base` class uses this class to print the revisions of
+each layer before starting every build. The :ref:`ref-classes-metadata_scm`
+class is enabled by default because it is inherited by the
+:ref:`ref-classes-base` class.
.. _ref-classes-migrate_localcount:
-``migrate_localcount.bbclass``
-==============================
+``migrate_localcount``
+======================
-The ``migrate_localcount`` class verifies a recipe's localcount data and
+The :ref:`ref-classes-migrate_localcount` class verifies a recipe's localcount data and
increments it appropriately.
.. _ref-classes-mime:
-``mime.bbclass``
-================
+``mime``
+========
-The ``mime`` class generates the proper post-install and post-remove
+The :ref:`ref-classes-mime` class generates the proper post-install and post-remove
(postinst/postrm) scriptlets for packages that install MIME type files.
These scriptlets call ``update-mime-database`` to add the MIME types to
the shared database.
+.. _ref-classes-mime-xdg:
+
+``mime-xdg``
+============
+
+The :ref:`ref-classes-mime-xdg` class generates the proper
+post-install and post-remove (postinst/postrm) scriptlets for packages
+that install ``.desktop`` files containing ``MimeType`` entries.
+These scriptlets call ``update-desktop-database`` to add the MIME types
+to the database of MIME types handled by desktop files.
+
+Thanks to this class, when users open a file through a file browser
+on recently created images, they are only offered applications that can
+actually open the selected file, instead of having to choose from the pool
+of all known applications.
+
+If you have recipes installing their ``.desktop`` files as absolute
+symbolic links, the current implementation of this class cannot detect
+such files. In this case, you have to add the corresponding
+package names to the :term:`MIME_XDG_PACKAGES` variable.
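+
+For example (an illustrative recipe line)::
+
+   MIME_XDG_PACKAGES += "${PN}"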
+
.. _ref-classes-mirrors:
-``mirrors.bbclass``
-===================
+``mirrors``
+===========
-The ``mirrors`` class sets up some standard
+The :ref:`ref-classes-mirrors` class sets up some standard
:term:`MIRRORS` entries for source code mirrors. These
mirrors provide a fall-back path in case the upstream source specified
in :term:`SRC_URI` within recipes is unavailable.
This class is enabled by default since it is inherited by the
-:ref:`base <ref-classes-base>` class.
+:ref:`ref-classes-base` class.
.. _ref-classes-module:
-``module.bbclass``
-==================
+``module``
+==========
-The ``module`` class provides support for building out-of-tree Linux
-kernel modules. The class inherits the
-:ref:`module-base <ref-classes-module-base>` and
-:ref:`kernel-module-split <ref-classes-kernel-module-split>` classes,
-and implements the :ref:`ref-tasks-compile` and
-:ref:`ref-tasks-install` tasks. The class provides
+The :ref:`ref-classes-module` class provides support for building out-of-tree Linux
+kernel modules. The class inherits the :ref:`ref-classes-module-base` and
+:ref:`ref-classes-kernel-module-split` classes, and implements the
+:ref:`ref-tasks-compile` and :ref:`ref-tasks-install` tasks. The class provides
everything needed to build and package a kernel module.
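+
+A minimal out-of-tree module recipe sketch (with hypothetical file names)
+might contain::
+
+   inherit module
+   SRC_URI = "file://Makefile file://hello.c"
+   S = "${WORKDIR}"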
For general information on out-of-tree Linux kernel modules, see the
@@ -1526,44 +1794,44 @@ section in the Yocto Project Linux Kernel Development Manual.
.. _ref-classes-module-base:
-``module-base.bbclass``
-=======================
+``module-base``
+===============
-The ``module-base`` class provides the base functionality for building
-Linux kernel modules. Typically, a recipe that builds software that
-includes one or more kernel modules and has its own means of building
-the module inherits this class as opposed to inheriting the
-:ref:`module <ref-classes-module>` class.
+The :ref:`ref-classes-module-base` class provides the base functionality for
+building Linux kernel modules. Typically, a recipe that builds software that
+includes one or more kernel modules and has its own means of building the module
+inherits this class as opposed to inheriting the :ref:`ref-classes-module`
+class.
.. _ref-classes-multilib*:
-``multilib*.bbclass``
-=====================
+``multilib*``
+=============
-The ``multilib*`` classes provide support for building libraries with
+The :ref:`ref-classes-multilib*` classes provide support for building libraries with
different target optimizations or target architectures and installing
them side-by-side in the same image.
For more information on using the Multilib feature, see the
-":ref:`dev-manual/common-tasks:combining multiple versions of library files into one image`"
+":ref:`dev-manual/libraries:combining multiple versions of library files into one image`"
section in the Yocto Project Development Tasks Manual.
.. _ref-classes-native:
-``native.bbclass``
-==================
+``native``
+==========
-The ``native`` class provides common functionality for recipes that
+The :ref:`ref-classes-native` class provides common functionality for recipes that
build tools to run on the :term:`Build Host` (i.e. tools that use the compiler
or other tools from the build host).
You can create a recipe that builds tools that run natively on the host
a couple different ways:
-- Create a ``myrecipe-native.bb`` recipe that inherits the ``native``
+- Create a ``myrecipe-native.bb`` recipe that inherits the :ref:`ref-classes-native`
class. If you use this method, you must order the inherit statement
in the recipe after all other inherit statements so that the
- ``native`` class is inherited last.
+ :ref:`ref-classes-native` class is inherited last.
.. note::
@@ -1585,17 +1853,17 @@ a couple different ways:
specify any functionality specific to the respective native or target
case.
-Although applied differently, the ``native`` class is used with both
+Although applied differently, the :ref:`ref-classes-native` class is used with both
methods. The advantage of the second method is that you do not need to
have two separate recipes (assuming you need both) for native and
target. All common parts of the recipe are automatically shared.
.. _ref-classes-nativesdk:
-``nativesdk.bbclass``
-=====================
+``nativesdk``
+=============
-The ``nativesdk`` class provides common functionality for recipes that
+The :ref:`ref-classes-nativesdk` class provides common functionality for recipes that
wish to build tools to run as part of an SDK (i.e. tools that run on
:term:`SDKMACHINE`).
@@ -1603,11 +1871,11 @@ You can create a recipe that builds tools that run on the SDK machine a
couple different ways:
- Create a ``nativesdk-myrecipe.bb`` recipe that inherits the
- ``nativesdk`` class. If you use this method, you must order the
+ :ref:`ref-classes-nativesdk` class. If you use this method, you must order the
inherit statement in the recipe after all other inherit statements so
- that the ``nativesdk`` class is inherited last.
+ that the :ref:`ref-classes-nativesdk` class is inherited last.
-- Create a ``nativesdk`` variant of any recipe by adding the following::
+- Create a :ref:`ref-classes-nativesdk` variant of any recipe by adding the following::
BBCLASSEXTEND = "nativesdk"
@@ -1626,26 +1894,26 @@ couple different ways:
Not doing so can lead to subtle problems because there is code that
depends on the naming convention.
-Although applied differently, the ``nativesdk`` class is used with both
+Although applied differently, the :ref:`ref-classes-nativesdk` class is used with both
methods. The advantage of the second method is that you do not need to
have two separate recipes (assuming you need both) for the SDK machine
and the target. All common parts of the recipe are automatically shared.
.. _ref-classes-nopackages:
-``nopackages.bbclass``
-======================
+``nopackages``
+==============
Disables packaging tasks for those recipes and classes where packaging
is not needed.
.. _ref-classes-npm:
-``npm.bbclass``
-===============
+``npm``
+=======
-Provides support for building Node.js software fetched using the `node
-package manager (NPM) <https://en.wikipedia.org/wiki/Npm_(software)>`__.
+Provides support for building Node.js software fetched using the
+:wikipedia:`node package manager (NPM) <Npm_(software)>`.
.. note::
@@ -1653,27 +1921,27 @@ package manager (NPM) <https://en.wikipedia.org/wiki/Npm_(software)>`__.
fetcher to have dependencies fetched and packaged automatically.
For information on how to create NPM packages, see the
-":ref:`dev-manual/common-tasks:creating node package manager (npm) packages`"
+":ref:`dev-manual/packages:creating node package manager (npm) packages`"
section in the Yocto Project Development Tasks Manual.
.. _ref-classes-oelint:
-``oelint.bbclass``
-==================
+``oelint``
+==========
-The ``oelint`` class is an obsolete lint checking tool available in
+The :ref:`ref-classes-oelint` class is an obsolete lint checking tool available in
``meta/classes`` in the :term:`Source Directory`.
There are some classes that could be generally useful in OE-Core but
-are never actually used within OE-Core itself. The ``oelint`` class is
+are never actually used within OE-Core itself. The :ref:`ref-classes-oelint` class is
one such example. However, being aware of this class can reduce the
proliferation of different versions of similar classes across multiple
layers.
.. _ref-classes-overlayfs:
-``overlayfs.bbclass``
-=======================
+``overlayfs``
+=============
It's often desired in Embedded System design to have a read-only root filesystem.
But a lot of different applications might want to have read-write access to
@@ -1694,7 +1962,7 @@ is supported by ``overlayfs``. This has to be done in your machine configuration
* QA checks fail to catch file existence if you redefine this variable in your recipe!
* Only the existence of the systemd mount unit file is checked, not its contents.
* To get more details on ``overlayfs``, its internals and supported operations, please refer
- to the official documentation of the `Linux kernel <https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html>`_.
+ to the official documentation of the `Linux kernel <https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html>`__.
The class assumes you have a ``data.mount`` systemd unit defined elsewhere in your BSP
(e.g. in ``systemd-machine-units`` recipe) and it's installed into the image.
@@ -1719,7 +1987,7 @@ and then in your recipe::
On a practical note, your application recipe might require multiple
overlays to be mounted before running to avoid writing to the underlying
file system (which can be forbidden in case of read-only file system)
-To achieve that :ref:`overlayfs <ref-classes-overlayfs>` provides a ``systemd``
+To achieve that, :ref:`ref-classes-overlayfs` provides a ``systemd``
helper service for mounting overlays. This helper service is named
``${PN}-overlays.service`` and can be depended on in your application recipe
(named ``application`` in the following example) ``systemd`` unit by adding
@@ -1732,12 +2000,12 @@ to the unit the following::
.. note::
The class does not support the ``/etc`` directory itself, because ``systemd`` depends on it.
- In order to get ``/etc`` in overlayfs, see :ref:`overlayfs-etc <ref-classes-overlayfs-etc>`.
+ In order to get ``/etc`` in overlayfs, see :ref:`ref-classes-overlayfs-etc`.
.. _ref-classes-overlayfs-etc:
-``overlayfs-etc.bbclass``
-=========================
+``overlayfs-etc``
+=================
In order to have the ``/etc`` directory in overlayfs a special handling at early
boot stage is required. The idea is to supply a custom init script that mounts
@@ -1781,10 +2049,10 @@ The class provides two options for ``/sbin/init`` generation:
.. _ref-classes-own-mirrors:
-``own-mirrors.bbclass``
-=======================
+``own-mirrors``
+===============
-The ``own-mirrors`` class makes it easier to set up your own
+The :ref:`ref-classes-own-mirrors` class makes it easier to set up your own
:term:`PREMIRRORS` from which to first fetch source
before attempting to fetch it from the upstream specified in
:term:`SRC_URI` within each recipe.
@@ -1800,38 +2068,28 @@ in :term:`SOURCE_MIRROR_URL`.
.. _ref-classes-package:
-``package.bbclass``
-===================
+``package``
+===========
-The ``package`` class supports generating packages from a build's
+The :ref:`ref-classes-package` class supports generating packages from a build's
output. The core generic functionality is in ``package.bbclass``. The
code specific to particular package types resides in these
-package-specific classes:
-:ref:`package_deb <ref-classes-package_deb>`,
-:ref:`package_rpm <ref-classes-package_rpm>`,
-:ref:`package_ipk <ref-classes-package_ipk>`, and
-:ref:`package_tar <ref-classes-package_tar>`.
-
-.. note::
-
- The
- package_tar
- class is broken and not supported. It is recommended that you do not
- use this class.
+package-specific classes: :ref:`ref-classes-package_deb`,
+:ref:`ref-classes-package_rpm`, and :ref:`ref-classes-package_ipk`.
You can control the list of resulting package formats by using the
:term:`PACKAGE_CLASSES` variable defined in your ``conf/local.conf``
configuration file, which is located in the :term:`Build Directory`.
-When defining the variable, you can
-specify one or more package types. Since images are generated from
-packages, a packaging class is needed to enable image generation. The
-first class listed in this variable is used for image generation.
+When defining the variable, you can specify one or more package types.
+Since images are generated from packages, a packaging class is needed
+to enable image generation. The first class listed in this variable is
+used for image generation.
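+
+For example, a typical ``conf/local.conf`` setting might look like the
+following (the exact list of formats is up to you)::
+
+   PACKAGE_CLASSES ?= "package_rpm package_ipk"
+
+With this setting, RPM packaging is used for image generation while IPK
+packages are also produced.
+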
If you take the optional step to set up a repository (package feed) on
the development host that can be used by DNF, you can install packages
from the feed while you are running the image on the target (i.e.
runtime installation of packages). For more information, see the
-":ref:`dev-manual/common-tasks:using runtime package management`"
+":ref:`dev-manual/packages:using runtime package management`"
section in the Yocto Project Development Tasks Manual.
The package-specific class you choose can affect build-time performance
@@ -1842,7 +2100,7 @@ complete build of the package with all dependencies previously built.
The reason for this discrepancy is because the RPM package manager
creates and processes more :term:`Metadata` than the IPK package
manager. Consequently, you might consider setting :term:`PACKAGE_CLASSES` to
-"package_ipk" if you are building smaller systems.
+":ref:`ref-classes-package_ipk`" if you are building smaller systems.
Before making your package manager decision, however, you should
consider some further things about using RPM:
@@ -1867,120 +2125,136 @@ at these two Yocto Project mailing list links:
.. _ref-classes-package_deb:
-``package_deb.bbclass``
-=======================
+``package_deb``
+===============
-The ``package_deb`` class provides support for creating packages that
+The :ref:`ref-classes-package_deb` class provides support for creating packages that
use the Debian (i.e. ``.deb``) file format. The class ensures the
packages are written out in a ``.deb`` file format to the
``${``\ :term:`DEPLOY_DIR_DEB`\ ``}`` directory.
-This class inherits the :ref:`package <ref-classes-package>` class and
+This class inherits the :ref:`ref-classes-package` class and
is enabled through the :term:`PACKAGE_CLASSES`
variable in the ``local.conf`` file.
.. _ref-classes-package_ipk:
-``package_ipk.bbclass``
-=======================
+``package_ipk``
+===============
-The ``package_ipk`` class provides support for creating packages that
+The :ref:`ref-classes-package_ipk` class provides support for creating packages that
use the IPK (i.e. ``.ipk``) file format. The class ensures the packages
are written out in a ``.ipk`` file format to the
``${``\ :term:`DEPLOY_DIR_IPK`\ ``}`` directory.
-This class inherits the :ref:`package <ref-classes-package>` class and
+This class inherits the :ref:`ref-classes-package` class and
is enabled through the :term:`PACKAGE_CLASSES`
variable in the ``local.conf`` file.
.. _ref-classes-package_rpm:
-``package_rpm.bbclass``
-=======================
+``package_rpm``
+===============
-The ``package_rpm`` class provides support for creating packages that
+The :ref:`ref-classes-package_rpm` class provides support for creating packages that
use the RPM (i.e. ``.rpm``) file format. The class ensures the packages
are written out in a ``.rpm`` file format to the
``${``\ :term:`DEPLOY_DIR_RPM`\ ``}`` directory.
-This class inherits the :ref:`package <ref-classes-package>` class and
+This class inherits the :ref:`ref-classes-package` class and
is enabled through the :term:`PACKAGE_CLASSES`
variable in the ``local.conf`` file.
.. _ref-classes-package_tar:
-``package_tar.bbclass``
-=======================
+``package_tar``
+===============
-The ``package_tar`` class provides support for creating tarballs. The
+The :ref:`ref-classes-package_tar` class provides support for creating tarballs. The
class ensures the packages are written out in a tarball format to the
``${``\ :term:`DEPLOY_DIR_TAR`\ ``}`` directory.
-This class inherits the :ref:`package <ref-classes-package>` class and
+This class inherits the :ref:`ref-classes-package` class and
is enabled through the :term:`PACKAGE_CLASSES`
variable in the ``local.conf`` file.
.. note::
- You cannot specify the ``package_tar`` class first using the
+ You cannot specify the :ref:`ref-classes-package_tar` class first using the
:term:`PACKAGE_CLASSES` variable. You must use ``.deb``, ``.ipk``, or ``.rpm``
file formats for your image or SDK.
.. _ref-classes-packagedata:
-``packagedata.bbclass``
-=======================
+``packagedata``
+===============
-The ``packagedata`` class provides common functionality for reading
+The :ref:`ref-classes-packagedata` class provides common functionality for reading
``pkgdata`` files found in :term:`PKGDATA_DIR`. These
files contain information about each output package produced by the
OpenEmbedded build system.
This class is enabled by default because it is inherited by the
-:ref:`package <ref-classes-package>` class.
+:ref:`ref-classes-package` class.
.. _ref-classes-packagegroup:
-``packagegroup.bbclass``
-========================
+``packagegroup``
+================
-The ``packagegroup`` class sets default values appropriate for package
+The :ref:`ref-classes-packagegroup` class sets default values appropriate for package
group recipes (e.g. :term:`PACKAGES`, :term:`PACKAGE_ARCH`, :term:`ALLOW_EMPTY`, and
so forth). It is highly recommended that all package group recipes
inherit this class.
For information on how to use this class, see the
-":ref:`dev-manual/common-tasks:customizing images using custom package groups`"
+":ref:`dev-manual/customizing-images:customizing images using custom package groups`"
section in the Yocto Project Development Tasks Manual.
Previously, this class was called the ``task`` class.
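+
+As a quick illustration, a minimal package group recipe might look like the
+following (the group contents are only examples)::
+
+   SUMMARY = "Custom package group for my image"
+   inherit packagegroup
+
+   RDEPENDS:${PN} = "dropbear rsync"
+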
.. _ref-classes-patch:
-``patch.bbclass``
-=================
+``patch``
+=========
-The ``patch`` class provides all functionality for applying patches
+The :ref:`ref-classes-patch` class provides all functionality for applying patches
during the :ref:`ref-tasks-patch` task.
This class is enabled by default because it is inherited by the
-:ref:`base <ref-classes-base>` class.
+:ref:`ref-classes-base` class.
.. _ref-classes-perlnative:
-``perlnative.bbclass``
-======================
+``perlnative``
+==============
-When inherited by a recipe, the ``perlnative`` class supports using the
+When inherited by a recipe, the :ref:`ref-classes-perlnative` class supports using the
native version of Perl built by the build system rather than using the
version provided by the build host.
+.. _ref-classes-pypi:
+
+``pypi``
+========
+
+The :ref:`ref-classes-pypi` class sets variables appropriately for recipes that build
+Python modules from `PyPI <https://pypi.org/>`__, the Python Package Index.
+By default it determines the PyPI package name based upon :term:`BPN`
+(stripping any "python-" or "python3-" prefix if present). However, in
+some cases you may need to set it manually in the recipe by setting
+:term:`PYPI_PACKAGE`.
+
+Variables set by the :ref:`ref-classes-pypi` class include :term:`SRC_URI`, :term:`SECTION`,
+:term:`HOMEPAGE`, :term:`UPSTREAM_CHECK_URI`, :term:`UPSTREAM_CHECK_REGEX`
+and :term:`CVE_PRODUCT`.
+
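+A minimal sketch of a recipe using this class might look like the following
+(the recipe is assumed to be named ``python3-example_1.0.bb`` and the upstream
+project is assumed to be published on PyPI as "Example")::
+
+   inherit pypi python_setuptools_build_meta
+
+   PYPI_PACKAGE = "Example"
+
+   SRC_URI[sha256sum] = "..."
+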
.. _ref-classes-python_flit_core:
-``python_flit_core.bbclass``
-============================
+``python_flit_core``
+====================
-The ``python_flit_core`` class enables building Python modules which declare
+The :ref:`ref-classes-python_flit_core` class enables building Python modules which declare
the `PEP-517 <https://www.python.org/dev/peps/pep-0517/>`__ compliant
``flit_core.buildapi`` ``build-backend`` in the ``[build-system]``
section of ``pyproject.toml`` (See `PEP-518 <https://www.python.org/dev/peps/pep-0518/>`__).
@@ -1988,42 +2262,64 @@ section of ``pyproject.toml`` (See `PEP-518 <https://www.python.org/dev/peps/pep
Python modules built with ``flit_core.buildapi`` are pure Python (no
``C`` or ``Rust`` extensions).
-Internally this uses the :ref:`python_pep517 <ref-classes-python_pep517>` class.
+Internally this uses the :ref:`ref-classes-python_pep517` class.
.. _ref-classes-python_pep517:
-``python_pep517.bbclass``
-=========================
+``python_pep517``
+=================
-The ``python_pep517`` class builds and installs a Python ``wheel`` binary
+The :ref:`ref-classes-python_pep517` class builds and installs a Python ``wheel`` binary
archive (see `PEP-517 <https://peps.python.org/pep-0517/>`__).
Recipes wouldn't inherit this directly, instead typically another class will
-inherit this, add the relevant native dependencies, and set
-:term:`PEP517_BUILD_API` to the Python class which implements the PEP-517 build
-API.
+inherit this and add the relevant native dependencies.
-Examples of classes which do this are :ref:`python_flit_core
-<ref-classes-python_flit_core>`, :ref:`python_setuptools_build_meta
-<ref-classes-python_setuptools_build_meta>`, and :ref:`python_poetry_core
-<ref-classes-python_poetry_core>`.
+Examples of classes which do this are :ref:`ref-classes-python_flit_core`,
+:ref:`ref-classes-python_setuptools_build_meta`, and
+:ref:`ref-classes-python_poetry_core`.
.. _ref-classes-python_poetry_core:
-``python_poetry_core.bbclass``
-==============================
+``python_poetry_core``
+======================
-The ``python_poetry_core`` class enables building Python modules which use the
+The :ref:`ref-classes-python_poetry_core` class enables building Python modules which use the
`Poetry Core <https://python-poetry.org>`__ build system.
-Internally this uses the :ref:`python_pep517 <ref-classes-python_pep517>` class.
+Internally this uses the :ref:`ref-classes-python_pep517` class.
+
+.. _ref-classes-python_pyo3:
+
+``python_pyo3``
+===============
+
+The :ref:`ref-classes-python_pyo3` class helps to make sure that Python
+extensions written in Rust and built with `PyO3 <https://pyo3.rs/>`__ properly
+set up the environment for cross compilation.
+
+This class is internal to the :ref:`ref-classes-python-setuptools3_rust` class
+and is not meant to be used directly in recipes.
+
+.. _ref-classes-python-setuptools3_rust:
+
+``python-setuptools3_rust``
+===========================
+
+The :ref:`ref-classes-python-setuptools3_rust` class enables building Python
+extensions implemented in Rust with `PyO3 <https://pyo3.rs/>`__, which makes it
+possible to compile and distribute Python extensions written in Rust as easily
+as if they were written in C.
+
+This class inherits the :ref:`ref-classes-setuptools3` and
+:ref:`ref-classes-python_pyo3` classes.
.. _ref-classes-pixbufcache:
-``pixbufcache.bbclass``
-=======================
+``pixbufcache``
+===============
-The ``pixbufcache`` class generates the proper post-install and
+The :ref:`ref-classes-pixbufcache` class generates the proper post-install and
post-remove (postinst/postrm) scriptlets for packages that install
pixbuf loaders, which are used with ``gdk-pixbuf``. These scriptlets
call ``update_pixbuf_cache`` to add the pixbuf loaders to the cache.
@@ -2038,24 +2334,24 @@ containing the loaders.
.. _ref-classes-pkgconfig:
-``pkgconfig.bbclass``
-=====================
+``pkgconfig``
+=============
-The ``pkgconfig`` class provides a standard way to get header and
+The :ref:`ref-classes-pkgconfig` class provides a standard way to get header and
library information by using ``pkg-config``. This class aims to smooth
integration of ``pkg-config`` into libraries that use it.
During staging, BitBake installs ``pkg-config`` data into the
``sysroots/`` directory. By making use of sysroot functionality within
-``pkg-config``, the ``pkgconfig`` class no longer has to manipulate the
+``pkg-config``, the :ref:`ref-classes-pkgconfig` class no longer has to manipulate the
files.
.. _ref-classes-populate-sdk:
-``populate_sdk.bbclass``
-========================
+``populate_sdk``
+================
-The ``populate_sdk`` class provides support for SDK-only recipes. For
+The :ref:`ref-classes-populate-sdk` class provides support for SDK-only recipes. For
information on advantages gained when building a cross-development
toolchain using the :ref:`ref-tasks-populate_sdk`
task, see the ":ref:`sdk-manual/appendix-obtain:building an sdk installer`"
@@ -2064,34 +2360,34 @@ Software Development Kit (eSDK) manual.
.. _ref-classes-populate-sdk-*:
-``populate_sdk_*.bbclass``
-==========================
+``populate_sdk_*``
+==================
-The ``populate_sdk_*`` classes support SDK creation and consist of the
+The :ref:`ref-classes-populate-sdk-*` classes support SDK creation and consist of the
following classes:
-- ``populate_sdk_base``: The base class supporting SDK creation under
+- :ref:`populate_sdk_base <ref-classes-populate-sdk-*>`: The base class supporting SDK creation under
all package managers (i.e. DEB, RPM, and opkg).
-- ``populate_sdk_deb``: Supports creation of the SDK given the Debian
+- :ref:`populate_sdk_deb <ref-classes-populate-sdk-*>`: Supports creation of the SDK given the Debian
package manager.
-- ``populate_sdk_rpm``: Supports creation of the SDK given the RPM
+- :ref:`populate_sdk_rpm <ref-classes-populate-sdk-*>`: Supports creation of the SDK given the RPM
package manager.
-- ``populate_sdk_ipk``: Supports creation of the SDK given the opkg
+- :ref:`populate_sdk_ipk <ref-classes-populate-sdk-*>`: Supports creation of the SDK given the opkg
(IPK format) package manager.
-- ``populate_sdk_ext``: Supports extensible SDK creation under all
+- :ref:`populate_sdk_ext <ref-classes-populate-sdk-*>`: Supports extensible SDK creation under all
package managers.
-The ``populate_sdk_base`` class inherits the appropriate
+The :ref:`populate_sdk_base <ref-classes-populate-sdk-*>` class inherits the appropriate
``populate_sdk_*`` (i.e. ``deb``, ``rpm``, and ``ipk``) based on
:term:`IMAGE_PKGTYPE`.
The base class ensures all source and destination directories are
established and then populates the SDK. After populating the SDK, the
-``populate_sdk_base`` class constructs two sysroots:
+:ref:`populate_sdk_base <ref-classes-populate-sdk-*>` class constructs two sysroots:
``${``\ :term:`SDK_ARCH`\ ``}-nativesdk``, which
contains the cross-compiler and associated tooling, and the target,
which contains a target root filesystem that is configured for the SDK
@@ -2104,9 +2400,9 @@ which consists of the following::
Finally, the base populate SDK class creates the toolchain environment
setup script, the tarball of the SDK, and the installer.
-The respective ``populate_sdk_deb``, ``populate_sdk_rpm``, and
-``populate_sdk_ipk`` classes each support the specific type of SDK.
-These classes are inherited by and used with the ``populate_sdk_base``
+The respective :ref:`populate_sdk_deb <ref-classes-populate-sdk-*>`, :ref:`populate_sdk_rpm <ref-classes-populate-sdk-*>`, and
+:ref:`populate_sdk_ipk <ref-classes-populate-sdk-*>` classes each support the specific type of SDK.
+These classes are inherited by and used with the :ref:`populate_sdk_base <ref-classes-populate-sdk-*>`
class.
For more information on the cross-development toolchain generation, see
@@ -2121,10 +2417,10 @@ Software Development Kit (eSDK) manual.
.. _ref-classes-prexport:
-``prexport.bbclass``
-====================
+``prexport``
+============
-The ``prexport`` class provides functionality for exporting
+The :ref:`ref-classes-prexport` class provides functionality for exporting
:term:`PR` values.
.. note::
@@ -2134,10 +2430,10 @@ The ``prexport`` class provides functionality for exporting
.. _ref-classes-primport:
-``primport.bbclass``
-====================
+``primport``
+============
-The ``primport`` class provides functionality for importing
+The :ref:`ref-classes-primport` class provides functionality for importing
:term:`PR` values.
.. note::
@@ -2147,23 +2443,23 @@ The ``primport`` class provides functionality for importing
.. _ref-classes-prserv:
-``prserv.bbclass``
-==================
+``prserv``
+==========
-The ``prserv`` class provides functionality for using a :ref:`PR
-service <dev-manual/common-tasks:working with a pr service>` in order to
+The :ref:`ref-classes-prserv` class provides functionality for using a :ref:`PR
+service <dev-manual/packages:working with a pr service>` in order to
automatically manage the incrementing of the :term:`PR`
variable for each recipe.
This class is enabled by default because it is inherited by the
-:ref:`package <ref-classes-package>` class. However, the OpenEmbedded
+:ref:`ref-classes-package` class. However, the OpenEmbedded
build system will not enable the functionality of this class unless
:term:`PRSERV_HOST` has been set.
.. _ref-classes-ptest:
-``ptest.bbclass``
-=================
+``ptest``
+=========
The ``ptest`` class provides functionality for packaging and installing
runtime tests for recipes that build software that provides these tests.
@@ -2171,45 +2467,45 @@ runtime tests for recipes that build software that provides these tests.
This class is intended to be inherited by individual recipes. However,
the class' functionality is largely disabled unless "ptest" appears in
:term:`DISTRO_FEATURES`. See the
-":ref:`dev-manual/common-tasks:testing packages with ptest`"
+":ref:`dev-manual/packages:testing packages with ptest`"
section in the Yocto Project Development Tasks Manual for more information
on ptest.
.. _ref-classes-ptest-gnome:
-``ptest-gnome.bbclass``
-=======================
+``ptest-gnome``
+===============
Enables package tests (ptests) specifically for GNOME packages, which
have tests intended to be executed with ``gnome-desktop-testing``.
For information on setting up and running ptests, see the
-":ref:`dev-manual/common-tasks:testing packages with ptest`"
+":ref:`dev-manual/packages:testing packages with ptest`"
section in the Yocto Project Development Tasks Manual.
.. _ref-classes-python3-dir:
-``python3-dir.bbclass``
-=======================
+``python3-dir``
+===============
-The ``python3-dir`` class provides the base version, location, and site
+The :ref:`ref-classes-python3-dir` class provides the base version, location, and site
package location for Python 3.
.. _ref-classes-python3native:
-``python3native.bbclass``
-=========================
+``python3native``
+=================
-The ``python3native`` class supports using the native version of Python
+The :ref:`ref-classes-python3native` class supports using the native version of Python
3 built by the build system rather than support of the version provided
by the build host.
.. _ref-classes-python3targetconfig:
-``python3targetconfig.bbclass``
-===============================
+``python3targetconfig``
+=======================
-The ``python3targetconfig`` class supports using the native version of Python
+The :ref:`ref-classes-python3targetconfig` class supports using the native version of Python
3 built by the build system rather than support of the version provided
by the build host, except that the configuration for the target machine
is accessible (such as correct installation directories). This also adds a
@@ -2218,41 +2514,40 @@ in order to avoid unnecessarily lengthening builds.
.. _ref-classes-qemu:
-``qemu.bbclass``
-================
+``qemu``
+========
-The ``qemu`` class provides functionality for recipes that either need
+The :ref:`ref-classes-qemu` class provides functionality for recipes that either need
QEMU or test for the existence of QEMU. Typically, this class is used to
run programs for a target system on the build host using QEMU's
application emulation mode.
.. _ref-classes-recipe_sanity:
-``recipe_sanity.bbclass``
-=========================
+``recipe_sanity``
+=================
-The ``recipe_sanity`` class checks for the presence of any host system
+The :ref:`ref-classes-recipe_sanity` class checks for the presence of any host system
recipe prerequisites that might affect the build (e.g. variables that
are set or software that is present).
.. _ref-classes-relocatable:
-``relocatable.bbclass``
-=======================
+``relocatable``
+===============
-The ``relocatable`` class enables relocation of binaries when they are
+The :ref:`ref-classes-relocatable` class enables relocation of binaries when they are
installed into the sysroot.
-This class makes use of the :ref:`chrpath <ref-classes-chrpath>` class
-and is used by both the :ref:`cross <ref-classes-cross>` and
-:ref:`native <ref-classes-native>` classes.
+This class makes use of the :ref:`ref-classes-chrpath` class and is used by
+both the :ref:`ref-classes-cross` and :ref:`ref-classes-native` classes.
.. _ref-classes-remove-libtool:
-``remove-libtool.bbclass``
-==========================
+``remove-libtool``
+==================
-The ``remove-libtool`` class adds a post function to the
+The :ref:`ref-classes-remove-libtool` class adds a post function to the
:ref:`ref-tasks-install` task to remove all ``.la`` files
installed by ``libtool``. Removing these files results in them being
absent from both the sysroot and target packages.
@@ -2264,15 +2559,15 @@ override the removal by setting ``REMOVE_LIBTOOL_LA`` to "0" as follows::
.. note::
- The ``remove-libtool`` class is not enabled by default.
+ The :ref:`ref-classes-remove-libtool` class is not enabled by default.
.. _ref-classes-report-error:
-``report-error.bbclass``
-========================
+``report-error``
+================
-The ``report-error`` class supports enabling the :ref:`error reporting
-tool <dev-manual/common-tasks:using the error reporting tool>`",
+The :ref:`ref-classes-report-error` class supports enabling the :ref:`error reporting
+tool <dev-manual/error-reporting-tool:using the error reporting tool>`,
which allows you to submit build error information to a central database.
The class collects debug information for recipe, recipe version, task,
@@ -2283,10 +2578,10 @@ are created and stored in
.. _ref-classes-rm-work:
-``rm_work.bbclass``
-===================
+``rm_work``
+===========
-The ``rm_work`` class supports deletion of temporary workspace, which
+The :ref:`ref-classes-rm-work` class supports deletion of temporary workspace, which
can ease your hard drive demands during builds.
The OpenEmbedded build system can use a substantial amount of disk space
@@ -2295,60 +2590,76 @@ under the ``${TMPDIR}/work`` directory for each recipe. Once the build
system generates the packages for a recipe, the work files for that
recipe are no longer needed. However, by default, the build system
preserves these files for inspection and possible debugging purposes. If
-you would rather have these files deleted to save disk space as the
-build progresses, you can enable ``rm_work`` by adding the following to
-your ``local.conf`` file, which is found in the :term:`Build Directory`.
-::
+you would rather have these files deleted to save disk space as the build
+progresses, you can enable :ref:`ref-classes-rm-work` by adding the following to
+your ``local.conf`` file, which is found in the :term:`Build Directory`::
INHERIT += "rm_work"
-If you are
-modifying and building source code out of the work directory for a
-recipe, enabling ``rm_work`` will potentially result in your changes to
-the source being lost. To exclude some recipes from having their work
-directories deleted by ``rm_work``, you can add the names of the recipe
-or recipes you are working on to the :term:`RM_WORK_EXCLUDE` variable, which
-can also be set in your ``local.conf`` file. Here is an example::
+If you are modifying and building source code out of the work directory for a
+recipe, enabling :ref:`ref-classes-rm-work` will potentially result in your
+changes to the source being lost. To exclude some recipes from having their work
+directories deleted by :ref:`ref-classes-rm-work`, you can add the names of the
+recipe or recipes you are working on to the :term:`RM_WORK_EXCLUDE` variable,
+which can also be set in your ``local.conf`` file. Here is an example::
RM_WORK_EXCLUDE += "busybox glibc"
.. _ref-classes-rootfs*:
-``rootfs*.bbclass``
-===================
+``rootfs*``
+===========
-The ``rootfs*`` classes support creating the root filesystem for an
+The :ref:`ref-classes-rootfs*` classes support creating the root filesystem for an
image and consist of the following classes:
-- The ``rootfs-postcommands`` class, which defines filesystem
+- The :ref:`rootfs-postcommands <ref-classes-rootfs*>` class, which defines filesystem
post-processing functions for image recipes.
-- The ``rootfs_deb`` class, which supports creation of root filesystems
+- The :ref:`rootfs_deb <ref-classes-rootfs*>` class, which supports creation of root filesystems
for images built using ``.deb`` packages.
-- The ``rootfs_rpm`` class, which supports creation of root filesystems
+- The :ref:`rootfs_rpm <ref-classes-rootfs*>` class, which supports creation of root filesystems
for images built using ``.rpm`` packages.
-- The ``rootfs_ipk`` class, which supports creation of root filesystems
+- The :ref:`rootfs_ipk <ref-classes-rootfs*>` class, which supports creation of root filesystems
for images built using ``.ipk`` packages.
-- The ``rootfsdebugfiles`` class, which installs additional files found
+- The :ref:`rootfsdebugfiles <ref-classes-rootfs*>` class, which installs additional files found
on the build host directly into the root filesystem.
The root filesystem is created from packages using one of the
-``rootfs*.bbclass`` files as determined by the
-:term:`PACKAGE_CLASSES` variable.
+:ref:`ref-classes-rootfs*` files as determined by the :term:`PACKAGE_CLASSES`
+variable.
For information on how root filesystem images are created, see the
":ref:`overview-manual/concepts:image generation`"
section in the Yocto Project Overview and Concepts Manual.
+.. _ref-classes-rust:
+
+``rust``
+========
+
+The :ref:`ref-classes-rust` class is an internal class which is only used
+in the "rust" recipe to build the Rust compiler and runtime library.
+Apart from that recipe, it is not intended to be used directly.
+
+.. _ref-classes-rust-common:
+
+``rust-common``
+===============
+
+The :ref:`ref-classes-rust-common` class is an internal class to the
+:ref:`ref-classes-cargo_common` and :ref:`ref-classes-rust` classes and is not
+intended to be used directly.
+
.. _ref-classes-sanity:
-``sanity.bbclass``
-==================
+``sanity``
+==========
-The ``sanity`` class checks to see if prerequisite software is present
+The :ref:`ref-classes-sanity` class checks to see if prerequisite software is present
on the host system so that users can be notified of potential problems
that might affect their build. The class also performs basic user
configuration checks from the ``local.conf`` configuration file to
@@ -2357,29 +2668,29 @@ usually determines whether to include this class.
.. _ref-classes-scons:
-``scons.bbclass``
-=================
+``scons``
+=========
-The ``scons`` class supports recipes that need to build software that
-uses the SCons build system. You can use the
-:term:`EXTRA_OESCONS` variable to specify
-additional configuration options you want to pass SCons command line.
+The :ref:`ref-classes-scons` class supports recipes that need to build software
+that uses the SCons build system. You can use the :term:`EXTRA_OESCONS`
+variable to specify additional configuration options you want to pass on the
+SCons command line.
.. _ref-classes-sdl:
-``sdl.bbclass``
-===============
+``sdl``
+=======
-The ``sdl`` class supports recipes that need to build software that uses
+The :ref:`ref-classes-sdl` class supports recipes that need to build software that uses
the Simple DirectMedia Layer (SDL) library.
.. _ref-classes-python_setuptools_build_meta:
-``python_setuptools_build_meta.bbclass``
-========================================
+``python_setuptools_build_meta``
+================================
-The ``python_setuptools_build_meta`` class enables building Python modules which
-declare the
+The :ref:`ref-classes-python_setuptools_build_meta` class enables building
+Python modules which declare the
`PEP-517 <https://www.python.org/dev/peps/pep-0517/>`__ compliant
``setuptools.build_meta`` ``build-backend`` in the ``[build-system]``
section of ``pyproject.toml`` (See `PEP-518 <https://www.python.org/dev/peps/pep-0518/>`__).
@@ -2387,21 +2698,22 @@ section of ``pyproject.toml`` (See `PEP-518 <https://www.python.org/dev/peps/pep
Python modules built with ``setuptools.build_meta`` can be pure Python or
include ``C`` or ``Rust`` extensions).
-Internally this uses the :ref:`python_pep517 <ref-classes-python_pep517>` class.
+Internally this uses the :ref:`ref-classes-python_pep517` class.
.. _ref-classes-setuptools3:
-``setuptools3.bbclass``
-=======================
+``setuptools3``
+===============
-The ``setuptools3`` class supports Python version 3.x extensions that
-use build systems based on ``setuptools`` (e.g. only have a ``setup.py`` and
-have not migrated to the official ``pyproject.toml`` format). If your recipe
-uses these build systems, the recipe needs to inherit the ``setuptools3`` class.
+The :ref:`ref-classes-setuptools3` class supports Python version 3.x extensions
+that use build systems based on ``setuptools`` (e.g. only have a ``setup.py``
+and have not migrated to the official ``pyproject.toml`` format). If your recipe
+uses these build systems, the recipe needs to inherit the
+:ref:`ref-classes-setuptools3` class.
.. note::
- The ``setuptools3`` class ``do_compile()`` task now calls
+ The :ref:`ref-classes-setuptools3` class :ref:`ref-tasks-compile` task now calls
``setup.py bdist_wheel`` to build the ``wheel`` binary archive format
(See `PEP-427 <https://www.python.org/dev/peps/pep-0427/>`__).
@@ -2412,69 +2724,61 @@ uses these build systems, the recipe needs to inherit the ``setuptools3`` class.
.. note::
- The ``setuptools3`` class ``do_install()`` task now installs the ``wheel``
- binary archive. In current versions of ``setuptools`` the legacy ``setup.py
- install`` method is deprecated. If the ``setup.py`` cannot be used with
- wheels, for example it creates files outside of the Python module or
- standard entry points, then :ref:`setuptools3_legacy
- <ref-classes-setuptools3_legacy>` should be used.
+ The :ref:`ref-classes-setuptools3` class :ref:`ref-tasks-install` task now
+ installs the ``wheel`` binary archive. In current versions of
+ ``setuptools`` the legacy ``setup.py install`` method is deprecated. If
+ the ``setup.py`` cannot be used with wheels, for example it creates files
+ outside of the Python module or standard entry points, then
+ :ref:`ref-classes-setuptools3_legacy` should be used.
.. _ref-classes-setuptools3_legacy:
-``setuptools3_legacy.bbclass``
-==============================
+``setuptools3_legacy``
+======================
-The ``setuptools3_legacy`` class supports Python version 3.x extensions that use
-build systems based on ``setuptools`` (e.g. only have a ``setup.py`` and have
-not migrated to the official ``pyproject.toml`` format). Unlike
-``setuptools3.bbclass``, this uses the traditional ``setup.py`` ``build`` and
-``install`` commands and not wheels. This use of ``setuptools`` like this is
-`deprecated <https://github.com/pypa/setuptools/blob/main/CHANGES.rst#v5830>`_
+The :ref:`ref-classes-setuptools3_legacy` class supports
+Python version 3.x extensions that use build systems based on ``setuptools``
+(e.g. only have a ``setup.py`` and have not migrated to the official
+``pyproject.toml`` format). Unlike :ref:`ref-classes-setuptools3`,
+this uses the traditional ``setup.py`` ``build`` and ``install`` commands and
+not wheels. Using ``setuptools`` in this way is
+`deprecated <https://github.com/pypa/setuptools/blob/main/CHANGES.rst#v5830>`__
but still relatively common.
.. _ref-classes-setuptools3-base:
-``setuptools3-base.bbclass``
-============================
-
-The ``setuptools3-base`` class provides a reusable base for other classes
-that support building Python version 3.x extensions. If you need
-functionality that is not provided by the :ref:`setuptools3 <ref-classes-setuptools3>` class, you may
-want to ``inherit setuptools3-base``. Some recipes do not need the tasks
-in the :ref:`setuptools3 <ref-classes-setuptools3>` class and inherit this class instead.
-
-.. _ref-classes-sign_rpm:
-
-``sign_rpm.bbclass``
+``setuptools3-base``
====================
-The ``sign_rpm`` class supports generating signed RPM packages.
+The :ref:`ref-classes-setuptools3-base` class provides a reusable base for
+other classes that support building Python version 3.x extensions. If you need
+functionality that is not provided by the :ref:`ref-classes-setuptools3` class,
+you may want to ``inherit setuptools3-base``. Some recipes do not need the tasks
+in the :ref:`ref-classes-setuptools3` class and inherit this class instead.
-.. _ref-classes-sip:
+.. _ref-classes-sign_rpm:
-``sip.bbclass``
-===============
+``sign_rpm``
+============
-The ``sip`` class supports recipes that build or package SIP-based
-Python bindings.
+The :ref:`ref-classes-sign_rpm` class supports generating signed RPM packages.
.. _ref-classes-siteconfig:
-``siteconfig.bbclass``
-======================
+``siteconfig``
+==============
-The ``siteconfig`` class provides functionality for handling site
-configuration. The class is used by the
-:ref:`autotools <ref-classes-autotools>` class to accelerate the
-:ref:`ref-tasks-configure` task.
+The :ref:`ref-classes-siteconfig` class provides functionality for handling site
+configuration. The class is used by the :ref:`ref-classes-autotools` class to
+accelerate the :ref:`ref-tasks-configure` task.
.. _ref-classes-siteinfo:
-``siteinfo.bbclass``
-====================
+``siteinfo``
+============
-The ``siteinfo`` class provides information about the targets that might
-be needed by other classes or recipes.
+The :ref:`ref-classes-siteinfo` class provides information about the targets
+that might be needed by other classes or recipes.
As an example, consider Autotools, which can require tests that must
execute on the target hardware. Since this is not possible in general
@@ -2491,12 +2795,12 @@ The class also provides variables like :term:`SITEINFO_ENDIANNESS` and
.. _ref-classes-sstate:
-``sstate.bbclass``
-==================
+``sstate``
+==========
-The ``sstate`` class provides support for Shared State (sstate). By
-default, the class is enabled through the
-:term:`INHERIT_DISTRO` variable's default value.
+The :ref:`ref-classes-sstate` class provides support for Shared State (sstate).
+By default, the class is enabled through the :term:`INHERIT_DISTRO` variable's
+default value.
For more information on sstate, see the
":ref:`overview-manual/concepts:shared state cache`"
@@ -2504,10 +2808,10 @@ section in the Yocto Project Overview and Concepts Manual.
.. _ref-classes-staging:
-``staging.bbclass``
-===================
+``staging``
+===========
-The ``staging`` class installs files into individual recipe work
+The :ref:`ref-classes-staging` class installs files into individual recipe work
directories for sysroots. The class contains the following key tasks:
- The :ref:`ref-tasks-populate_sysroot` task,
@@ -2520,14 +2824,14 @@ directories for sysroots. The class contains the following key tasks:
installs the files into the individual recipe work directories (i.e.
:term:`WORKDIR`).
-The code in the ``staging`` class is complex and basically works in two
-stages:
+The code in the :ref:`ref-classes-staging` class is complex and basically works
+in two stages:
- *Stage One:* The first stage addresses recipes that have files they
want to share with other recipes that have dependencies on the
originating recipe. Normally these dependencies are installed through
the :ref:`ref-tasks-install` task into
- ``${``\ :term:`D`\ ``}``. The ``do_populate_sysroot`` task
+ ``${``\ :term:`D`\ ``}``. The :ref:`ref-tasks-populate_sysroot` task
copies a subset of these files into ``${SYSROOT_DESTDIR}``. This
subset of files is controlled by the
:term:`SYSROOT_DIRS`,
@@ -2594,8 +2898,7 @@ stages:
dependencies traversed or installed. The same sstate dependency code
is used so that builds should be identical regardless of whether
sstate was used or not. For a closer look, see the
- ``setscene_depvalid()`` function in the
- :ref:`sstate <ref-classes-sstate>` class.
+ ``setscene_depvalid()`` function in the :ref:`ref-classes-sstate` class.
The build system is careful to maintain manifests of the files it
installs so that any given dependency can be installed as needed. The
@@ -2604,11 +2907,11 @@ stages:
.. _ref-classes-syslinux:
-``syslinux.bbclass``
-====================
+``syslinux``
+============
-The ``syslinux`` class provides syslinux-specific functions for building
-bootable images.
+The :ref:`ref-classes-syslinux` class provides syslinux-specific functions for
+building bootable images.
The class supports the following variables:
@@ -2647,11 +2950,11 @@ The class supports the following variables:
.. _ref-classes-systemd:
-``systemd.bbclass``
-===================
+``systemd``
+===========
-The ``systemd`` class provides support for recipes that install systemd
-unit files.
+The :ref:`ref-classes-systemd` class provides support for recipes that install
+systemd unit files.
The functionality for this class is disabled unless you have "systemd"
in :term:`DISTRO_FEATURES`.
@@ -2676,27 +2979,27 @@ Services are set up to start on boot automatically
unless you have set
:term:`SYSTEMD_AUTO_ENABLE` to "disable".
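+
+For example, a recipe installing a single unit file might contain something
+like the following (the service name is only illustrative)::
+
+   inherit systemd
+
+   SYSTEMD_SERVICE:${PN} = "myservice.service"
+   SYSTEMD_AUTO_ENABLE:${PN} = "disable"
+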
-For more information on ``systemd``, see the
-":ref:`dev-manual/common-tasks:selecting an initialization manager`"
+For more information on :ref:`ref-classes-systemd`, see the
+":ref:`dev-manual/init-manager:selecting an initialization manager`"
section in the Yocto Project Development Tasks Manual.
.. _ref-classes-systemd-boot:
-``systemd-boot.bbclass``
-========================
+``systemd-boot``
+================
-The ``systemd-boot`` class provides functions specific to the
+The :ref:`ref-classes-systemd-boot` class provides functions specific to the
systemd-boot bootloader for building bootable images. This is an
internal class and is not intended to be used directly.
.. note::
- The ``systemd-boot`` class is a result from merging the ``gummiboot`` class
+ The :ref:`ref-classes-systemd-boot` class is the result of merging the ``gummiboot`` class
used in previous Yocto Project releases with the ``systemd`` project.
-Set the :term:`EFI_PROVIDER` variable to
-"systemd-boot" to use this class. Doing so creates a standalone EFI
-bootloader that is not dependent on systemd.
+Set the :term:`EFI_PROVIDER` variable to ":ref:`ref-classes-systemd-boot`" to
+use this class. Doing so creates a standalone EFI bootloader that is not
+dependent on systemd.
For information on more variables used and supported in this class, see
the :term:`SYSTEMD_BOOT_CFG`,
@@ -2709,60 +3012,57 @@ for more information.
.. _ref-classes-terminal:
-``terminal.bbclass``
-====================
+``terminal``
+============
-The ``terminal`` class provides support for starting a terminal session.
-The :term:`OE_TERMINAL` variable controls which
-terminal emulator is used for the session.
+The :ref:`ref-classes-terminal` class provides support for starting a terminal
+session. The :term:`OE_TERMINAL` variable controls which terminal emulator is
+used for the session.
-Other classes use the ``terminal`` class anywhere a separate terminal
-session needs to be started. For example, the
-:ref:`patch <ref-classes-patch>` class assuming
-:term:`PATCHRESOLVE` is set to "user", the
-:ref:`cml1 <ref-classes-cml1>` class, and the
-:ref:`devshell <ref-classes-devshell>` class all use the ``terminal``
-class.
+Other classes use the :ref:`ref-classes-terminal` class anywhere a separate
+terminal session needs to be started. For example, the :ref:`ref-classes-patch`
+class assuming :term:`PATCHRESOLVE` is set to "user", the
+:ref:`ref-classes-cml1` class, and the :ref:`ref-classes-devshell` class all
+use the :ref:`ref-classes-terminal` class.
-.. _ref-classes-testimage*:
+.. _ref-classes-testimage:
-``testimage*.bbclass``
-======================
+``testimage``
+=============
-The ``testimage*`` classes support running automated tests against
+The :ref:`ref-classes-testimage` class supports running automated tests against
-images using QEMU and on actual hardware. The classes handle loading the
-tests and starting the image. To use the classes, you need to perform
+images using QEMU and on actual hardware. The class handles loading the
+tests and starting the image. To use the class, you need to perform
steps to set up the environment.
-.. note::
+To enable this class, add the following to your configuration::
- Best practices include using :term:`IMAGE_CLASSES` rather than
- :term:`INHERIT` to inherit the ``testimage`` class for automated image
- testing.
+ IMAGE_CLASSES += "testimage"
The tests are commands that run on the target system over ``ssh``. Each
test is written in Python and makes use of the ``unittest`` module.
-The ``testimage.bbclass`` runs tests on an image when called using the
+The :ref:`ref-classes-testimage` class runs tests on an image when called using the
following::
$ bitbake -c testimage image
-The ``testimage-auto`` class
-runs tests on an image after the image is constructed (i.e.
-:term:`TESTIMAGE_AUTO` must be set to "1").
+Alternatively, if you wish to have tests automatically run for each image
+after it is built, you can set :term:`TESTIMAGE_AUTO`::
+
+ TESTIMAGE_AUTO = "1"
For information on how to enable, run, and create new tests, see the
-":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
.. _ref-classes-testsdk:
-``testsdk.bbclass``
-===================
+``testsdk``
+===========
This class supports running automated tests against software development
-kits (SDKs). The ``testsdk`` class runs tests on an SDK when called
+kits (SDKs). The :ref:`ref-classes-testsdk` class runs tests on an SDK when called
using the following::
$ bitbake -c testsdk image
@@ -2770,13 +3070,13 @@ using the following::
.. note::
Best practices include using :term:`IMAGE_CLASSES` rather than
- :term:`INHERIT` to inherit the ``testsdk`` class for automated SDK
+ :term:`INHERIT` to inherit the :ref:`ref-classes-testsdk` class for automated SDK
testing.
.. _ref-classes-texinfo:
-``texinfo.bbclass``
-===================
+``texinfo``
+===========
This class should be inherited by recipes whose upstream packages invoke
the ``texinfo`` utilities at build-time. Native and cross recipes are
@@ -2793,10 +3093,10 @@ host system.
.. _ref-classes-toaster:
-``toaster.bbclass``
-===================
+``toaster``
+===========
-The ``toaster`` class collects information about packages and images and
+The :ref:`ref-classes-toaster` class collects information about packages and images and
sends them as events that the BitBake user interface can receive. The
class is enabled when the Toaster user interface is running.
@@ -2804,18 +3104,18 @@ This class is not intended to be used directly.
.. _ref-classes-toolchain-scripts:
-``toolchain-scripts.bbclass``
-=============================
+``toolchain-scripts``
+=====================
-The ``toolchain-scripts`` class provides the scripts used for setting up
+The :ref:`ref-classes-toolchain-scripts` class provides the scripts used for setting up
the environment for installed SDKs.
.. _ref-classes-typecheck:
-``typecheck.bbclass``
-=====================
+``typecheck``
+=============
-The ``typecheck`` class provides support for validating the values of
+The :ref:`ref-classes-typecheck` class provides support for validating the values of
variables set at the configuration level against their defined types.
The OpenEmbedded build system allows you to define the type of a
variable using the "type" varflag. Here is an example::
@@ -2824,14 +3124,14 @@ variable using the "type" varflag. Here is an example::
.. _ref-classes-uboot-config:
-``uboot-config.bbclass``
-========================
+``uboot-config``
+================
-The ``uboot-config`` class provides support for U-Boot configuration for
+The :ref:`ref-classes-uboot-config` class provides support for U-Boot configuration for
a machine. Specify the machine in your recipe as follows::
UBOOT_CONFIG ??= <default>
- UBOOT_CONFIG[foo] = "config,images"
+ UBOOT_CONFIG[foo] = "config,images,binary"
You can also specify the machine using this method::
@@ -2840,10 +3140,47 @@ You can also specify the machine using this method::
See the :term:`UBOOT_CONFIG` and :term:`UBOOT_MACHINE` variables for additional
information.
+.. _ref-classes-uboot-sign:
+
+``uboot-sign``
+==============
+
+The :ref:`ref-classes-uboot-sign` class provides support for U-Boot verified boot.
+It is intended to be inherited from U-Boot recipes.
+
+The variables used by this class are:
+
+- :term:`SPL_MKIMAGE_DTCOPTS`: DTC options for U-Boot ``mkimage`` when
+ building the FIT image.
+- :term:`SPL_SIGN_ENABLE`: enable signing the FIT image.
+- :term:`SPL_SIGN_KEYDIR`: directory containing the signing keys.
+- :term:`SPL_SIGN_KEYNAME`: base filename of the signing keys.
+- :term:`UBOOT_FIT_DESC`: description string encoded into the FIT image.
+- :term:`UBOOT_FIT_GENERATE_KEYS`: generate the keys if they don't exist yet.
+- :term:`UBOOT_FIT_HASH_ALG`: hash algorithm for the FIT image.
+- :term:`UBOOT_FIT_KEY_GENRSA_ARGS`: ``openssl genrsa`` arguments.
+- :term:`UBOOT_FIT_KEY_REQ_ARGS`: ``openssl req`` arguments.
+- :term:`UBOOT_FIT_SIGN_ALG`: signature algorithm for the FIT image.
+- :term:`UBOOT_FIT_SIGN_NUMBITS`: size of the private key for FIT image
+ signing.
+- :term:`UBOOT_FIT_KEY_SIGN_PKCS`: algorithm for the public key certificate
+ for FIT image signing.
+- :term:`UBOOT_FITIMAGE_ENABLE`: enable the generation of a U-Boot FIT image.
+- :term:`UBOOT_MKIMAGE_DTCOPTS`: DTC options for U-Boot ``mkimage`` when
+ rebuilding the FIT image containing the kernel.
+
+See U-Boot's documentation for details about `verified boot
+<https://source.denx.de/u-boot/u-boot/-/blob/master/doc/uImage.FIT/verified-boot.txt>`__
+and the `signature process
+<https://source.denx.de/u-boot/u-boot/-/blob/master/doc/uImage.FIT/signature.txt>`__.
+
+See also the description of :ref:`ref-classes-kernel-fitimage` class, which this class
+imitates.
+
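+As a rough sketch, enabling FIT image generation and signing from a machine
+configuration might involve settings such as the following (the key directory
+and key name are placeholders)::
+
+   UBOOT_FITIMAGE_ENABLE = "1"
+   SPL_SIGN_ENABLE = "1"
+   SPL_SIGN_KEYDIR = "/path/to/keys"
+   SPL_SIGN_KEYNAME = "dev"
+   UBOOT_FIT_GENERATE_KEYS = "1"
+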
.. _ref-classes-uninative:
-``uninative.bbclass``
-=====================
+``uninative``
+=============
Attempts to isolate the build system from the host distribution's C
library in order to make re-use of native shared state artifacts across
@@ -2858,21 +3195,21 @@ yourself, publish the resulting tarball (e.g. via HTTP) and set
``UNINATIVE_URL`` and ``UNINATIVE_CHECKSUM`` appropriately. For an
example, see the ``meta/conf/distro/include/yocto-uninative.inc``.
-The ``uninative`` class is also used unconditionally by the extensible
+The :ref:`ref-classes-uninative` class is also used unconditionally by the extensible
SDK. When building the extensible SDK, ``uninative-tarball`` is built
and the resulting tarball is included within the SDK.
.. _ref-classes-update-alternatives:
-``update-alternatives.bbclass``
-===============================
+``update-alternatives``
+=======================
-The ``update-alternatives`` class helps the alternatives system when
+The :ref:`ref-classes-update-alternatives` class helps the alternatives system when
multiple sources provide the same command. This situation occurs when
several programs that have the same or similar function are installed
with the same name. For example, the ``ar`` command is available from
the ``busybox``, ``binutils`` and ``elfutils`` packages. The
-``update-alternatives`` class handles renaming the binaries so that
+:ref:`ref-classes-update-alternatives` class handles renaming the binaries so that
multiple packages can be installed without conflicts. The ``ar`` command
still works regardless of which packages are installed or subsequently
removed. The class renames the conflicting binary in each package and
@@ -2892,7 +3229,7 @@ To use this class, you need to define a number of variables:
These variables list alternative commands needed by a package, provide
pathnames for links, default links for targets, and so forth. For
details on how to use this class, see the comments in the
-:yocto_git:`update-alternatives.bbclass </poky/tree/meta/classes/update-alternatives.bbclass>`
+:yocto_git:`update-alternatives.bbclass </poky/tree/meta/classes-recipe/update-alternatives.bbclass>`
file.
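+
+As a brief sketch, a recipe providing its own ``ar`` implementation might set
+variables along these lines (the priority value is arbitrary)::
+
+   inherit update-alternatives
+
+   ALTERNATIVE:${PN} = "ar"
+   ALTERNATIVE_LINK_NAME[ar] = "${bindir}/ar"
+   ALTERNATIVE_PRIORITY = "100"
+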
.. note::
@@ -2902,10 +3239,10 @@ file.
.. _ref-classes-update-rc.d:
-``update-rc.d.bbclass``
-=======================
+``update-rc.d``
+===============
-The ``update-rc.d`` class uses ``update-rc.d`` to safely install an
+The :ref:`ref-classes-update-rc.d` class uses ``update-rc.d`` to safely install an
initialization script on behalf of the package. The OpenEmbedded build
system takes care of details such as making sure the script is stopped
before a package is removed and started when the package is installed.
@@ -2916,10 +3253,10 @@ for details.
.. _ref-classes-useradd:
-``useradd*.bbclass``
-====================
+``useradd*``
+============
-The ``useradd*`` classes support the addition of users or groups for
+The :ref:`useradd* <ref-classes-useradd>` classes support the addition of users or groups for
usage by the package on the target. For example, if you have packages
that contain system services that should be run under their own user or
group, you can use these classes to enable creation of the user or
@@ -2928,16 +3265,16 @@ group. The :oe_git:`meta-skeleton/recipes-skeleton/useradd/useradd-example.bb
recipe in the :term:`Source Directory` provides a simple
example that shows how to add three users and groups to two packages.
-The ``useradd_base`` class provides basic functionality for user or
+The :ref:`useradd_base <ref-classes-useradd>` class provides basic functionality for user or
groups settings.
-The ``useradd*`` classes support the
+The :ref:`useradd* <ref-classes-useradd>` classes support the
:term:`USERADD_PACKAGES`,
:term:`USERADD_PARAM`,
:term:`GROUPADD_PARAM`, and
:term:`GROUPMEMS_PARAM` variables.
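+
+As a short sketch, a recipe adding a dedicated system user might set something
+like the following (the user name is only an example)::
+
+   inherit useradd
+
+   USERADD_PACKAGES = "${PN}"
+   USERADD_PARAM:${PN} = "--system --shell /bin/false --user-group myservice"
+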
-The ``useradd-staticids`` class supports the addition of users or groups
+The :ref:`useradd-staticids <ref-classes-useradd>` class supports the addition of users or groups
that have static user identification (``uid``) and group identification
(``gid``) values.
@@ -2953,17 +3290,15 @@ set static values, the OpenEmbedded build system looks in
:term:`BBPATH` for ``files/passwd`` and ``files/group``
files for the values.
-To use static ``uid`` and ``gid`` values, you need to set some
-variables. See the :term:`USERADDEXTENSION`,
-:term:`USERADD_UID_TABLES`,
-:term:`USERADD_GID_TABLES`, and
-:term:`USERADD_ERROR_DYNAMIC` variables.
-You can also see the :ref:`useradd <ref-classes-useradd>` class for
-additional information.
+To use static ``uid`` and ``gid`` values, you need to set some variables. See
+the :term:`USERADDEXTENSION`, :term:`USERADD_UID_TABLES`,
+:term:`USERADD_GID_TABLES`, and :term:`USERADD_ERROR_DYNAMIC` variables.
+You can also see the :ref:`ref-classes-useradd` class for additional
+information.
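+
+For example, a configuration could enable static IDs roughly as follows (the
+table file names shown are the commonly used defaults)::
+
+   USERADDEXTENSION = "useradd-staticids"
+   USERADD_UID_TABLES = "files/passwd"
+   USERADD_GID_TABLES = "files/group"
+   USERADD_ERROR_DYNAMIC = "error"
+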
.. note::
- You do not use the ``useradd-staticids`` class directly. You either enable
+ You do not use the :ref:`useradd-staticids <ref-classes-useradd>` class directly. You either enable
or disable the class by setting the :term:`USERADDEXTENSION` variable. If you
enable or disable the class in a configured system, :term:`TMPDIR` might
contain incorrect ``uid`` and ``gid`` values. Deleting the :term:`TMPDIR`
@@ -2971,43 +3306,42 @@ additional information.
.. _ref-classes-utility-tasks:
-``utility-tasks.bbclass``
-=========================
+``utility-tasks``
+=================
-The ``utility-tasks`` class provides support for various "utility" type
-tasks that are applicable to all recipes, such as
-:ref:`ref-tasks-clean` and
-:ref:`ref-tasks-listtasks`.
+The :ref:`ref-classes-utility-tasks` class provides support for various
+"utility" type tasks that are applicable to all recipes, such as
+:ref:`ref-tasks-clean` and :ref:`ref-tasks-listtasks`.
This class is enabled by default because it is inherited by the
-:ref:`base <ref-classes-base>` class.
+:ref:`ref-classes-base` class.
.. _ref-classes-utils:
-``utils.bbclass``
-=================
+``utils``
+=========
-The ``utils`` class provides some useful Python functions that are
+The :ref:`ref-classes-utils` class provides some useful Python functions that are
typically used in inline Python expressions (e.g. ``${@...}``). One
example use is for ``bb.utils.contains()``.
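+
+For example, a recipe could use ``bb.utils.contains()`` in an inline Python
+expression to set a default conditionally (the variable and feature names are
+illustrative only)::
+
+   PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'systemd', '', d)}"
+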
This class is enabled by default because it is inherited by the
-:ref:`base <ref-classes-base>` class.
+:ref:`ref-classes-base` class.
.. _ref-classes-vala:
-``vala.bbclass``
-================
+``vala``
+========
-The ``vala`` class supports recipes that need to build software written
+The :ref:`ref-classes-vala` class supports recipes that need to build software written
using the Vala programming language.
.. _ref-classes-waf:
-``waf.bbclass``
-===============
+``waf``
+=======
-The ``waf`` class supports recipes that need to build software that uses
+The :ref:`ref-classes-waf` class supports recipes that need to build software that uses
the Waf build system. You can use the
:term:`EXTRA_OECONF` or
:term:`PACKAGECONFIG_CONFARGS` variables
diff --git a/documentation/ref-manual/devtool-reference.rst b/documentation/ref-manual/devtool-reference.rst
index a1a8bcdc98..296734e7c1 100644
--- a/documentation/ref-manual/devtool-reference.rst
+++ b/documentation/ref-manual/devtool-reference.rst
@@ -165,7 +165,7 @@ Adding a New Recipe to the Workspace Layer
==========================================
Use the ``devtool add`` command to add a new recipe to the workspace
-layer. The recipe you add should not exist - ``devtool`` creates it for
+layer. The recipe you add should not exist --- ``devtool`` creates it for
you. The source files the recipe uses should exist in an external area.
The following example creates and adds a new recipe named ``jackson`` to
@@ -379,16 +379,14 @@ command::
Unless you provide a specific recipe name on the command line, the
command checks all recipes in all configured layers.
-Following is a partial example table that reports on all the recipes.
+Here is a partial example table that reports on all the recipes.
Notice the reported reason for not upgrading the ``base-passwd`` recipe.
In this example, while a new version is available upstream, you do not
want to use it because the dependency on ``cdebconf`` is not easily
satisfied. Maintainers can state the reason that is shown by adding
the :term:`RECIPE_NO_UPDATE_REASON` variable to the corresponding recipe.
See :yocto_git:`base-passwd.bb </poky/tree/meta/recipes-core/base-passwd/base-passwd_3.5.29.bb>`
-for an example.
-
-::
+for an example::
$ devtool check-upgrade-status
...
@@ -411,7 +409,7 @@ Upgrading a Recipe
As software matures, upstream recipes are upgraded to newer versions. As
a developer, you need to keep your local recipes up-to-date with the
upstream version releases. There are several ways of upgrading recipes.
-You can read about them in the ":ref:`dev-manual/common-tasks:upgrading recipes`"
+You can read about them in the ":ref:`dev-manual/upgrading-recipes:upgrading recipes`"
section of the Yocto Project Development Tasks Manual. This section
overviews the ``devtool upgrade`` command.
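+
+In its basic form, the command takes the recipe name and, optionally, the
+version to upgrade to, for example::
+
+   $ devtool upgrade <recipename> --version <new-version>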
@@ -439,7 +437,7 @@ You can read more on the ``devtool upgrade`` workflow in the
":ref:`sdk-manual/extensible:use \`\`devtool upgrade\`\` to create a version of the recipe that supports a newer version of the software`"
section in the Yocto Project Application Development and the Extensible
Software Development Kit (eSDK) manual. You can also see an example of
-how to use ``devtool upgrade`` in the ":ref:`dev-manual/common-tasks:using \`\`devtool upgrade\`\``"
+how to use ``devtool upgrade`` in the ":ref:`dev-manual/upgrading-recipes:using \`\`devtool upgrade\`\``"
section in the Yocto Project Development Tasks Manual.
.. _devtool-resetting-a-recipe:
@@ -599,7 +597,7 @@ The ``devtool status`` command has no command-line options::
$ devtool status
-Following is sample output after using
+Here is sample output after using
:ref:`devtool add <ref-manual/devtool-reference:adding a new recipe to the workspace layer>`
to create and add the ``mtr_0.86.bb`` recipe to the ``workspace`` directory::
diff --git a/documentation/ref-manual/faq.rst b/documentation/ref-manual/faq.rst
index e06b5e6caa..20e09f8577 100644
--- a/documentation/ref-manual/faq.rst
+++ b/documentation/ref-manual/faq.rst
@@ -22,7 +22,7 @@ Can I still use the Yocto Project?
**A:** You can get the required tools on your host development system a
couple different ways (i.e. building a tarball or downloading a
tarball). See the
-":ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`"
+":ref:`ref-manual/system-requirements:required git, tar, python, make and gcc versions`"
section for steps on how to update your build tools.
**Q:** How can you claim Poky / OpenEmbedded-Core is stable?
@@ -45,7 +45,7 @@ section for steps on how to update your build tools.
**A:** Support for an additional board is added by creating a Board
Support Package (BSP) layer for it. For more information on how to
create a BSP layer, see the
-":ref:`dev-manual/common-tasks:understanding and creating layers`"
+":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual and the
:doc:`/bsp-guide/index`.
@@ -73,7 +73,7 @@ device.
**A:** To add a package, you need to create a BitBake recipe. For
information on how to create a BitBake recipe, see the
-":ref:`dev-manual/common-tasks:writing a new recipe`"
+":ref:`dev-manual/new-recipe:writing a new recipe`"
section in the Yocto Project Development Tasks Manual.
**Q:** Do I have to reflash my entire board with a new Yocto Project
@@ -201,7 +201,7 @@ You can find more information on licensing in the
":ref:`overview-manual/development-environment:licensing`"
section in the Yocto
Project Overview and Concepts Manual and also in the
-":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
+":ref:`dev-manual/licenses:maintaining open source license compliance during your product's lifecycle`"
section in the Yocto Project Development Tasks Manual.
**Q:** How do I disable the cursor on my touchscreen device?
@@ -312,7 +312,7 @@ HTTPS requests and direct them to the ``http://`` sources mirror. You
can use ``file://`` URLs to point to local directories or network shares
as well.
-Here are other options::
+Another option is to set::
BB_NO_NETWORK = "1"
@@ -328,7 +328,7 @@ This statement
limits the build system to pulling source from the :term:`PREMIRRORS` only.
Again, this technique is useful for reproducing builds.
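+
+For illustration, a :term:`PREMIRRORS` entry pointing at a local mirror
+directory might look like the following (the path is a placeholder)::
+
+   PREMIRRORS:prepend = "\
+       git://.*/.* file:///path/to/local/mirror/ \
+       ftp://.*/.* file:///path/to/local/mirror/ \
+       http://.*/.* file:///path/to/local/mirror/ \
+       https://.*/.* file:///path/to/local/mirror/"
+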
-Here is another technique::
+Here is yet another technique::
BB_GENERATE_MIRROR_TARBALLS = "1"
@@ -364,7 +364,7 @@ redirect requests through proxy servers.
**Q:** Can I get rid of build output so I can start over?
-**A:** Yes - you can easily do this. When you use BitBake to build an
+**A:** Yes --- you can easily do this. When you use BitBake to build an
image, all the build output goes into the directory created when you run
the build environment setup script (i.e.
:ref:`structure-core-script`). By default, this :term:`Build Directory`
@@ -428,7 +428,7 @@ relatively normal and the second is not:
build/tmp/sysroots/x86_64-linux/usr/bin
Even if the paths look unusual,
-they both are correct - the first for a target and the second for a
+they both are correct --- the first for a target and the second for a
native recipe. These paths are a consequence of the ``DESTDIR``
mechanism and while they appear strange, they are correct and in
practice very effective.
diff --git a/documentation/ref-manual/features.rst b/documentation/ref-manual/features.rst
index f7abb417ba..4f0366eeb6 100644
--- a/documentation/ref-manual/features.rst
+++ b/documentation/ref-manual/features.rst
@@ -62,6 +62,8 @@ Project metadata:
- *keyboard:* Hardware has a keyboard
+- *numa:* Hardware has non-uniform memory access
+
- *pcbios:* Support for booting through BIOS
- *pci:* Hardware has a PCI bus
@@ -155,7 +157,7 @@ metadata:
- *ptest:* Enables building the package tests where supported by
individual recipes. For more information on package tests, see the
- ":ref:`dev-manual/common-tasks:testing packages with ptest`" section
+ ":ref:`dev-manual/packages:testing packages with ptest`" section
in the Yocto Project Development Tasks Manual.
- *smbfs:* Include SMB networks client support (for mounting
@@ -196,7 +198,7 @@ you can add several different predefined packages such as development
utilities or packages with debug information needed to investigate
application problems or profile applications.
-Here are the image features available for all images:
+The image features available for all images are:
- *allow-empty-password:* Allows Dropbear and OpenSSH to accept root
logins and logins from accounts having an empty password string.
@@ -239,7 +241,7 @@ Here are the image features available for all images:
- *read-only-rootfs:* Creates an image whose root filesystem is
read-only. See the
- ":ref:`dev-manual/common-tasks:creating a read-only root filesystem`"
+ ":ref:`dev-manual/read-only-rootfs:creating a read-only root filesystem`"
section in the Yocto Project Development Tasks Manual for more
information.
@@ -276,7 +278,7 @@ these valid features is as follows:
- *tools-debug:* Installs debugging tools such as ``strace`` and
``gdb``. For information on GDB, see the
- ":ref:`dev-manual/common-tasks:debugging with the gnu project debugger (gdb) remotely`" section
+ ":ref:`dev-manual/debugging:debugging with the gnu project debugger (gdb) remotely`" section
in the Yocto Project Development Tasks Manual. For information on
tracing and profiling, see the :doc:`/profile-manual/index`.
diff --git a/documentation/ref-manual/images.rst b/documentation/ref-manual/images.rst
index 31fb567687..f875633128 100644
--- a/documentation/ref-manual/images.rst
+++ b/documentation/ref-manual/images.rst
@@ -14,15 +14,17 @@ image you want.
Building an image without GNU General Public License Version 3
(GPLv3), GNU Lesser General Public License Version 3 (LGPLv3), and
the GNU Affero General Public License Version 3 (AGPL-3.0) components
- is only supported for minimal and base images. Furthermore, if you
- are going to build an image using non-GPLv3 and similarly licensed
- components, you must make the following changes in the ``local.conf``
- file before using the BitBake command to build the minimal or base
- image::
+   is only tested for the ``core-image-minimal`` image. Furthermore, if you would like to
+ build an image and verify that it does not include GPLv3 and similarly licensed
+ components, you must make the following changes in the image recipe
+   file before using the BitBake command to build the image::
- 1. Comment out the EXTRA_IMAGE_FEATURES line
- 2. Set INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0* AGPL-3.0*"
+ INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*"
+   Alternatively, you can adjust the ``local.conf`` file, repeating and adjusting the line
+   for all images where the license restriction must apply::
+
+ INCOMPATIBLE_LICENSE:pn-your-image-name = "GPL-3.0* LGPL-3.0*"
From within the ``poky`` Git repository, you can use the following
command to display the list of directories within the :term:`Source Directory`
@@ -30,7 +32,7 @@ that contain image recipe files::
$ ls meta*/recipes*/images/*.bb
-Following is a list of supported recipes:
+Here is a list of supported recipes:
- ``build-appliance-image``: An example virtual machine that contains
all the pieces required to run builds using the build system as well
@@ -117,7 +119,7 @@ Following is a list of supported recipes:
deployed to a separate partition so that you can boot into it and use
it to deploy a second image to be tested. You can find more
information about runtime testing in the
- ":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+ ":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
- ``core-image-testmaster-initramfs``: A RAM-based Initial Root
@@ -127,7 +129,7 @@ Following is a list of supported recipes:
- ``core-image-weston``: A very basic Wayland image with a terminal.
This image provides the Wayland protocol libraries and the reference
Weston compositor. For more information, see the
- ":ref:`dev-manual/common-tasks:using wayland and weston`"
+ ":ref:`dev-manual/wayland:using wayland and weston`"
section in the Yocto Project Development Tasks Manual.
- ``core-image-x11``: A very basic X11 image with a terminal.
diff --git a/documentation/ref-manual/kickstart.rst b/documentation/ref-manual/kickstart.rst
index d82da0ee75..c7766a2dfd 100644
--- a/documentation/ref-manual/kickstart.rst
+++ b/documentation/ref-manual/kickstart.rst
@@ -82,7 +82,7 @@ the ``part`` and ``partition`` commands:
source of the data that populates the partition. The most common
value for this option is "rootfs", but you can use any value that
maps to a valid source plugin. For information on the source plugins,
- see the ":ref:`dev-manual/common-tasks:using the wic plugin interface`"
+ see the ":ref:`dev-manual/wic:using the wic plugin interface`"
section in the Yocto Project Development Tasks Manual.
If you use ``--source rootfs``, Wic creates a partition as large as
diff --git a/documentation/ref-manual/qa-checks.rst b/documentation/ref-manual/qa-checks.rst
index 8c475d0f72..1e5fe21b02 100644
--- a/documentation/ref-manual/qa-checks.rst
+++ b/documentation/ref-manual/qa-checks.rst
@@ -162,7 +162,7 @@ Errors and Warnings
normally expected to be empty (such as ``/tmp``). These files may
be more appropriately installed to a different location, or
perhaps alternatively not installed at all, usually by updating the
- ``do_install`` task/function.
+ :ref:`ref-tasks-install` task/function.
.. _qa-check-arch:
@@ -536,7 +536,7 @@ Errors and Warnings
in (e.g. ``FILES:${``\ :term:`PN`\ ``}`` for the main
package).
- - Delete the files at the end of the ``do_install`` task if the
+ - Delete the files at the end of the :ref:`ref-tasks-install` task if the
files are not needed in any package.
 
@@ -579,10 +579,10 @@ Errors and Warnings
- ``package contains mime types but does not inherit mime: <packagename> path '<file>' [mime]``
The specified package contains mime type files (``.xml`` files in
- ``${datadir}/mime/packages``) and yet does not inherit the mime
- class which will ensure that these get properly installed. Either
- add ``inherit mime`` to the recipe or remove the files at the
- ``do_install`` step if they are not needed.
+ ``${datadir}/mime/packages``) and yet does not inherit the
+ :ref:`ref-classes-mime` class which will ensure that these get
+ properly installed. Either add ``inherit mime`` to the recipe or remove the
+ files at the :ref:`ref-tasks-install` step if they are not needed.
.. _qa-check-mime-xdg:
@@ -590,10 +590,10 @@ Errors and Warnings
- ``package contains desktop file with key 'MimeType' but does not inhert mime-xdg: <packagename> path '<file>' [mime-xdg]``
The specified package contains a .desktop file with a 'MimeType' key
- present, but does not inherit the mime-xdg class that is required in
- order for that to be activated. Either add ``inherit mime`` to the
- recipe or remove the files at the ``do_install`` step if they are not
- needed.
+ present, but does not inherit the :ref:`mime-xdg <ref-classes-mime-xdg>`
+ class that is required in order for that to be activated. Either add
+  ``inherit mime-xdg`` to the recipe or remove the files at the
+ :ref:`ref-tasks-install` step if they are not needed.
.. _qa-check-src-uri-bad:
@@ -602,7 +602,7 @@ Errors and Warnings
GitHub provides "archive" tarballs, however these can be re-generated
on the fly and thus the file's signature will not necessarily match that
- in the SRC_URI checksums in future leading to build failures. It is
+ in the :term:`SRC_URI` checksums in future leading to build failures. It is
recommended that you use an official release tarball or switch to
pulling the corresponding revision in the actual git repository instead.
@@ -613,18 +613,20 @@ Errors and Warnings
so using ${:term:`BPN`} rather than ${:term:`PN`} as the latter will change
for different variants of the same recipe e.g. when :term:`BBCLASSEXTEND`
or multilib are being used. This check will fail if a reference to ``${PN}``
- is found within the :term:`SRC_URI` value - change it to ``${BPN}`` instead.
+ is found within the :term:`SRC_URI` value --- change it to ``${BPN}`` instead.
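+
+  For illustration, a fetch URL written with ``${BPN}`` could look like the
+  following (the host and path are placeholders)::
+
+     SRC_URI = "https://downloads.example.com/releases/${BPN}-${PV}.tar.gz"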
.. _qa-check-unhandled-features-check:
- ``<recipename>: recipe doesn't inherit features_check [unhandled-features-check]``
- This check ensures that if one of the variables that the :ref:`features_check <ref-classes-features_check>`
- class supports (e.g. :term:`REQUIRED_DISTRO_FEATURES`) is used, then the recipe
- inherits ``features_check`` in order for the requirement to actually work. If
- you are seeing this message, either add ``inherit features_check`` to your recipe
- or remove the reference to the variable if it is not needed.
+ This check ensures that if one of the variables that the
+ :ref:`ref-classes-features_check` class supports (e.g.
+ :term:`REQUIRED_DISTRO_FEATURES`) is used, then the recipe
+ inherits :ref:`ref-classes-features_check` in order for
+ the requirement to actually work. If you are seeing this message, either
+ add ``inherit features_check`` to your recipe or remove the reference to
+ the variable if it is not needed.
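+
+  For illustration, a recipe that requires X11 support in the distribution
+  might contain (the feature name is just an example)::
+
+     inherit features_check
+     REQUIRED_DISTRO_FEATURES = "x11"
+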
.. _qa-check-missing-update-alternatives:
@@ -632,7 +634,7 @@ Errors and Warnings
- ``<recipename>: recipe defines ALTERNATIVE:<packagename> but doesn't inherit update-alternatives. This might fail during do_rootfs later! [missing-update-alternatives]``
This check ensures that if a recipe sets the :term:`ALTERNATIVE` variable that the
- recipe also inherits :ref:`update-alternatives <ref-classes-update-alternatives>` such
+ recipe also inherits :ref:`ref-classes-update-alternatives` such
that the alternative will be correctly set up. If you are seeing this message, either
add ``inherit update-alternatives`` to your recipe or remove the reference to the variable
if it is not needed.
@@ -653,7 +655,7 @@ Errors and Warnings
- ``<packagename> contains perllocal.pod (<files>), should not be installed [perllocalpod]``
``perllocal.pod`` is an index file of locally installed modules and so shouldn't be
- installed by any distribution packages. The :ref:`cpan <ref-classes-cpan>` class
+ installed by any distribution packages. The :ref:`ref-classes-cpan` class
already sets ``NO_PERLLOCAL`` to stop this file being generated by most Perl recipes,
but if a recipe is using ``MakeMaker`` directly then they might not be doing this
correctly. This check ensures that perllocal.pod is not in any package in order to
@@ -667,8 +669,8 @@ Errors and Warnings
If ``usrmerge`` is in :term:`DISTRO_FEATURES`, this check will ensure that no package
installs files to root (``/bin``, ``/sbin``, ``/lib``, ``/lib64``) directories. If you are seeing this
- message, it indicates that the ``do_install`` step (or perhaps the build process that
- ``do_install`` is calling into, e.g. ``make install`` is using hardcoded paths instead
+ message, it indicates that the :ref:`ref-tasks-install` step (or perhaps the build process that
+  :ref:`ref-tasks-install` is calling into, e.g. ``make install``) is using hardcoded paths instead
of the variables set up for this (``bindir``, ``sbindir``, etc.), and should be
changed so that it does.
@@ -677,7 +679,7 @@ Errors and Warnings
- ``Fuzz detected: <patch output> [patch-fuzz]``
- This check looks for evidence of "fuzz" when applying patches within the ``do_patch``
+ This check looks for evidence of "fuzz" when applying patches within the :ref:`ref-tasks-patch`
task. Patch fuzz is a situation when the ``patch`` tool ignores some of the context
lines in order to apply the patch. Consider this example:
@@ -727,7 +729,7 @@ Errors and Warnings
devtool modify <recipe>
This will apply all of the patches, and create new commits out of them in
- the workspace - with the patch context updated.
+ the workspace --- with the patch context updated.
Then, replace the patches in the recipe layer::
@@ -748,6 +750,45 @@ Errors and Warnings
other things in the patches, those can be discarded.
+.. _qa-check-patch-status:
+
+- ``Missing Upstream-Status in patch <patchfile> Please add according to <url> [patch-status-core/patch-status-noncore]``
+
+ The ``Upstream-Status`` value is missing in the specified patch file's header.
+ This value is intended to track whether or not the patch has been sent
+ upstream, whether or not it has been merged, etc.
+
+  There are two options for this same check: ``patch-status-core`` (for
+ recipes in OE-Core) and ``patch-status-noncore`` (for recipes in any other
+ layer).
+
+ For more information, see the
+ ":ref:`contributor-guide/recipe-style-guide:patch upstream status`"
+ section in the Yocto Project and OpenEmbedded Contributor Guide.
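+
+  For illustration, a minimal valid header line in a patch could read as
+  follows (other accepted values, such as ``Submitted`` or ``Backport``,
+  carry additional detail)::
+
+     Upstream-Status: Pending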
+
+- ``Malformed Upstream-Status in patch <patchfile> Please correct according to <url> [patch-status-core/patch-status-noncore]``
+
+  The ``Upstream-Status`` value in the specified patch file's header is invalid:
+  it must follow a specific format. See the "Missing Upstream-Status" entry above
+ for more information.
+
+
+.. _qa-check-buildpaths:
+
+- ``File <filename> in package <packagename> contains reference to TMPDIR [buildpaths]``
+
+ This check ensures that build system paths (including :term:`TMPDIR`) do not
+ appear in output files, which not only leaks build system configuration into
+ the target, but also hinders binary reproducibility as the output will change
+ if the build system configuration changes.
+
+ Typically these paths will enter the output through some mechanism in the
+ configuration or compilation of the software being built by the recipe. To
+ resolve this issue you will need to determine how the detected path is
+ entering the output. Sometimes it may require adjusting scripts or code to
+ use a relative path rather than an absolute one, or to pick up the path from
+ runtime configuration or environment variables.
+
Configuring and Disabling QA Checks
===================================
diff --git a/documentation/ref-manual/release-process.rst b/documentation/ref-manual/release-process.rst
index 8acb4b8e09..2913a4d4eb 100644
--- a/documentation/ref-manual/release-process.rst
+++ b/documentation/ref-manual/release-process.rst
@@ -14,13 +14,13 @@ Major and Minor Release Cadence
The Yocto Project delivers major releases (e.g. &DISTRO;) using a six
month cadence roughly timed each April and October of the year.
-Following are examples of some major YP releases with their codenames
+Here are examples of some major YP releases with their codenames
also shown. See the ":ref:`ref-manual/release-process:major release codenames`"
section for information on codenames used with major releases.
- - 2.2 (Morty)
- - 2.1 (Krogoth)
- - 2.0 (Jethro)
+ - 4.1 ("Langdale")
+ - 4.0 ("Kirkstone")
+ - 3.4 ("Honister")
While the cadence is never perfect, this timescale facilitates
regular releases that have strong QA cycles while not overwhelming users
@@ -29,12 +29,12 @@ major holidays in various geographies.
The Yocto Project delivers minor (point) releases on an unscheduled
basis, usually driven by the accumulation of enough significant
-fixes or enhancements to the associated major release. Following are
-some example past point releases:
+fixes or enhancements to the associated major release.
+Some example past point releases are:
- - 2.1.1
- - 2.1.2
- - 2.2.1
+ - 4.1.3
+ - 4.0.8
+ - 3.4.4
The point release
indicates a point in the major release branch where a full QA cycle and
@@ -87,15 +87,51 @@ stable release.
exception to this policy occurs when there is a strong reason such as
the fix happens to also be the preferred upstream approach.
-Stable release branches have strong maintenance for about a year after
-their initial release. Should significant issues be found for any
-release regardless of its age, fixes could be backported to older
-releases. For issues that are not backported given an older release,
-Community LTS trees and branches allow community members to share
-patches for older releases. However, these types of patches do not go
-through the same release process as do point releases. You can find more
-information about stable branch maintenance at
-:yocto_wiki:`/Stable_branch_maintenance`.
+.. _ref-long-term-support-releases:
+
+Long Term Support Releases
+==========================
+
+While stable releases are supported for a duration of seven months,
+some specific ones are now supported for a longer period by the Yocto
+Project, and are called Long Term Support (:term:`LTS`) releases.
+
+When significant issues are found, :term:`LTS` releases make it possible to publish
+fixes not only for the current stable release, but also for the
+:term:`LTS` releases that are still supported. Older stable releases which
+have reached their End of Life (EOL) won't receive such updates.
+
+This started with version 3.1 ("Dunfell"), released in April 2020, which
+the project initially committed to supporting for two years, but this duration
+was later extended to four years. Similarly, the following :term:`LTS` release,
+version 4.0 ("Kirkstone"), was released two years later in May 2022 and the
+project committed to supporting it for four years too.
+
+Therefore, a new :term:`LTS` release is made every two years and is supported
+for four years. This offers more stability to project users and leaves more
+time to upgrade to the following :term:`LTS` release.
+
+See :yocto_wiki:`/Stable_Release_and_LTS` for details about the management
+of stable and :term:`LTS` releases.
+
+.. image:: svg/releases.*
+ :width: 100%
+
+.. note::
+
+ In some circumstances, a layer can be created by the community in order to
+ add a specific feature or support a new version of some package for an :term:`LTS`
+   release. This is called a :term:`Mixin` layer. These are thin, specific-purpose
+   layers which can be stacked with an :term:`LTS` release to "mix" a specific
+ feature into that build. These are created on an as-needed basis and
+ maintained by the people who need them.
+
+ Policies on testing these layers depend on how widespread their usage is and
+   are determined on a case-by-case basis. You can find some :term:`Mixin` layers in the
+ :yocto_git:`meta-lts-mixins </meta-lts-mixins>` repository. While the Yocto
+   Project provides hosting for those repositories, it does not provide
+ testing on them. Other :term:`Mixin` layers may be released elsewhere by the wider
+ community.
Testing and Quality Assurance
=============================
@@ -107,7 +143,7 @@ Additionally, because the test strategies are visible to you as a
developer, you can validate your projects. This section overviews the
available test infrastructure used in the Yocto Project. For information
on how to run available tests on your projects, see the
-":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
The QA/testing infrastructure is woven into the project to the point
@@ -127,19 +163,19 @@ consists of the following pieces:
an ARM target, did the build produce ARM binaries. If, for example,
the build produced PPC binaries then there is a problem.
-- :ref:`ref-classes-testimage*`: This class
+- :ref:`ref-classes-testimage`: This class
performs runtime testing of images after they are built. The tests
are usually used with :doc:`QEMU </dev-manual/qemu>`
to boot the images and check the combined runtime result boot
operation and functions. However, the test can also use the IP
address of a machine to test.
-- :ref:`ptest <dev-manual/common-tasks:testing packages with ptest>`:
+- :ref:`ptest <dev-manual/packages:testing packages with ptest>`:
Runs tests against packages produced during the build for a given
piece of software. The test allows the packages to be run within a
target image.
-- ``oe-selftest``: Tests combination BitBake invocations. These tests
+- ``oe-selftest``: Tests combinations of BitBake invocations. These tests
operate outside the OpenEmbedded build system itself. The
``oe-selftest`` can run all tests by default or can run selected
tests or test suites.
@@ -155,14 +191,12 @@ effort has been made to automate the tests so that more people can use
them and the Yocto Project development team can run them faster and more
efficiently.
-The Yocto Project's main Autobuilder (&YOCTO_AB_URL;)
-publicly tests each Yocto Project release's code in the
-:term:`OpenEmbedded-Core (OE-Core)`, Poky, and BitBake repositories. The testing
-occurs for both the current state of the "master" branch and also for
+The Yocto Project's main Autobuilder (&YOCTO_AB_URL;) publicly tests each Yocto
+Project release's code in the :oe_git:`openembedded-core </openembedded-core>`,
+:yocto_git:`poky </poky>` and :oe_git:`bitbake </bitbake>` repositories. The
+testing occurs for both the current state of the "master" branch and also for
submitted patches. Testing for submitted patches usually occurs in the
-"ross/mut" branch in the ``poky-contrib`` repository (i.e. the
-master-under-test branch) or in the "master-next" branch in the ``poky``
-repository.
+"master-next" branch in the :yocto_git:`poky </poky>` repository.
.. note::
diff --git a/documentation/ref-manual/resources.rst b/documentation/ref-manual/resources.rst
index c942384662..4eaaca942e 100644
--- a/documentation/ref-manual/resources.rst
+++ b/documentation/ref-manual/resources.rst
@@ -23,8 +23,7 @@ The Yocto Project gladly accepts contributions. You can submit changes
to the project either by creating and sending pull requests, or by
submitting patches through email. For information on how to do both as
well as information on how to identify the maintainer for each area of
-code, see the ":ref:`dev-manual/common-tasks:submitting a change to the yocto project`" section in the
-Yocto Project Development Tasks Manual.
+code, see the :doc:`../contributor-guide/index`.
.. _resources-bugtracker:
@@ -46,8 +45,8 @@ your expectations).
For a general procedure and guidelines on how to use Bugzilla to submit a bug
against the Yocto Project, see the following:
-- The ":ref:`dev-manual/common-tasks:submitting a defect against the yocto project`"
- section in the Yocto Project Development Tasks Manual.
+- The ":doc:`../contributor-guide/report-defect`"
+ section in the Yocto Project and OpenEmbedded Contributor Guide.
- The Yocto Project :yocto_wiki:`Bugzilla wiki page </Bugzilla_Configuration_and_Bug_Tracking>`
@@ -64,26 +63,31 @@ and announcements. To subscribe to one of the following mailing lists,
click on the appropriate URL in the following list and follow the
instructions:
-- :yocto_lists:`/g/yocto` - General Yocto Project
+- :yocto_lists:`/g/yocto` --- general Yocto Project
discussion mailing list.
-- :oe_lists:`/g/openembedded-core` - Discussion mailing
+- :yocto_lists:`/g/yocto-patches` --- patch contribution mailing list for Yocto
+ Project-related layers which do not have their own mailing list.
+
+- :oe_lists:`/g/openembedded-core` --- discussion mailing
list about OpenEmbedded-Core (the core metadata).
-- :oe_lists:`/g/openembedded-devel` - Discussion
+- :oe_lists:`/g/openembedded-devel` --- discussion
mailing list about OpenEmbedded.
-- :oe_lists:`/g/bitbake-devel` - Discussion mailing
+- :oe_lists:`/g/bitbake-devel` --- discussion mailing
list about the :term:`BitBake` build tool.
-- :yocto_lists:`/g/poky` - Discussion mailing list
+- :yocto_lists:`/g/poky` --- discussion mailing list
about :term:`Poky`.
-- :yocto_lists:`/g/yocto-announce` - Mailing list to
+- :yocto_lists:`/g/yocto-announce` --- mailing list to
receive official Yocto Project release and milestone announcements.
-For more Yocto Project-related mailing lists, see the
-:yocto_home:`Yocto Project Website <>`.
+- :yocto_lists:`/g/docs` --- discussion mailing list about the Yocto Project
+ documentation.
+
+See also :yocto_home:`the description of all mailing lists </community/mailing-lists/>`.
.. _resources-irc:
@@ -104,93 +108,96 @@ Links and Related Documentation
Here is a list of resources you might find helpful:
-- :yocto_home:`The Yocto Project Website <>`\ *:* The home site
+- :yocto_home:`The Yocto Project Website <>`: The home site
for the Yocto Project.
-- :yocto_wiki:`The Yocto Project Main Wiki Page <>`\ *:* The main wiki page for
+- :yocto_wiki:`The Yocto Project Main Wiki Page <>`: The main wiki page for
the Yocto Project. This page contains information about project
planning, release engineering, QA & automation, a reference site map,
and other resources related to the Yocto Project.
-- :oe_home:`OpenEmbedded <>`\ *:* The build system used by the
+- :oe_home:`OpenEmbedded <>`: The build system used by the
Yocto Project. This project is the upstream, generic, embedded
distribution from which the Yocto Project derives its build system
(Poky) and to which it contributes.
-- :oe_wiki:`BitBake </BitBake>`\ *:* The tool used to process metadata.
+- :oe_wiki:`BitBake </BitBake>`: The tool used to process metadata.
-- :doc:`BitBake User Manual <bitbake:index>`\ *:* A comprehensive
+- :doc:`BitBake User Manual <bitbake:index>`: A comprehensive
guide to the BitBake tool. If you want information on BitBake, see
this manual.
-- :doc:`/brief-yoctoprojectqs/index` *:* This
+- :doc:`/brief-yoctoprojectqs/index`: This
short document lets you experience building an image using the Yocto
Project without having to understand any concepts or details.
-- :doc:`/overview-manual/index` *:* This manual provides overview
+- :doc:`/overview-manual/index`: This manual provides overview
and conceptual information about the Yocto Project.
-- :doc:`/dev-manual/index` *:* This manual is a "how-to" guide
+- :doc:`/dev-manual/index`: This manual is a "how-to" guide
that presents procedures useful to both application and system
developers who use the Yocto Project.
-- :doc:`/sdk-manual/index` *manual :* This
+- :doc:`/sdk-manual/index` manual: This
guide provides information that lets you get going with the standard
or extensible SDK. An SDK, with its cross-development toolchains,
allows you to develop projects inside or outside of the Yocto Project
environment.
-- :doc:`/bsp-guide/bsp` *:* This guide defines the structure
+- :doc:`/bsp-guide/bsp`: This guide defines the structure
for BSP components. Having a commonly understood structure encourages
standardization.
-- :doc:`/kernel-dev/index` *:* This manual describes
+- :doc:`/kernel-dev/index`: This manual describes
how to work with Linux Yocto kernels as well as provides a bit of
conceptual information on the construction of the Yocto Linux kernel
tree.
-- :doc:`/ref-manual/index` *:* This
+- :doc:`/ref-manual/index`: This
manual provides reference material such as variable, task, and class
descriptions.
-- :yocto_docs:`Yocto Project Mega-Manual </singleindex.html>`\ *:* This manual
+- :yocto_docs:`Yocto Project Mega-Manual </singleindex.html>`: This manual
is simply a single HTML file comprised of the bulk of the Yocto
Project manuals. It makes it easy to search for phrases and terms used
in the Yocto Project documentation set.
-- :doc:`/profile-manual/index` *:* This manual presents a set of
+- :doc:`/profile-manual/index`: This manual presents a set of
common and generally useful tracing and profiling schemes along with
their applications (as appropriate) to each tool.
-- :doc:`/toaster-manual/index` *:* This manual
+- :doc:`/toaster-manual/index`: This manual
introduces and describes how to set up and use Toaster. Toaster is an
Application Programming Interface (API) and web-based interface to
the :term:`OpenEmbedded Build System`, which uses
BitBake, that reports build information.
-- :yocto_wiki:`FAQ </FAQ>`\ *:* A list of commonly asked
+- `Yocto Project BitBake extension for VSCode
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__:
+ This extension provides a rich feature set when working with BitBake recipes
+ within the Visual Studio Code IDE.
+
+- :yocto_wiki:`FAQ </FAQ>`: A list of commonly asked
questions and their answers.
-- *Release Notes:* Features, updates and known issues for the current
- release of the Yocto Project. To access the Release Notes, go to the
- :yocto_home:`Downloads </software-overview/downloads>` page on
- the Yocto Project website and click on the "RELEASE INFORMATION" link
- for the appropriate release.
+- :doc:`Release Information </migration-guides/index>`:
+ Migration guides, release notes, new features, updates and known issues
+ for the current and past releases of the Yocto Project.
-- :yocto_bugs:`Bugzilla <>`\ *:* The bug tracking application
+- :yocto_bugs:`Bugzilla <>`: The bug tracking application
the Yocto Project uses. If you find problems with the Yocto Project,
you should report them using this application.
- :yocto_wiki:`Bugzilla Configuration and Bug Tracking Wiki Page
- </Bugzilla_Configuration_and_Bug_Tracking>`\ *:*
+ </Bugzilla_Configuration_and_Bug_Tracking>`:
Information on how to get set up and use the Yocto Project
implementation of Bugzilla for logging and tracking Yocto Project
defects.
-- *Internet Relay Chat (IRC):* Two IRC channels on
+- Internet Relay Chat (IRC): Two IRC channels on
`Libera Chat <https://libera.chat/>`__ are
available for Yocto Project and OpenEmbeddded discussions: ``#yocto`` and
``#oe``, respectively.
-- `Quick EMUlator (QEMU) <https://wiki.qemu.org/Index.html>`__\ *:* An
+- `Quick EMUlator (QEMU) <https://wiki.qemu.org/Index.html>`__: An
open-source machine emulator and virtualizer.
diff --git a/documentation/ref-manual/structure.rst b/documentation/ref-manual/structure.rst
index 262b041ea6..496fd6ae41 100644
--- a/documentation/ref-manual/structure.rst
+++ b/documentation/ref-manual/structure.rst
@@ -175,7 +175,7 @@ within the :term:`Source Directory`. If you design a
custom distribution, you can include your own version of this
configuration file to mention the targets defined by your distribution.
See the
-":ref:`dev-manual/common-tasks:creating a custom template configuration directory`"
+":ref:`dev-manual/custom-template-configuration-directory:creating a custom template configuration directory`"
section in the Yocto Project Development Tasks Manual for more
information.
@@ -191,7 +191,7 @@ Directory named ``mybuilds/`` that is outside of the :term:`Source Directory`::
The OpenEmbedded build system uses the template configuration files, which
are found by default in the ``meta-poky/conf/`` directory in the Source
Directory. See the
-":ref:`dev-manual/common-tasks:creating a custom template configuration directory`"
+":ref:`dev-manual/custom-template-configuration-directory:creating a custom template configuration directory`"
section in the Yocto Project Development Tasks Manual for more
information.
@@ -213,8 +213,8 @@ These files are standard top-level files.
.. _structure-build:
-The Build Directory - ``build/``
-================================
+The Build Directory --- ``build/``
+==================================
The OpenEmbedded build system creates the :term:`Build Directory`
when you run the build environment setup
@@ -234,7 +234,7 @@ The OpenEmbedded build system creates this directory when you enable
build history via the :ref:`buildhistory <ref-classes-buildhistory>` class file. The directory
organizes build information into image, packages, and SDK
subdirectories. For information on the build history feature, see the
-":ref:`dev-manual/common-tasks:maintaining build output quality`"
+":ref:`dev-manual/build-quality:maintaining build output quality`"
section in the Yocto Project Development Tasks Manual.
.. _structure-build-conf-local.conf:
@@ -289,7 +289,7 @@ file, it uses ``sed`` to substitute final
----------------------------
This configuration file defines
-:ref:`layers <dev-manual/common-tasks:understanding and creating layers>`,
+:ref:`layers <dev-manual/layers:understanding and creating layers>`,
which are directory trees, traversed (or walked) by BitBake. The
``bblayers.conf`` file uses the :term:`BBLAYERS`
variable to list the layers BitBake tries to find.
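+
+For illustration, a minimal ``bblayers.conf`` listing might look like the
+following (the paths are placeholders for the actual checkout location)::
+
+   BBLAYERS ?= " \
+     /path/to/poky/meta \
+     /path/to/poky/meta-poky \
+     /path/to/poky/meta-yocto-bsp \
+     "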
@@ -434,7 +434,7 @@ directory contains sub-directories for ``bash``, ``busybox``, and
``glibc`` (among others) that in turn contain appropriate ``COPYING``
license files with other licensing information. For information on
licensing, see the
-":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
+":ref:`dev-manual/licenses:maintaining open source license compliance during your product's lifecycle`"
section in the Yocto Project Development Tasks Manual.
.. _structure-build-tmp-deploy-images:
@@ -530,7 +530,7 @@ recipe-specific :term:`WORKDIR` directories. Thus, the
This directory holds information that BitBake uses for accounting
purposes to track what tasks have run and when they have run. The
directory is sub-divided by architecture, package name, and version.
-Following is an example::
+Here is an example::
stamps/all-poky-linux/distcc-config/1.0-r0.do_build-2fdd....2do
@@ -571,7 +571,7 @@ built within the Yocto Project. For this package, a work directory of
``tmp/work/qemux86-poky-linux/linux-yocto/3.0+git1+<.....>``, referred
to as the :term:`WORKDIR`, is created. Within this directory, the source is
unpacked to ``linux-qemux86-standard-build`` and then patched by Quilt.
-(See the ":ref:`dev-manual/common-tasks:using quilt in your workflow`" section in
+(See the ":ref:`dev-manual/quilt:using quilt in your workflow`" section in
the Yocto Project Development Tasks Manual for more information.) Within
the ``linux-qemux86-standard-build`` directory, standard Quilt
directories ``linux-3.0/patches`` and ``linux-3.0/.pc`` are created, and
@@ -589,7 +589,7 @@ install" places its output that is then split into sub-packages within
``build/tmp/work/tunearch/recipename/version/``
-----------------------------------------------
-The recipe work directory - ``${WORKDIR}``.
+The recipe work directory --- ``${WORKDIR}``.
As described earlier in the
":ref:`structure-build-tmp-sysroots`" section,
@@ -654,8 +654,8 @@ recipes. In practice, this is only used for ``gcc`` and its variants
.. _structure-meta:
-The Metadata - ``meta/``
-========================
+The Metadata --- ``meta/``
+==========================
As mentioned previously, :term:`Metadata` is the core of the
Yocto Project. Metadata has several important subdivisions:
diff --git a/documentation/ref-manual/svg/releases.svg b/documentation/ref-manual/svg/releases.svg
new file mode 100644
index 0000000000..198d4632b1
--- /dev/null
+++ b/documentation/ref-manual/svg/releases.svg
@@ -0,0 +1,1744 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ version="1.1"
+ id="svg2"
+ width="2040.0006"
+ height="624.30518"
+ viewBox="0 0 2040.0006 624.30515"
+ sodipodi:docname="releases.svg"
+ inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:dc="http://purl.org/dc/elements/1.1/">
+ <title
+ id="title8568">Yocto Project Release Timeline</title>
+ <metadata
+ id="metadata8">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <cc:license
+ rdf:resource="http://artlibre.org/licence/lal" />
+ <dc:title>Yocto Project Release Timeline</dc:title>
+ <dc:creator>
+ <cc:Agent>
+ <dc:title>The Yocto Project</dc:title>
+ </cc:Agent>
+ </dc:creator>
+ </cc:Work>
+ <cc:License
+ rdf:about="http://creativecommons.org/licenses/by-sa/4.0/">
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#Reproduction" />
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#Distribution" />
+ <cc:requires
+ rdf:resource="http://creativecommons.org/ns#Notice" />
+ <cc:requires
+ rdf:resource="http://creativecommons.org/ns#Attribution" />
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
+ <cc:requires
+ rdf:resource="http://creativecommons.org/ns#ShareAlike" />
+ </cc:License>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs6">
+ <inkscape:path-effect
+ effect="powerstroke"
+ id="path-effect6121"
+ is_visible="true"
+ lpeversion="1"
+ offset_points="0,0.5"
+ sort_points="true"
+ interpolator_type="CubicBezierJohan"
+ interpolator_beta="0.2"
+ start_linecap_type="zerowidth"
+ linejoin_type="extrp_arc"
+ miter_limit="4"
+ scale_width="1"
+ end_linecap_type="zerowidth"
+ not_jump="false" />
+ <marker
+ style="overflow:visible"
+ id="marker5783"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5781" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5623"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5621" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5487"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5485" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5285"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart"
+ inkscape:isstock="true">
+ <path
+ transform="scale(0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5283" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5161"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5159" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker4860"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path4858" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker4504"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#ffa348;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path4502" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Arrow1Mend"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Mend"
+ inkscape:isstock="true">
+ <path
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ style="fill:#62a0ea;fill-opacity:1;fill-rule:evenodd;stroke:#62a0ea;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path3318" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker4174"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart"
+ inkscape:isstock="true">
+ <path
+ transform="scale(0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#62a0ea;fill-opacity:1;fill-rule:evenodd;stroke:#62a0ea;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path4172" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Arrow2Mend"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#ffa348;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path3336" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Arrow1Mstart"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Mstart"
+ inkscape:isstock="true">
+ <path
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ style="fill:#ff7800;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path3315" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Arrow2Lstart"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Lstart"
+ inkscape:isstock="true">
+ <path
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#ff7800;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path3327" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Arrow1Lstart"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lstart"
+ inkscape:isstock="true">
+ <path
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ style="fill:#ff7800;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path3309" />
+ </marker>
+ <linearGradient
+ id="linearGradient921"
+ inkscape:swatch="solid">
+ <stop
+ style="stop-color:#deddda;stop-opacity:1;"
+ offset="0"
+ id="stop919" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient6035-4">
+ <stop
+ id="stop6037-2"
+ style="stop-color:#ffffff"
+ offset="0" />
+ <stop
+ id="stop6039-9"
+ style="stop-color:#ffffff;stop-opacity:0"
+ offset="1" />
+ </linearGradient>
+ <marker
+ style="overflow:visible"
+ id="Arrow2Mstart-4"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart"
+ inkscape:isstock="true">
+ <path
+ transform="scale(0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#ffa348;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path3333-2" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Arrow2Mend-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#ffa348;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path3336-7" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5623-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5621-3" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="Arrow2Mend-4"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#ffa348;fill-opacity:1;fill-rule:evenodd;stroke:#ffa348;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path3336-3" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5285-1"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart"
+ inkscape:isstock="true">
+ <path
+ transform="scale(0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5283-7" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5161-4"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5159-3" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5285-1-4"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart"
+ inkscape:isstock="true">
+ <path
+ transform="scale(0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5283-7-6" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker5161-4-9"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend"
+ inkscape:isstock="true">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5159-3-4" />
+ </marker>
+ <marker
+ style="overflow:visible"
+ id="marker4174-8"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart"
+ inkscape:isstock="true">
+ <path
+ transform="scale(0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#62a0ea;fill-opacity:1;fill-rule:evenodd;stroke:#62a0ea;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path4172-8" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="1043"
+ id="namedview4"
+ showgrid="true"
+ inkscape:zoom="1.4472045"
+ inkscape:cx="736.24703"
+ inkscape:cy="312.32629"
+ inkscape:window-x="1728"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g10"
+ inkscape:document-rotation="0"
+ inkscape:snap-perpendicular="true"
+ fit-margin-top="30"
+ lock-margins="true"
+ fit-margin-left="30"
+ fit-margin-right="30"
+ fit-margin-bottom="30"
+ inkscape:pagecheckerboard="0">
+ <inkscape:grid
+ type="xygrid"
+ id="grid1257"
+ originx="-289.99936"
+ originy="325" />
+ </sodipodi:namedview>
+ <g
+ inkscape:groupmode="layer"
+ inkscape:label="Image"
+ id="g10"
+ transform="translate(-289.99936,325.00004)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1080,220.00003 v -515.00007 0 0"
+ id="path207708" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00003 v -515.00007 0 0"
+ id="path207708-4" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1320,220.00003 v -515.00007 0 0"
+ id="path207708-4-3" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1440,219.99998 v -515.00002 0 0"
+ id="path207708-4-3-6" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1560,219.99998 v -515.00001 0 0"
+ id="path207708-4-3-6-2" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1680,219.99998 v -515.00002 0 0"
+ id="path207708-4-3-6-2-8" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1800,219.99998 v -515.00002 0 0"
+ id="path207708-4-3-6-2-8-4" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1920,219.99998 v -515.00002 0 0"
+ id="path207708-4-3-6-2-8-4-3" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2040,219.99997 v -460.00002 0 0"
+ id="path207708-4-3-6-2-8-4-3-8" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2040,219.99998 v -515.00002 0 0"
+ id="path207708-4-3-6-2-8-4-3-8-0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2159.954,219.99997 v -514.99999 0 0"
+ id="path207708-4-3-6-2-8-4-3-8-4" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2280,219.99997 v -514.99999 0 0"
+ id="path207708-4-3-6-2-8-4-3-8-4-0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 960,220.00003 v -515.00007 0 0"
+ id="path207708-9" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 840,220.00001 v -375 0 0"
+ id="path207708-9-6" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 840,220.00002 v -515.00004 0 0"
+ id="path207708-9-6-2" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 720,220.00003 v -515.00007 0 0"
+ id="path207708-9-6-2-5" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 600,220.00003 v -515.00007 0 0"
+ id="path207708-9-6-2-5-9" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 480,220.00003 v -515.00007 0 0"
+ id="path207708-9-6-2-5-9-0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 360,220.00003 v -515.00007 0 0"
+ id="path207708-9-6-2-5-9-0-5" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:42.5884px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="142.22464"
+ y="565.10297"
+ id="text907"><tspan
+ sodipodi:role="line"
+ id="tspan905"
+ x="142.22464"
+ y="565.10297" /></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:42.5884px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="98.363503"
+ y="637.8432"
+ id="text911"><tspan
+ sodipodi:role="line"
+ id="tspan909"
+ x="98.363503"
+ y="637.8432" /></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:42.5884px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-59.575905"
+ y="580.05695"
+ id="text915"><tspan
+ sodipodi:role="line"
+ id="tspan913"
+ x="-59.575905"
+ y="580.05695" /></text>
+ <rect
+ style="fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0"
+ width="980"
+ height="45.000004"
+ x="360"
+ y="154.99997"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="420.52835"
+ y="174.12433"
+ id="text1185-3-55-4"><tspan
+ sodipodi:role="line"
+ x="420.52835"
+ y="174.12433"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8">Dunfell (LTS)</tspan><tspan
+ sodipodi:role="line"
+ x="420.52835"
+ y="192.121"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317">3.1</tspan></text>
+ <rect
+ style="fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4"
+ width="140.00002"
+ height="45.000004"
+ x="480"
+ y="99.999969"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="534.10651"
+ y="118.94971"
+ id="text1185-3-55-4-0"><tspan
+ sodipodi:role="line"
+ x="534.10651"
+ y="118.94971"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6">Gatesgarth</tspan><tspan
+ sodipodi:role="line"
+ x="534.10651"
+ y="136.94638"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2">3.2</tspan></text>
+ <rect
+ style="fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4-4"
+ width="260"
+ height="45.000004"
+ x="599.99994"
+ y="45.000011"
+ ry="2.2558987" />
+ <rect
+ style="fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4-4-9"
+ width="160.00002"
+ height="45.000004"
+ x="720"
+ y="-9.9999905"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="766.10297"
+ y="9.57586"
+ id="text1185-3-55-4-0-0"><tspan
+ sodipodi:role="line"
+ x="766.10297"
+ y="9.57586"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6-3">Honister</tspan><tspan
+ sodipodi:role="line"
+ x="766.10297"
+ y="27.57254"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2-9">3.4</tspan></text>
+ <rect
+ style="fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4-4-9-4"
+ width="160.00002"
+ height="45.000004"
+ x="959.99994"
+ y="-120"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1008.4941"
+ y="-100.605"
+ id="text1185-3-55-4-0-0-0"><tspan
+ sodipodi:role="line"
+ x="1008.4941"
+ y="-100.605"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6-3-9">Langdale</tspan><tspan
+ sodipodi:role="line"
+ x="1008.4941"
+ y="-82.608322"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2-9-1">4.1</tspan></text>
+ <rect
+ style="opacity:1;fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4-4-9-4-5"
+ width="140.00003"
+ height="45.000004"
+ x="1100"
+ y="-175.00003"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1156.057"
+ y="-155.49881"
+ id="text1185-3-55-4-0-0-0-1"><tspan
+ sodipodi:role="line"
+ x="1156.057"
+ y="-155.49881"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6-3-9-7">Mickledore</tspan><tspan
+ sodipodi:role="line"
+ x="1156.057"
+ y="-137.50214"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2-9-1-4">4.2</tspan></text>
+ <g
+ id="g1379">
+ <rect
+ style="fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4-4-9-4-5-38"
+ width="140.00003"
+ height="45.000004"
+ x="1220"
+ y="-230.00005"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1269.2329"
+ y="-210.32925"
+ id="text1185-3-55-4-0-0-0-1-1"><tspan
+ sodipodi:role="line"
+ x="1269.2329"
+ y="-210.32925"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6-3-9-7-4">Nanbield</tspan><tspan
+ sodipodi:role="line"
+ x="1269.2329"
+ y="-192.33258"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2-9-1-4-6">4.3</tspan></text>
+ </g>
+ <rect
+ style="opacity:0.75;fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4-4-9-4-5-3-9"
+ width="979.99994"
+ height="45.000004"
+ x="1320"
+ y="-285.00003"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1373.233"
+ y="-265.32928"
+ id="text1185-3-55-4-0-0-0-1-1-6"><tspan
+ sodipodi:role="line"
+ x="1373.233"
+ y="-265.32928"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6-3-9-7-4-2">Scarthgap</tspan><tspan
+ sodipodi:role="line"
+ x="1373.233"
+ y="-247.33261"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2-9-1-4-6-5">5.0</tspan></text>
+ <rect
+ style="fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2;stroke-opacity:1"
+ id="rect917-0-0-4-4-9-9"
+ width="960.00012"
+ height="45.000004"
+ x="859.99994"
+ y="-64.999992"
+ ry="2.2558987" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="928.49872"
+ y="-45.648258"
+ id="text1185-3-55-4-0-0-9"><tspan
+ sodipodi:role="line"
+ x="928.49872"
+ y="-45.648258"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6-3-6">Kirkstone (LTS)</tspan><tspan
+ sodipodi:role="line"
+ x="928.49872"
+ y="-27.651579"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2-9-0">4.0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#fffefe;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="653.72168"
+ y="64.866302"
+ id="text1185-3-55-4-0-0-7"><tspan
+ sodipodi:role="line"
+ x="653.72168"
+ y="64.866302"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan957-2-8-6-3-2">Hardknott </tspan><tspan
+ sodipodi:role="line"
+ x="653.72168"
+ y="82.862984"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans Bold';text-align:center;text-anchor:middle;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan10317-2-9-8">3.3</tspan></text>
+ <g
+ id="g1125-0"
+ transform="matrix(0.42240595,0,0,0.41654472,354.53445,-399.96314)"
+ style="stroke:none;stroke-width:2.38399">
+ <rect
+ style="opacity:1;fill:#333333;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:4.76797;stroke-opacity:1"
+ id="rect1061-9"
+ width="452.00439"
+ height="192.0562"
+ x="11.609296"
+ y="276.44562"
+ ry="4.0176301" />
+ <g
+ id="g1109-1"
+ transform="translate(-2.7615661,-1.7576335)"
+ style="stroke:none;stroke-width:2.38399">
+ <path
+ id="path14-9"
+ class="st0"
+ d="m 439.74452,358.11274 c 0,4.22 -3.41,7.64 -7.64,7.64 -4.22,0 -7.63,-3.42 -7.63,-7.64 0,-4.22 3.41,-7.64 7.63,-7.64 4.23,0 7.64,3.42 7.64,7.64 v 0"
+ style="fill:#4a97d2;fill-opacity:1;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path16-6"
+ class="st1"
+ d="m 114.56452,324.94274 -11.13,-6.3 -22.409996,45.41 -23.9,-45.41 -11.27,6.3 28.41,53.38 c -0.21,0.51 -0.86,1.9 -1.95,4.22 -1.11,2.21 -2.25,4.41 -3.46,6.62 -2.11,3.81 -4.26,6.91 -6.46,9.32 -2.21,2.51 -4.46,4.51 -6.78,6.02 -2.3,1.51 -4.7,2.65 -7.21,3.46 -2.41,0.8 -4.87,1.45 -7.38,1.95 l 5.12,10.68 c 1.6,-0.21 3.75,-0.71 6.46,-1.51 2.81,-0.7 5.86,-2.06 9.17,-4.06 3.3,-2 6.67,-4.86 10.07,-8.57 3.52,-3.71 6.78,-8.62 9.78,-14.73 l 32.939996,-66.78"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path18-2"
+ class="st1"
+ d="m 175.39452,358.18274 c 0,3.51 -0.6,6.76 -1.81,9.78 -1.21,3 -2.86,5.62 -4.95,7.81 -2.01,2.11 -4.41,3.81 -7.22,5.12 -2.71,1.19 -5.67,1.8 -8.87,1.8 -3.21,0 -6.22,-0.6 -9.02,-1.8 -2.81,-1.31 -5.27,-3.01 -7.38,-5.12 -2,-2.19 -3.6,-4.81 -4.81,-7.81 -1.21,-3.01 -1.81,-6.27 -1.81,-9.78 0,-3.51 0.6,-6.76 1.81,-9.77 1.21,-3 2.81,-5.61 4.81,-7.82 2.11,-2.21 4.57,-3.92 7.38,-5.11 2.8,-1.32 5.81,-1.97 9.02,-1.97 3.21,0 6.16,0.65 8.87,1.97 2.81,1.19 5.21,2.9 7.22,5.11 2.1,2.21 3.75,4.81 4.95,7.82 1.2,3.01 1.81,6.26 1.81,9.77 m 13.98,0 c 0,-5.21 -0.95,-10.08 -2.86,-14.59 -1.81,-4.51 -4.36,-8.42 -7.67,-11.73 -3.32,-3.3 -7.22,-5.86 -11.73,-7.67 -4.51,-1.9 -9.38,-2.86 -14.59,-2.86 -5.21,0 -10.08,0.95 -14.59,2.86 -4.51,1.81 -8.43,4.36 -11.73,7.67 -3.3,3.31 -5.92,7.22 -7.82,11.73 -1.9,4.51 -2.86,9.38 -2.86,14.59 0,5.21 0.95,10.08 2.86,14.59 1.9,4.41 4.52,8.27 7.82,11.57 3.3,3.32 7.22,5.92 11.73,7.82 4.51,1.81 9.38,2.71 14.59,2.71 5.21,0 10.08,-0.9 14.59,-2.71 4.51,-1.91 8.41,-4.51 11.73,-7.82 3.3,-3.3 5.86,-7.16 7.67,-11.57 1.91,-4.51 2.86,-9.38 2.86,-14.59"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path20-5"
+ class="st1"
+ d="m 373.22452,358.18274 c 0,3.51 -0.6,6.76 -1.81,9.78 -1.21,3 -2.86,5.62 -4.97,7.81 -2,2.11 -4.4,3.81 -7.21,5.12 -2.71,1.19 -5.67,1.8 -8.87,1.8 -3.21,0 -6.22,-0.6 -9.03,-1.8 -2.8,-1.31 -5.26,-3.01 -7.37,-5.12 -2,-2.19 -3.61,-4.81 -4.81,-7.81 -1.21,-3.01 -1.81,-6.27 -1.81,-9.78 0,-3.51 0.6,-6.76 1.81,-9.77 1.21,-3 2.81,-5.61 4.81,-7.82 2.11,-2.21 4.57,-3.92 7.37,-5.11 2.81,-1.32 5.82,-1.97 9.03,-1.97 3.21,0 6.16,0.65 8.87,1.97 2.81,1.19 5.21,2.9 7.21,5.11 2.11,2.21 3.76,4.81 4.97,7.82 1.21,3.01 1.81,6.26 1.81,9.77 m 13.98,0 c 0,-5.21 -0.95,-10.08 -2.86,-14.59 -1.81,-4.51 -4.36,-8.42 -7.67,-11.73 -3.32,-3.3 -7.22,-5.86 -11.73,-7.67 -4.51,-1.9 -9.38,-2.86 -14.59,-2.86 -5.22,0 -10.08,0.95 -14.59,2.86 -4.51,1.81 -8.43,4.36 -11.73,7.67 -3.3,3.31 -5.92,7.22 -7.82,11.73 -1.9,4.51 -2.86,9.38 -2.86,14.59 0,5.21 0.95,10.08 2.86,14.59 1.9,4.41 4.52,8.27 7.82,11.57 3.3,3.32 7.22,5.92 11.73,7.82 4.51,1.81 9.37,2.71 14.59,2.71 5.21,0 10.08,-0.9 14.59,-2.71 4.51,-1.91 8.41,-4.51 11.73,-7.82 3.3,-3.3 5.86,-7.16 7.67,-11.57 1.91,-4.51 2.86,-9.38 2.86,-14.59"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path22-4"
+ class="st1"
+ d="m 288.05452,298.59274 -13.39,7.52 v 16.45 h -36.21 c -26.15,0 -41.9,12.74 -41.9,35.95 0,36.04 37.55,42.84 64.25,29.96 l -5.63,-10.92 c -21.13,9.23 -44.53,5.3 -44.53,-19.28 0,-15.86 8.26,-24.54 27.49,-24.54 h 36.54 v 43.82 c 0,19.37 22.19,19.81 35.95,11.86 l -5.29,-10.45 c -8.85,4.48 -17.26,5.06 -17.26,-3.53 v -41.7 h 18.32 v -11.17 h -18.32 l -0.02,-23.97 v 0"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path24-4"
+ class="st1"
+ d="m 136.79452,428.30274 h 3.35 c 1.69,-0.01 3.34,1.19 3.34,2.9 0,2.87 -3.23,3.3 -3.23,3.3 l -3.46,0.02 z m -4.84,-4.1 v 25.3 h 4.83 l 0.06,-10.67 c 8.62,0.54 11.84,-2.46 11.84,-7.75 0,-4.75 -4.26,-6.88 -8.34,-6.88 h -8.39 v 0"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path26-9"
+ class="st1"
+ d="m 224.38452,436.87274 c 0,1.23 -0.23,2.39 -0.69,3.46 -0.42,1.04 -1.02,1.95 -1.81,2.74 -0.78,0.78 -1.7,1.4 -2.75,1.86 -1.04,0.42 -2.17,0.64 -3.38,0.64 -1.22,0 -2.35,-0.22 -3.39,-0.64 -1.05,-0.46 -1.96,-1.07 -2.75,-1.86 -0.76,-0.78 -1.36,-1.7 -1.81,-2.74 -0.46,-1.07 -0.69,-2.23 -0.69,-3.46 0,-1.23 0.23,-2.37 0.69,-3.42 0.45,-1.06 1.05,-1.99 1.81,-2.77 0.78,-0.78 1.7,-1.39 2.75,-1.82 1.04,-0.45 2.17,-0.67 3.39,-0.67 1.21,0 2.34,0.23 3.38,0.67 1.05,0.43 1.96,1.04 2.75,1.82 0.78,0.78 1.39,1.71 1.81,2.77 0.46,1.05 0.69,2.19 0.69,3.42 m 4.9,0 c 0,-1.81 -0.35,-3.5 -1.06,-5.06 -0.69,-1.59 -1.65,-2.97 -2.89,-4.12 -1.21,-1.17 -2.64,-2.09 -4.3,-2.75 -1.64,-0.69 -3.41,-1.04 -5.3,-1.04 -1.9,0 -3.69,0.35 -5.35,1.04 -1.64,0.66 -3.06,1.58 -4.27,2.75 -1.22,1.16 -2.17,2.53 -2.89,4.12 -0.69,1.57 -1.03,3.25 -1.03,5.06 0,1.83 0.34,3.53 1.03,5.1 0.72,1.57 1.68,2.94 2.89,4.12 1.21,1.17 2.63,2.09 4.27,2.75 1.66,0.66 3.45,1 5.35,1 1.89,0 3.67,-0.34 5.3,-1 1.66,-0.66 3.1,-1.58 4.3,-2.75 1.24,-1.18 2.21,-2.55 2.89,-4.12 0.71,-1.56 1.06,-3.26 1.06,-5.1"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path28-9"
+ class="st1"
+ d="m 249.02452,424.25274 v 19.18 c 0,0.77 -0.57,1.63 -1.51,1.65 l -1.51,0.06 0.08,4.7 1.41,0.02 c 4.73,0.07 6.36,-4.37 6.36,-6.45 v -19.13"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path30-3"
+ class="st1"
+ d="m 290.20452,424.19274 h -16.13 v 25.22 h 16.2 l -0.07,-4.06 h -11.28 v -6.58 h 9.44 v -4.06 h -9.44 v -6.38 h 11.21 l 0.07,-4.14"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path32-6"
+ class="st1"
+ d="m 327.39452,430.24274 c -8.26,-4.53 -16.39,-1.78 -16.39,6.52 0,6.69 6.43,11.84 17,6.31 l 1.53,4.13 c -10.15,5.58 -23.51,1.6 -23.51,-10.44 0,-10.91 11.85,-16.59 23.36,-10.61 l -1.99,4.09"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path34-0"
+ class="st1"
+ d="m 366.07452,424.19274 0.01,4.13 h -8.05 v 21.16 h -4.85 v -21.15 h -7.73 l 0.11,-4.13 h 20.51"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ <path
+ id="path36-5"
+ class="st1"
+ d="m 172.67452,428.30274 h 3.35 c 1.69,-0.01 3.34,1.19 3.34,2.9 0,2.87 -3.23,3.3 -3.23,3.3 l -3.46,0.02 z m -4.82,-4.1 v 25.3 h 4.82 v -10.89 h 3.2 l 5.59,10.89 h 5.5 l -6.67,-12.2 c 2.64,-1.18 4.01,-3.26 4.01,-6.22 0,-4.94 -4.33,-6.88 -9.09,-6.88 h -7.36 v 0"
+ style="fill:#ffffff;stroke:none;stroke-width:0.238399;stroke-opacity:1" />
+ </g>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:42.5884px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="313.46567"
+ y="412.9321"
+ id="text3781"><tspan
+ sodipodi:role="line"
+ id="tspan3779"
+ x="313.46567"
+ y="412.9321" /></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1199.6055"
+ y="250.21216"
+ id="text1185-9-7-1-1"><tspan
+ sodipodi:role="line"
+ x="1199.6055"
+ y="250.21216"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="1199.6055"
+ y="268.20883"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906">2023</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1439.3904"
+ y="249.86044"
+ id="text1185-9-7-1-1-89"><tspan
+ sodipodi:role="line"
+ x="1439.3904"
+ y="249.86044"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-7">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="1439.3904"
+ y="267.85712"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-76">2024</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1679.3094"
+ y="250.58356"
+ id="text1185-9-7-1-1-89-6"><tspan
+ sodipodi:role="line"
+ x="1679.3094"
+ y="250.58356"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-7-8">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="1679.3094"
+ y="268.58023"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-76-0">2025</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:6.66667px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="849.49744"
+ y="61.106953"
+ id="text1185-9-7-1-1-0"><tspan
+ sodipodi:role="line"
+ x="849.49744"
+ y="61.106953"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:6.66667px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:end;text-anchor:end;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan46212">Support for this version was extended to leave</tspan><tspan
+ sodipodi:role="line"
+ x="849.49744"
+ y="70.105324"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:6.66667px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:end;text-anchor:end;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan120364">users more time to adapt to override syntax</tspan><tspan
+ sodipodi:role="line"
+ x="849.49744"
+ y="79.103691"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:6.66667px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:end;text-anchor:end;fill:#fffefe;fill-opacity:1;stroke:none"
+ id="tspan123280">changes in the 3.4 release.</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="959.52008"
+ y="250.67822"
+ id="text1185-9-7-1-1-0-7"><tspan
+ sodipodi:role="line"
+ x="959.52008"
+ y="250.67822"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-42-7">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="959.52008"
+ y="268.6749"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-9-6">2022</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="719.13617"
+ y="250.21216"
+ id="text1185-9-7-1-1-2"><tspan
+ sodipodi:role="line"
+ x="719.13617"
+ y="250.21216"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-1">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="719.13617"
+ y="268.20883"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-5">2021</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="478.82367"
+ y="250.21216"
+ id="text1185-9-7-1-1-80"><tspan
+ sodipodi:role="line"
+ x="478.82367"
+ y="250.21216"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-5">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="478.82367"
+ y="268.20883"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-6">2020</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="361.81961"
+ y="250.07544"
+ id="text1185-9-7-1-1-8"><tspan
+ sodipodi:role="line"
+ x="361.81961"
+ y="250.07544"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="361.81961"
+ y="268.07211"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7">2020</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="840.9248"
+ y="250.07544"
+ id="text1185-9-7-1-1-8-1"><tspan
+ sodipodi:role="line"
+ x="840.9248"
+ y="250.07544"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0">Apr</tspan><tspan
+ sodipodi:role="line"
+ x="840.9248"
+ y="268.07211"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3">2022</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1321.8608"
+ y="250.07544"
+ id="text1185-9-7-1-1-8-1-0"><tspan
+ sodipodi:role="line"
+ x="1321.8608"
+ y="250.07544"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0-4">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="1321.8608"
+ y="268.07211"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3-8">2024</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1561.8163"
+ y="249.66977"
+ id="text1185-9-7-1-1-8-1-0-4"><tspan
+ sodipodi:role="line"
+ x="1561.8163"
+ y="249.66977"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0-4-81">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="1561.8163"
+ y="267.66644"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3-8-2">2025</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1802.1477"
+ y="250.26334"
+ id="text1185-9-7-1-1-8-1-0-4-2"><tspan
+ sodipodi:role="line"
+ x="1802.1477"
+ y="250.26334"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0-4-81-5">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="1802.1477"
+ y="268.26001"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3-8-2-8">2026</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1081.4458"
+ y="250.07544"
+ id="text1185-9-7-1-1-8-1-0-2"><tspan
+ sodipodi:role="line"
+ x="1081.4458"
+ y="250.07544"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0-4-8">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="1081.4458"
+ y="268.07211"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3-8-3">2023</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="602.51526"
+ y="250.07544"
+ id="text1185-9-7-1-1-8-1-7"><tspan
+ sodipodi:role="line"
+ x="602.51526"
+ y="250.07544"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0-5">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="602.51526"
+ y="268.07211"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3-6">2021</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:42.5884px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-16.290483"
+ y="345.7359"
+ id="text3116"><tspan
+ sodipodi:role="line"
+ id="tspan3114"
+ x="-16.290483"
+ y="345.7359" /></text>
+ <path
+ id="path29430"
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 319.99936,219.99912 H 2300 Z" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 360,219.99997 v 10.00004 0"
+ id="path29548" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 480,219.99996 v 10 0"
+ id="path29548-5" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 600,219.99992 v 10.00005 0"
+ id="path29548-5-1" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 720,220.00002 v 9.99999 0"
+ id="path29548-5-1-3" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 840,220.00002 v 9.99995 0"
+ id="path29548-5-1-3-6" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 960,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1080,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 380,219.99997 v 5.00004 0"
+ id="path29548-8"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 400,219.99997 v 5.00004 0"
+ id="path29548-8-5"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.999997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 420,219.99997 v 5 0"
+ id="path29548-8-5-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282155" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.999997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 440,219.99997 v 5 0"
+ id="path29548-8-5-0-6"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282155" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 460,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 500,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.999997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 520,219.99997 v 5 0"
+ id="path29548-8-5-0-6-4-6-2-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282155" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 540,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 560,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 580,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 620.266,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 640,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 660,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 679.61073,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 700,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 740,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 760,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 780.36587,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 800,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 820,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 860,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 880,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 899.72384,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 920,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 940,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 980,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1000,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1020,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1040,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1059.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1100,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1120,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1140,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1160,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1179.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1320,219.99996 v 10 0"
+ id="path29548-5-1-3-6-3-1-0-8" />
+ <g
+ id="g1267">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <g
+ id="g1267-4"
+ transform="translate(240,-4e-5)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-3"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-2"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <g
+ id="g1267-4-5"
+ transform="translate(480,-5e-5)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-4" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-0-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-3-5"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-0-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-9-4"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-2-6"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <g
+ id="g1267-4-5-22"
+ transform="translate(600,-4e-5)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-4-0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-0-0-55"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-3-5-2"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-0-9-90"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-9-4-2"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-2-6-8"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <g
+ id="g1267-4-5-9"
+ transform="translate(360,-4e-5)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-4-2" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-0-0-2"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-3-5-4"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-0-9-7"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-9-4-7"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-2-6-5"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1800,219.99997 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-4-2-0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1340,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-3"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1360,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1380,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-3"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1400,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1419.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-9"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="1919.3904"
+ y="249.86044"
+ id="text1185-9-7-1-1-89-62"><tspan
+ sodipodi:role="line"
+ x="1919.3904"
+ y="249.86044"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-7-6">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="1919.3904"
+ y="267.85712"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-76-7">2026</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="2159.3093"
+ y="250.58356"
+ id="text1185-9-7-1-1-89-6-5"><tspan
+ sodipodi:role="line"
+ x="2159.3093"
+ y="250.58356"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-7-8-6">Oct.</tspan><tspan
+ sodipodi:role="line"
+ x="2159.3093"
+ y="268.58023"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-76-0-9">2027</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="2041.8163"
+ y="249.66977"
+ id="text1185-9-7-1-1-8-1-0-4-8"><tspan
+ sodipodi:role="line"
+ x="2041.8163"
+ y="249.66977"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0-4-81-7">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="2041.8163"
+ y="267.66644"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3-8-2-2">2027</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="2282.1477"
+ y="250.26334"
+ id="text1185-9-7-1-1-8-1-0-4-2-8"><tspan
+ sodipodi:role="line"
+ x="2282.1477"
+ y="250.26334"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan31345-4-0-4-81-5-2">Apr.</tspan><tspan
+ sodipodi:role="line"
+ x="2282.1477"
+ y="268.26001"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:13.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;text-align:center;text-anchor:middle;stroke:none"
+ id="tspan49906-7-3-8-2-8-9">2028</tspan></text>
+ <g
+ id="g1267-4-9"
+ transform="translate(720,-3e-5)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-6" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-0-02"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-3-7"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-0-6"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-9-1"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-2-3"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <g
+ id="g1267-4-5-2"
+ transform="translate(960,-4e-5)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-4-1" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-0-0-5"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-3-5-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-0-9-9"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-9-4-1"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-2-6-4"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <g
+ id="g1267-4-5-9-9"
+ transform="translate(840,-3e-5)">
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1200,220.00002 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-4-2-1" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1220,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-0-5-0-0-2-0"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1240,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-7-5-3-5-4-7"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1260,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-5-2-0-9-7-5"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1280,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-8-9-9-4-7-8"
+ inkscape:transform-center-x="14.782001"
+ inkscape:transform-center-y="-0.085282837" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1299.7216,219.99997 v 5.00004 0"
+ id="path29548-8-5-0-6-4-6-2-9-0-8-1-3-1-9-6-9-3-4-0-4-6-2-2-7-6-1-9-9-1-4-9-7-0-2-6-5-7"
+ inkscape:transform-center-x="-14.78205"
+ inkscape:transform-center-y="-0.085282837" />
+ </g>
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2280,219.99998 v 9.99999 0"
+ id="path29548-5-1-3-6-3-1-0-3-4-2-0-0" />
+ </g>
+ <style
+ type="text/css"
+ id="style1021"> .st0{fill:#4A97D2;} .st1{fill:#333333;} </style>
+</svg>
diff --git a/documentation/ref-manual/system-requirements.rst b/documentation/ref-manual/system-requirements.rst
index 04f9efaa23..b45c863005 100644
--- a/documentation/ref-manual/system-requirements.rst
+++ b/documentation/ref-manual/system-requirements.rst
@@ -29,34 +29,77 @@ and conceptual information in the :doc:`/overview-manual/index`.
For more information about the Yocto Project Documentation set, see
the :ref:`ref-manual/resources:links and related documentation` section.
-.. _detailed-supported-distros:
+Minimum Free Disk Space
+=======================
+
+To build an image such as ``core-image-sato`` for the ``qemux86-64`` machine,
+you need a system with at least &MIN_DISK_SPACE; Gbytes of free disk space.
+However, much more disk space will be necessary to build more complex images,
+to run multiple builds and to cache build artifacts, improving build efficiency.
+
+If you have a shortage of disk space, see the ":doc:`/dev-manual/disk-space`"
+section of the Development Tasks Manual.
+
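+To check the amount of free disk space available in the directory where you
+intend to run the build, you can for example run::
+
+   $ df -h .
+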
+.. _system-requirements-minimum-ram:
+
+Minimum System RAM
+==================
+
+You can build an image such as ``core-image-sato`` for the ``qemux86-64``
+machine with as little as &MIN_RAM; Gbytes of RAM on an old system with 4 CPU
+cores, but your builds will be much faster on a system with as much RAM and
+as many CPU cores as possible.
+
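+For a quick look at the resources of your build host, you can for example run
+the following commands, which report the available RAM and the number of CPU
+cores respectively::
+
+   $ free -h
+   $ nproc
+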
+.. _system-requirements-supported-distros:
Supported Linux Distributions
=============================
-Currently, the Yocto Project is supported on the following
-distributions:
+Currently, the &DISTRO; release ("&DISTRO_NAME;") of the Yocto Project is
+supported on the following distributions:
+
+- Ubuntu 20.04 (LTS)
+
+- Ubuntu 22.04 (LTS)
+
+- Fedora 38
+
+- Debian GNU/Linux 11.x (Bullseye)
+
+- AlmaLinux 8
+
+The following distribution versions are still tested, even though the
+organizations publishing them no longer make updates publicly available:
- Ubuntu 18.04 (LTS)
-- Ubuntu 20.04 (LTS)
+Note that the Yocto Project doesn't have access to private updates
+that some of these versions may have. Therefore, our testing has
+limited value if you have access to such updates.
+
+Finally, here are the distribution versions which were previously
+tested on former revisions of "&DISTRO_NAME;", but no longer are:
+
+- Ubuntu 16.04 (LTS)
+
+- Ubuntu 21.10
- Fedora 34
- Fedora 35
+- Fedora 36
+
+- Fedora 37
+
- CentOS 7.x
- CentOS 8.x
-- AlmaLinux 8.5
-
- Debian GNU/Linux 9.x (Stretch)
- Debian GNU/Linux 10.x (Buster)
-- Debian GNU/Linux 11.x (Bullseye)
-
- OpenSUSE Leap 15.3
.. note::
@@ -75,24 +118,29 @@ distributions:
has no plans to support rolling-releases or development
distributions due to their constantly changing nature. We welcome
patches and bug reports, but keep in mind that our priority is on
- the supported platforms listed below.
+ the supported platforms listed above.
+
+   - If your Linux distribution is not in the above list, we recommend
+     getting the :term:`buildtools` or :term:`buildtools-extended` tarballs
+ containing the host tools required by your Yocto Project release,
+ typically by running ``scripts/install-buildtools`` as explained in
+ the ":ref:`system-requirements-buildtools`" section.
- You may use Windows Subsystem For Linux v2 to set up a build host
- using Windows 10, but validation is not performed against build
- hosts using WSLv2.
+ using Windows 10 or later, or Windows Server 2019 or later, but validation
+ is not performed against build hosts using WSL 2.
- - The Yocto Project is not compatible with WSLv1, it is
- compatible but not officially supported nor validated with
- WSLv2, if you still decide to use WSL please upgrade to WSLv2.
+ See the
+ :ref:`dev-manual/start:setting up to use windows subsystem for linux (wsl 2)`
+ section in the Yocto Project Development Tasks Manual for more information.
- If you encounter problems, please go to :yocto_bugs:`Yocto Project
Bugzilla <>` and submit a bug. We are
interested in hearing about your experience. For information on
how to submit a bug, see the Yocto Project
:yocto_wiki:`Bugzilla wiki page </Bugzilla_Configuration_and_Bug_Tracking>`
- and the ":ref:`dev-manual/common-tasks:submitting a defect against the yocto project`"
- section in the Yocto Project Development Tasks Manual.
-
+ and the ":doc:`../contributor-guide/report-defect`"
+ section in the Yocto Project and OpenEmbedded Contributor Guide.
Required Packages for the Build Host
====================================
@@ -107,8 +155,10 @@ function.
Ubuntu and Debian
-----------------
-Here are the required packages by function given a
-supported Ubuntu or Debian Linux distribution:
+Here are the packages needed to build an image on a headless system
+with a supported Ubuntu or Debian Linux distribution::
+
+ $ sudo apt install &UBUNTU_HOST_PACKAGES_ESSENTIAL;
.. note::
@@ -120,122 +170,68 @@ supported Ubuntu or Debian Linux distribution:
$ sudo apt build-dep qemu
$ sudo apt remove oss4-dev
- - For Debian-8, ``python3-git`` and ``pylint3`` are no longer
- available via ``apt``.
- ::
-
- $ sudo pip3 install GitPython pylint==1.9.5
-
-- *Essentials:* Packages needed to build an image on a headless system::
-
- $ sudo apt install &UBUNTU_HOST_PACKAGES_ESSENTIAL;
-
-- *Documentation:* Packages needed if you are going to build out the
- Yocto Project documentation manuals::
-
- $ sudo apt install make python3-pip
- &PIP3_HOST_PACKAGES_DOC;
-
- .. note::
+Here are the packages needed to build the Yocto Project documentation
+manuals::
- It is currently not possible to build out documentation from Debian 8
- (Jessie) because of outdated ``pip3`` and ``python3``. ``python3-sphinx``
- is too outdated.
+ $ sudo apt install git make inkscape texlive-latex-extra
+ $ sudo apt install sphinx python3-saneyaml python3-sphinx-rtd-theme
Fedora Packages
---------------
-Here are the required packages by function given a
-supported Fedora Linux distribution:
+Here are the packages needed to build an image on a headless system
+with a supported Fedora Linux distribution::
-- *Essentials:* Packages needed to build an image for a headless
- system::
+ $ sudo dnf install &FEDORA_HOST_PACKAGES_ESSENTIAL;
- $ sudo dnf install &FEDORA_HOST_PACKAGES_ESSENTIAL;
+Here are the packages needed to build the Yocto Project documentation
+manuals::
-- *Documentation:* Packages needed if you are going to build out the
- Yocto Project documentation manuals::
-
- $ sudo dnf install make python3-pip which
- &PIP3_HOST_PACKAGES_DOC;
+ $ sudo dnf install git make python3-pip which inkscape texlive-fncychap
+ &PIP3_HOST_PACKAGES_DOC;
openSUSE Packages
-----------------
-Here are the required packages by function given a
-supported openSUSE Linux distribution:
-
-- *Essentials:* Packages needed to build an image for a headless
- system::
-
- $ sudo zypper install &OPENSUSE_HOST_PACKAGES_ESSENTIAL;
-
-- *Documentation:* Packages needed if you are going to build out the
- Yocto Project documentation manuals::
-
- $ sudo zypper install make python3-pip which
- &PIP3_HOST_PACKAGES_DOC;
-
-
-CentOS-7 Packages
------------------
-
-Here are the required packages by function given a
-supported CentOS-7 Linux distribution:
-
-- *Essentials:* Packages needed to build an image for a headless
- system::
-
- $ sudo yum install &CENTOS7_HOST_PACKAGES_ESSENTIAL;
-
- .. note::
+Here are the packages needed to build an image on a headless system
+with a supported openSUSE distribution::
- - Extra Packages for Enterprise Linux (i.e. ``epel-release``) is
- a collection of packages from Fedora built on RHEL/CentOS for
- easy installation of packages not included in enterprise Linux
- by default. You need to install these packages separately.
+ $ sudo zypper install &OPENSUSE_HOST_PACKAGES_ESSENTIAL;
- - The ``makecache`` command consumes additional Metadata from
- ``epel-release``.
+Here are the packages needed to build the Yocto Project documentation
+manuals::
-- *Documentation:* Packages needed if you are going to build out the
- Yocto Project documentation manuals::
+ $ sudo zypper install git make python3-pip which inkscape texlive-fncychap
+ &PIP3_HOST_PACKAGES_DOC;
- $ sudo yum install make python3-pip which
- &PIP3_HOST_PACKAGES_DOC;
-CentOS-8 Packages
------------------
+AlmaLinux Packages
+------------------
-Here are the required packages by function given a
-supported CentOS-8 Linux distribution:
+Here are the packages needed to build an image on a headless system
+with a supported AlmaLinux distribution::
-- *Essentials:* Packages needed to build an image for a headless
- system::
+ $ sudo dnf install &ALMALINUX_HOST_PACKAGES_ESSENTIAL;
- $ sudo dnf install &CENTOS8_HOST_PACKAGES_ESSENTIAL;
+.. note::
- .. note::
+ - Extra Packages for Enterprise Linux (i.e. ``epel-release``) is
+ a collection of packages from Fedora built on RHEL/CentOS for
+ easy installation of packages not included in enterprise Linux
+     by default. You need to install these packages separately, as shown below.
- - Extra Packages for Enterprise Linux (i.e. ``epel-release``) is
- a collection of packages from Fedora built on RHEL/CentOS for
- easy installation of packages not included in enterprise Linux
- by default. You need to install these packages separately.
+ - The ``PowerTools/CRB`` repo provides additional packages such as
+ ``rpcgen`` and ``texinfo``.
- - The ``PowerTools`` repo provides additional packages such as
- ``rpcgen`` and ``texinfo``.
+ - The ``makecache`` command consumes additional Metadata from
+ ``epel-release``.
- - The ``makecache`` command consumes additional Metadata from
- ``epel-release``.
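+
+   For example, on AlmaLinux 8 these repositories can typically be enabled as
+   follows (assuming the ``dnf-plugins-core`` package, which provides
+   ``dnf config-manager``, is installed)::
+
+      $ sudo dnf install epel-release
+      $ sudo dnf config-manager --set-enabled powertools
+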
+Here are the packages needed to build the Yocto Project documentation
+manuals::
-- *Documentation:* Packages needed if you are going to build out the
- Yocto Project documentation manuals::
+ $ sudo dnf install git make python3-pip which inkscape texlive-fncychap
+ &PIP3_HOST_PACKAGES_DOC;
- $ sudo dnf install make python3-pip which
- &PIP3_HOST_PACKAGES_DOC;
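+
+Whatever the distribution, once these packages are installed you can
+typically build the manuals by running the following from the
+``documentation`` directory of the source tree::
+
+   $ make html
+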
+.. _system-requirements-buildtools:
-Required Git, tar, Python and gcc Versions
-==========================================
+Required Git, tar, Python, make and gcc Versions
+================================================
In order to use the build system, your host development system must meet
the following version requirements for Git, tar, and Python:
@@ -246,10 +242,12 @@ the following version requirements for Git, tar, and Python:
- Python &MIN_PYTHON_VERSION; or greater
+- GNU make &MIN_MAKE_VERSION; or greater
+
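+You can check the versions currently installed on your host with commands
+such as::
+
+   $ git --version
+   $ tar --version
+   $ python3 --version
+   $ make --version
+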
If your host development system does not meet all these requirements,
-you can resolve this by installing a ``buildtools`` tarball that
-contains these tools. You can get the tarball one of two ways: download
-a pre-built tarball or use BitBake to build the tarball.
+you can resolve this by installing a :term:`buildtools` tarball that
+contains these tools. You can either download a pre-built tarball or
+use BitBake to build one.
In addition, your host development system must meet the following
version requirement for gcc:
@@ -257,21 +255,26 @@ version requirement for gcc:
- gcc &MIN_GCC_VERSION; or greater
If your host development system does not meet this requirement, you can
-resolve this by installing a ``buildtools-extended`` tarball that
+resolve this by installing a :term:`buildtools-extended` tarball that
contains additional tools, the equivalent of the Debian/Ubuntu ``build-essential``
package.
+For systems with a broken make version (e.g. make 4.2.1 without patches) but
+where the rest of the host tools are usable, you can use the :term:`buildtools-make`
+tarball instead.
+
In the sections that follow, three different methods will be described for
-installing the ``buildtools`` or ``buildtools-extended`` toolset.
+installing the :term:`buildtools`, :term:`buildtools-extended` or :term:`buildtools-make`
+toolset.
Installing a Pre-Built ``buildtools`` Tarball with ``install-buildtools`` script
--------------------------------------------------------------------------------
The ``install-buildtools`` script is the easiest of the three methods by
-which you can get these tools. It downloads a pre-built buildtools
+which you can get these tools. It downloads a pre-built :term:`buildtools`
installer and automatically installs the tools for you:
-1. Execute the ``install-buildtools`` script. Here is an example::
+#. Execute the ``install-buildtools`` script. Here is an example::
$ cd poky
$ scripts/install-buildtools \
@@ -280,7 +283,7 @@ installer and automatically installs the tools for you:
--release yocto-&DISTRO; \
--installer-version &DISTRO;
- During execution, the buildtools tarball will be downloaded, the
+ During execution, the :term:`buildtools` tarball will be downloaded, the
checksum of the download will be verified, the installer will be run
for you, and some basic checks will be run to make sure the
installation is functional.
@@ -291,25 +294,29 @@ installer and automatically installs the tools for you:
/path/to/poky/buildtools
If your host development system needs the additional tools provided
- in the ``buildtools-extended`` tarball, you can instead execute the
+ in the :term:`buildtools-extended` tarball, you can instead execute the
``install-buildtools`` script with the default parameters::
$ cd poky
$ scripts/install-buildtools
-2. Source the tools environment setup script by using a command like the
+   Alternatively, if your host development system only has a broken ``make``
+   version, so that a known good version of ``make`` is all you need,
+ you can use the ``--make-only`` option::
+
+ $ cd poky
+ $ scripts/install-buildtools --make-only
+
+#. Source the tools environment setup script by using a command like the
following::
$ source /path/to/poky/buildtools/environment-setup-x86_64-pokysdk-linux
- Of course, you need to supply your installation directory and be sure to
- use the right file (i.e. i586 or x86_64).
-
After you have sourced the setup script, the tools are added to
``PATH`` and any other environment variables required to run the
   tools are initialized. The results are working versions of
Git, tar, Python and ``chrpath``. And in the case of the
- ``buildtools-extended`` tarball, additional working versions of tools
+ :term:`buildtools-extended` tarball, additional working versions of tools
including ``gcc``, ``make`` and the other tools included in
``packagegroup-core-buildessential``.
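+
+   For example, you can check that the tools from the tarball are now the
+   ones found first in your ``PATH`` with a command such as::
+
+      $ which git python3 make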
@@ -317,12 +324,14 @@ Downloading a Pre-Built ``buildtools`` Tarball
----------------------------------------------
If you would prefer not to use the ``install-buildtools`` script, you can instead
-download and run a pre-built buildtools installer yourself with the following
+download and run a pre-built :term:`buildtools` installer yourself with the following
steps:
-1. Locate and download the ``*.sh`` at :yocto_dl:`/releases/yocto/yocto-&DISTRO;/buildtools/`
+#. Go to :yocto_dl:`/releases/yocto/yocto-&DISTRO;/buildtools/`, locate and
+ download the ``.sh`` file corresponding to your host architecture
+ and to :term:`buildtools`, :term:`buildtools-extended` or :term:`buildtools-make`.
-2. Execute the installation script. Here is an example for the
+#. Execute the installation script. Here is an example for the
traditional installer::
$ sh ~/Downloads/x86_64-buildtools-nativesdk-standalone-&DISTRO;.sh
@@ -331,51 +340,55 @@ steps:
$ sh ~/Downloads/x86_64-buildtools-extended-nativesdk-standalone-&DISTRO;.sh
+ An example for the make-only installer::
+
+ $ sh ~/Downloads/x86_64-buildtools-make-nativesdk-standalone-&DISTRO;.sh
+
During execution, a prompt appears that allows you to choose the
installation directory. For example, you could choose the following:
``/home/your-username/buildtools``
-3. Source the tools environment setup script by using a command like the
- following::
-
- $ source /home/your_username/buildtools/environment-setup-i586-poky-linux
+#. As instructed by the installer script, you will have to source the tools
+ environment setup script::
- Of
- course, you need to supply your installation directory and be sure to
- use the right file (i.e. i585 or x86-64).
+ $ source /home/your_username/buildtools/environment-setup-x86_64-pokysdk-linux
After you have sourced the setup script, the tools are added to
``PATH`` and any other environment variables required to run the
   tools are initialized. The results are working versions of
Git, tar, Python and ``chrpath``. And in the case of the
- ``buildtools-extended`` tarball, additional working versions of tools
+ :term:`buildtools-extended` tarball, additional working versions of tools
including ``gcc``, ``make`` and the other tools included in
``packagegroup-core-buildessential``.
Building Your Own ``buildtools`` Tarball
----------------------------------------
-Building and running your own buildtools installer applies only when you
+Building and running your own :term:`buildtools` installer applies only when you
have a build host that can already run BitBake. In this case, you use
that machine to build the ``.sh`` file and then take steps to transfer
and run it on a machine that does not meet the minimal Git, tar, and
Python (or gcc) requirements.
-Here are the steps to take to build and run your own buildtools
+Here are the steps to take to build and run your own :term:`buildtools`
installer:
-1. On the machine that is able to run BitBake, be sure you have set up
+#. On the machine that is able to run BitBake, be sure you have set up
your build environment with the setup script
(:ref:`structure-core-script`).
-2. Run the BitBake command to build the tarball::
+#. Run the BitBake command to build the tarball::
$ bitbake buildtools-tarball
- or run the BitBake command to build the extended tarball::
+ or to build the extended tarball::
$ bitbake buildtools-extended-tarball
+ or to build the make-only tarball::
+
+ $ bitbake buildtools-make-tarball
+
.. note::
The :term:`SDKMACHINE` variable in your ``local.conf`` file determines
@@ -384,37 +397,37 @@ installer:
Once the build completes, you can find the ``.sh`` file that installs
the tools in the ``tmp/deploy/sdk`` subdirectory of the
:term:`Build Directory`. The installer file has the string
- "buildtools" (or "buildtools-extended") in the name.
+ "buildtools" or "buildtools-extended" in the name.
-3. Transfer the ``.sh`` file from the build host to the machine that
+#. Transfer the ``.sh`` file from the build host to the machine that
does not meet the Git, tar, or Python (or gcc) requirements.
-4. On the machine that does not meet the requirements, run the ``.sh``
- file to install the tools. Here is an example for the traditional
- installer::
+#. On this machine, run the ``.sh`` file to install the tools. Here is an
+ example for the traditional installer::
$ sh ~/Downloads/x86_64-buildtools-nativesdk-standalone-&DISTRO;.sh
- Here is an example for the extended installer::
+ For the extended installer::
$ sh ~/Downloads/x86_64-buildtools-extended-nativesdk-standalone-&DISTRO;.sh
+ And for the make-only installer::
+
+ $ sh ~/Downloads/x86_64-buildtools-make-nativesdk-standalone-&DISTRO;.sh
+
During execution, a prompt appears that allows you to choose the
installation directory. For example, you could choose the following:
``/home/your_username/buildtools``
-5. Source the tools environment setup script by using a command like the
+#. Source the tools environment setup script by using a command like the
following::
$ source /home/your_username/buildtools/environment-setup-x86_64-poky-linux
- Of course, you need to supply your installation directory and be sure to
- use the right file (i.e. i586 or x86_64).
-
After you have sourced the setup script, the tools are added to
``PATH`` and any other environment variables required to run the
   tools are initialized. The results are working versions of
Git, tar, Python and ``chrpath``. And in the case of the
- ``buildtools-extended`` tarball, additional working versions of tools
+ :term:`buildtools-extended` tarball, additional working versions of tools
including ``gcc``, ``make`` and the other tools included in
``packagegroup-core-buildessential``.
diff --git a/documentation/ref-manual/tasks.rst b/documentation/ref-manual/tasks.rst
index a2b8763e7c..346e9491f3 100644
--- a/documentation/ref-manual/tasks.rst
+++ b/documentation/ref-manual/tasks.rst
@@ -343,7 +343,7 @@ while ``file2.patch`` would not be applied.
You can find out more about the patching process in the
":ref:`overview-manual/concepts:patching`" section in
the Yocto Project Overview and Concepts Manual and the
-":ref:`dev-manual/common-tasks:patching code`" section in the
+":ref:`dev-manual/new-recipe:patching code`" section in the
Yocto Project Development Tasks Manual.
.. _ref-tasks-populate_lic:
@@ -369,7 +369,7 @@ information.
``do_populate_sdk_ext``
-----------------------
-Creates the file and directory structure for an installable extensible
+Creates the file and directory structure for an installable extensible
SDK (eSDK). See the ":ref:`overview-manual/concepts:sdk generation`"
section in the Yocto Project Overview and Concepts Manual for more
information.
@@ -481,9 +481,29 @@ You can run this task using BitBake as follows::
$ bitbake -c cleanall recipe
-Typically, you would not normally use the ``cleanall`` task. Do so only
-if you want to start fresh with the :ref:`ref-tasks-fetch`
-task.
+You should never use the :ref:`ref-tasks-cleanall` task in a normal
+scenario. If you want to start fresh with the :ref:`ref-tasks-fetch` task,
+use the following command instead::
+
+ $ bitbake -f -c fetch recipe
+
+.. note::
+
+ The reason to prefer ``bitbake -f -c fetch`` is that the
+ :ref:`ref-tasks-cleanall` task would break in some cases, such as::
+
+ $ bitbake -c fetch recipe
+ $ bitbake -c cleanall recipe-native
+ $ bitbake -c unpack recipe
+
+ because after step 1 there is a stamp file for the
+ :ref:`ref-tasks-fetch` task of ``recipe``, and it won't be removed at
+ step 2 because step 2 uses a different work directory. So the unpack task
+   at step 3 will try to extract the downloaded archive and fail, as the
+   archive has been deleted at step 2.
+
+   Note that this also applies to concurrent BitBake processes when a
+   shared download directory (:term:`DL_DIR`) is set up.
.. _ref-tasks-cleansstate:
@@ -507,7 +527,19 @@ scratch is guaranteed.
.. note::
- The ``do_cleansstate`` task cannot remove sstate from a remote sstate
+ Using :ref:`ref-tasks-cleansstate` with a shared :term:`SSTATE_DIR` is
+ not recommended because it could trigger an error during the build of a
+ separate BitBake instance. This is because the builds check sstate "up
+ front" but download the files later, so it if is deleted in the
+ meantime, it will cause an error but not a total failure as it will
+ rebuild it.
+
+ The reliable and preferred way to force a new build is to use ``bitbake
+ -f`` instead.
+
+.. note::
+
+ The :ref:`ref-tasks-cleansstate` task cannot remove sstate from a remote sstate
mirror. If you need to build a target from scratch using remote mirrors, use
the "-f" option as follows::
@@ -522,7 +554,7 @@ scratch is guaranteed.
Starts a shell in which an interactive Python interpreter allows you to
interact with the BitBake build environment. From within this shell, you
can directly examine and set bits from the data store and execute
-functions as if within the BitBake environment. See the ":ref:`dev-manual/common-tasks:using a python development shell`" section in
+functions as if within the BitBake environment. See the ":ref:`dev-manual/python-development-shell:using a Python development shell`" section in
the Yocto Project Development Tasks Manual for more information about
using ``pydevshell``.
@@ -532,7 +564,7 @@ using ``pydevshell``.
---------------
Starts a shell whose environment is set up for development, debugging,
-or both. See the ":ref:`dev-manual/common-tasks:using a development shell`" section in the
+or both. See the ":ref:`dev-manual/development-shell:using a development shell`" section in the
Yocto Project Development Tasks Manual for more information about using
``devshell``.
@@ -597,7 +629,7 @@ information on how the root filesystem is created.
Boots an image and performs runtime tests within the image. For
information on automatically testing images, see the
-":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
.. _ref-tasks-testimage_auto:
@@ -610,7 +642,7 @@ after it has been built. This task is enabled when you set
:term:`TESTIMAGE_AUTO` equal to "1".
For information on automatically testing images, see the
-":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
Kernel-Related Tasks
diff --git a/documentation/ref-manual/terms.rst b/documentation/ref-manual/terms.rst
index cba514c345..2b9bccb4a9 100644
--- a/documentation/ref-manual/terms.rst
+++ b/documentation/ref-manual/terms.rst
@@ -4,7 +4,7 @@
Yocto Project Terms
*******************
-Following is a list of terms and definitions users new to the Yocto Project
+Here is a list of terms and definitions users new to the Yocto Project
development environment might find helpful. While some of these terms are
universal, the list includes them just in case:
@@ -21,7 +21,7 @@ universal, the list includes them just in case:
Information in append files extends or overrides the information in the
similarly-named recipe file. For an example of an append file in use, see
- the ":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
+ the ":ref:`dev-manual/layers:appending other layers metadata with your layer`"
section in the Yocto Project Development Tasks Manual.
When you name an append file, you can use the "``%``" wildcard character
@@ -66,8 +66,8 @@ universal, the list includes them just in case:
(i.e. :ref:`ref-manual/structure:\`\`oe-init-build-env\`\``). The
:term:`TOPDIR` variable points to the Build Directory.
- You have a lot of flexibility when creating the Build Directory.
- Following are some examples that show how to create the directory. The
+ You have a lot of flexibility when creating the :term:`Build Directory`.
+ Here are some examples that show how to create the directory. The
examples assume your :term:`Source Directory` is named ``poky``:
- Create the Build Directory inside your Source Directory and let
@@ -109,6 +109,24 @@ universal, the list includes them just in case:
environment. The build system is sometimes referred to as the development
host.
+ :term:`buildtools`
+      Build tools in binary form, providing the required versions of
+      development tools (such as Git, GCC, Python and make), allowing the
+      OpenEmbedded build system to run on a development host that lacks
+      these minimum versions.
+
+ See the ":ref:`system-requirements-buildtools`" paragraph in the
+ Reference Manual for details about downloading or building an archive
+ of such tools.
+
+ :term:`buildtools-extended`
+ A set of :term:`buildtools` binaries extended with additional development
+ tools, such as a required version of the GCC compiler to run the
+ OpenEmbedded build system.
+
+ :term:`buildtools-make`
+ A variant of :term:`buildtools`, just providing the required
+ version of ``make`` to run the OpenEmbedded build system.
+
:term:`Classes`
Files that provide for logic encapsulation and inheritance so that
commonly used patterns can be defined once and then easily used in
@@ -192,6 +210,48 @@ universal, the list includes them just in case:
of the supported image types that the Yocto Project provides, see the
":ref:`ref-manual/images:Images`" chapter.
+ :term:`Initramfs`
+ An Initial RAM Filesystem (:term:`Initramfs`) is an optionally compressed
+ :wikipedia:`cpio <Cpio>` archive which is extracted
+ by the Linux kernel into RAM in a special :wikipedia:`tmpfs <Tmpfs>`
+ instance, used as the initial root filesystem.
+
+      This is a replacement for the legacy init RAM disk ("initrd")
+      technique, which boots from an emulated block device in RAM but is
+      less efficient because of the overhead of going through a filesystem
+      and of duplicating accessed file contents in the file cache in RAM,
+      as for any block device.
+
+ .. note::
+
+ As far as bootloaders are concerned, :term:`Initramfs` and "initrd"
+ images are still copied to RAM in the same way. That's why most
+ most bootloaders refer to :term:`Initramfs` images as "initrd"
+         bootloaders refer to :term:`Initramfs` images as "initrd"
+
+ This kind of mechanism is typically used for two reasons:
+
+ - For booting the same kernel binary on multiple systems requiring
+ different device drivers. The :term:`Initramfs` image is then customized
+ for each type of system, to include the specific kernel modules
+ necessary to access the final root filesystem. This technique
+         is used on all GNU/Linux distributions for desktops and servers.
+
+ - For booting faster. As the root filesystem is extracted into RAM,
+ accessing the first user-space applications is very fast, compared
+ to having to initialize a block device, to access multiple blocks
+ from it, and to go through a filesystem having its own overhead.
+         For example, this makes it possible to display a splashscreen very
+         early, and to later take care of mounting the final root filesystem
+         and loading less time-critical kernel drivers.
+
+ This cpio archive can either be loaded to RAM by the bootloader,
+ or be included in the kernel binary.
+
+ For information on creating and using an :term:`Initramfs`, see the
+ ":ref:`dev-manual/building:building an initial ram filesystem (Initramfs) image`"
+ section in the Yocto Project Development Tasks Manual.
+
:term:`Layer`
A collection of related recipes. Layers allow you to consolidate related
metadata to customize your build. Layers also isolate information used
@@ -205,12 +265,18 @@ universal, the list includes them just in case:
":ref:`overview-manual/yp-intro:The Yocto Project Layer
Model`" section in the Yocto Project Overview and Concepts Manual. For
more detailed information on layers, see the
- ":ref:`dev-manual/common-tasks:Understanding and Creating
+ ":ref:`dev-manual/layers:Understanding and Creating
Layers`" section in the Yocto Project Development Tasks Manual. For a
discussion specifically on BSP Layers, see the ":ref:`bsp-guide/bsp:BSP
Layers`" section in the Yocto Project Board Support Packages (BSP)
Developer's Guide.
+ :term:`LTS`
+ This term means "Long Term Support", and in the context of the Yocto
+ Project, it corresponds to selected stable releases for which bug and
+ security fixes are provided for at least four years. See
+ the :ref:`ref-long-term-support-releases` section for details.
+
:term:`Metadata`
A key element of the Yocto Project is the Metadata that
is used to construct a Linux distribution and is contained in the
@@ -230,6 +296,12 @@ universal, the list includes them just in case:
:yocto_git:`yocto-kernel-cache </yocto-kernel-cache>`
Git repository.
+ :term:`Mixin`
+ A :term:`Mixin` layer is a layer which can be created by the community to
+ add a specific feature or support a new version of some package for an
+ :term:`LTS` release. See the :ref:`ref-long-term-support-releases`
+ section for details.
+
:term:`OpenEmbedded-Core (OE-Core)`
OE-Core is metadata comprised of
foundational recipes, classes, and associated files that are meant to
@@ -270,7 +342,7 @@ universal, the list includes them just in case:
your Linux distribution.
Another point worth noting is that historically within the Yocto
- Project, recipes were referred to as packages - thus, the existence
+ Project, recipes were referred to as packages --- thus, the existence
of several BitBake variables that are seemingly mis-named, (e.g.
:term:`PR`, :term:`PV`, and
:term:`PE`).
@@ -323,6 +395,23 @@ universal, the list includes them just in case:
:term:`build host<Build Host>` and other components, that can
work on specific hardware.
+ :term:`SBOM`
+      This term means *Software Bill of Materials*. When you distribute
+      software, an SBOM provides a description of all the components you used,
+ their corresponding licenses, their dependencies, the changes that were
+ applied and the known vulnerabilities that were fixed.
+
+ This can be used by the recipients of the software to assess
+ their exposure to license compliance and security vulnerability issues.
+
+ See the :wikipedia:`Software Supply Chain <Software_supply_chain>`
+ article on Wikipedia for more details.
+
+ The OpenEmbedded Build System can generate such documentation for your
+ project, in :term:`SPDX` format, based on all the metadata it used to
+ build the software images. See the ":ref:`dev-manual/sbom:creating
+ a software bill of materials`" section of the Development Tasks manual.
+
:term:`Source Directory`
This term refers to the directory structure
created as a result of creating a local copy of the ``poky`` Git
@@ -369,7 +458,7 @@ universal, the list includes them just in case:
Directory created by unpacking a released tarball as compared to
cloning ``git://git.yoctoproject.org/poky``. When you unpack a
tarball, you have an exact copy of the files based on the time of
- release - a fixed release point. Any changes you make to your local
+ release --- a fixed release point. Any changes you make to your local
files in the Source Directory are on top of the release and will
remain local only. On the other hand, when you clone the ``poky`` Git
repository, you have an active development repository with access to
@@ -383,6 +472,17 @@ universal, the list includes them just in case:
":ref:`overview-manual/development-environment:repositories, tags, and branches`"
section in the Yocto Project Overview and Concepts Manual.
+ :term:`SPDX`
+ This term means *Software Package Data Exchange*, and is used as an open
+ standard for providing a *Software Bill of Materials* (:term:`SBOM`).
+ This standard is developed through a `Linux Foundation project
+ <https://spdx.dev/>`__ and is used by the OpenEmbedded Build System to
+      provide an :term:`SBOM` associated with each software image.
+
+ For details, see Wikipedia's :wikipedia:`SPDX page <Software_Package_Data_Exchange>`
+ and the ":ref:`dev-manual/sbom:creating a software bill of materials`"
+ section of the Development Tasks manual.
+
:term:`Task`
A per-recipe unit of execution for BitBake (e.g.
:ref:`ref-tasks-compile`,
diff --git a/documentation/ref-manual/variables.rst b/documentation/ref-manual/variables.rst
index f8808cc052..c2a6566341 100644
--- a/documentation/ref-manual/variables.rst
+++ b/documentation/ref-manual/variables.rst
@@ -223,7 +223,7 @@ system and gives an overview of their function and contents.
so that it does contain ``${SRCPV}``.
For more information see the
- ":ref:`dev-manual/common-tasks:automatically incrementing a package version number`"
+ ":ref:`dev-manual/packages:automatically incrementing a package version number`"
section in the Yocto Project Development Tasks Manual.
:term:`AUTO_SYSLINUXMENU`
@@ -239,7 +239,7 @@ system and gives an overview of their function and contents.
The list simply presents the tunes that are available. Not all tunes
may be compatible with a particular machine configuration, or with
each other in a
- :ref:`Multilib <dev-manual/common-tasks:combining multiple versions of library files into one image>`
+ :ref:`Multilib <dev-manual/libraries:combining multiple versions of library files into one image>`
configuration.
To add a tune to the list, be sure to append it with spaces using the
@@ -304,7 +304,7 @@ system and gives an overview of their function and contents.
:term:`BASE_LIB`
The library directory name for the CPU or Application Binary
Interface (ABI) tune. The :term:`BASE_LIB` applies only in the Multilib
- context. See the ":ref:`dev-manual/common-tasks:combining multiple versions of library files into one image`"
+ context. See the ":ref:`dev-manual/libraries:combining multiple versions of library files into one image`"
section in the Yocto Project Development Tasks Manual for information
on Multilib.
@@ -318,7 +318,7 @@ system and gives an overview of their function and contents.
:term:`BB_ALLOWED_NETWORKS`
Specifies a space-delimited list of hosts that the fetcher is allowed
- to use to obtain the required source code. Following are
+ to use to obtain the required source code. Here are
considerations surrounding this variable:
- This host list is only used if :term:`BB_NO_NETWORK` is either not set
@@ -528,7 +528,7 @@ system and gives an overview of their function and contents.
is not set higher than "20".
For more information on speeding up builds, see the
- ":ref:`dev-manual/common-tasks:speeding up a build`"
+ ":ref:`dev-manual/speeding-up-build:speeding up a build`"
section in the Yocto Project Development Tasks Manual.
:term:`BB_SERVER_TIMEOUT`
@@ -591,7 +591,7 @@ system and gives an overview of their function and contents.
This variable is useful in situations where the same recipe appears
in more than one layer. Setting this variable allows you to
prioritize a layer against other layers that contain the same recipe
- - effectively letting you control the precedence for the multiple
+ --- effectively letting you control the precedence for the multiple
layers. The precedence established through this variable stands
regardless of a recipe's version (:term:`PV` variable). For
example, a layer that has a recipe with a higher :term:`PV` value but for
@@ -725,7 +725,7 @@ system and gives an overview of their function and contents.
For information on how to use :term:`BBMULTICONFIG` in an environment
that supports building targets with multiple configurations, see the
- ":ref:`dev-manual/common-tasks:building images for multiple targets using multiple configurations`"
+ ":ref:`dev-manual/building:building images for multiple targets using multiple configurations`"
section in the Yocto Project Development Tasks Manual.
:term:`BBPATH`
@@ -888,7 +888,7 @@ system and gives an overview of their function and contents.
:term:`BUILD_OS`
Specifies the operating system in use on the build host (e.g.
"linux"). The OpenEmbedded build system sets the value of
- :term:`BUILD_OS` from the OS reported by the ``uname`` command - the
+ :term:`BUILD_OS` from the OS reported by the ``uname`` command --- the
first word, converted to lower-case characters.
:term:`BUILD_PREFIX`
@@ -971,7 +971,7 @@ system and gives an overview of their function and contents.
When inheriting the :ref:`buildhistory <ref-classes-buildhistory>`
class, this variable specifies the build history features to be
enabled. For more information on how build history works, see the
- ":ref:`dev-manual/common-tasks:maintaining build output quality`"
+ ":ref:`dev-manual/build-quality:maintaining build output quality`"
section in the Yocto Project Development Tasks Manual.
You can specify these features in the form of a space-separated list:
@@ -1156,6 +1156,26 @@ system and gives an overview of their function and contents.
optional at the distribution level, in case the hardware supports
Bluetooth but you do not ever intend to use it.
+ :term:`COMMERCIAL_AUDIO_PLUGINS`
+ This variable is specific to the :yocto_git:`GStreamer recipes
+ </poky/tree/meta/recipes-multimedia/gstreamer/gstreamer1.0-meta-base.bb>`.
+      It allows building the GStreamer `"ugly"
+ <https://github.com/GStreamer/gst-plugins-ugly>`__ and
+ `"bad" <https://github.com/GStreamer/gst-plugins-bad>`__ audio plugins.
+
+ See the :ref:`dev-manual/licenses:other variables related to commercial licenses`
+ section for usage details.
+
+ :term:`COMMERCIAL_VIDEO_PLUGINS`
+ This variable is specific to the :yocto_git:`GStreamer recipes
+ </poky/tree/meta/recipes-multimedia/gstreamer/gstreamer1.0-meta-base.bb>`.
+      It allows building the GStreamer `"ugly"
+ <https://github.com/GStreamer/gst-plugins-ugly>`__ and
+ `"bad" <https://github.com/GStreamer/gst-plugins-bad>`__ video plugins.
+
+ See the :ref:`dev-manual/licenses:other variables related to commercial licenses`
+ section for usage details.
+
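+      Here is a sketch of how these variables could be set in ``local.conf``
+      (the plugin package names below are only illustrative and depend on
+      the GStreamer plugins you actually need)::
+
+         # Hypothetical plugin package names; adjust to the plugins
+         # provided by your GStreamer version.
+         COMMERCIAL_AUDIO_PLUGINS ?= "gstreamer1.0-plugins-ugly-mpg123"
+         COMMERCIAL_VIDEO_PLUGINS ?= "gstreamer1.0-plugins-ugly-mpeg2dec"
+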
:term:`COMMON_LICENSE_DIR`
Points to ``meta/files/common-licenses`` in the
:term:`Source Directory`, which is where generic license
@@ -1182,6 +1202,32 @@ system and gives an overview of their function and contents.
speed since the build system skips parsing recipes not compatible
with the current machine.
+ If one wants to have a recipe only available for some architectures
+ (here ``aarch64`` and ``mips64``), the following can be used::
+
+ COMPATIBLE_MACHINE = "^$"
+      COMPATIBLE_MACHINE:aarch64 = "^(aarch64)$"
+ COMPATIBLE_MACHINE:mips64 = "^(mips64)$"
+
+   The first line means "match all machines whose :term:`MACHINEOVERRIDES`
+   contains the empty string", which never matches any machine.
+
+ The second is for matching all machines whose :term:`MACHINEOVERRIDES`
+ contains one override which is exactly ``aarch64``.
+
+ The third is for matching all machines whose :term:`MACHINEOVERRIDES`
+ contains one override which is exactly ``mips64``.
+
+ The same could be achieved with::
+
+ COMPATIBLE_MACHINE = "^(aarch64|mips64)$"
+
+ .. note::
+
+      When :term:`COMPATIBLE_MACHINE` is set in a recipe that inherits from
+      native, the recipe is always skipped. All native recipes must be
+ entirely target independent and should not rely on :term:`MACHINE`.
+
:term:`COMPLEMENTARY_GLOB`
Defines wildcards to match when installing a list of complementary
packages for all the packages explicitly (or implicitly) installed in
@@ -1274,8 +1320,8 @@ system and gives an overview of their function and contents.
If you specify multiple directories and files, the initramfs image
will be the aggregate of all of them.
- For information on creating an initramfs, see the
- ":ref:`dev-manual/common-tasks:building an initial ram filesystem (initramfs) image`" section
+ For information on creating an :term:`Initramfs`, see the
+ ":ref:`dev-manual/building:building an initial ram filesystem (Initramfs) image`" section
in the Yocto Project Development Tasks Manual.
:term:`CONFIG_SITE`
@@ -1296,6 +1342,19 @@ system and gives an overview of their function and contents.
the recipe will be skipped, and if the build system attempts to build
the recipe then an error will be triggered.
+ :term:`CONVERSION_CMD`
+ This variable is used for storing image conversion commands.
+ Image conversion can convert an image into different objects like:
+
+ - Compressed version of the image
+
+ - Checksums for the image
+
+ An example of :term:`CONVERSION_CMD` from :ref:`image-types
+ <ref-classes-image_types>` class is::
+
+ CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+
:term:`COPY_LIC_DIRS`
If set to "1" along with the
:term:`COPY_LIC_MANIFEST` variable, the
@@ -1310,7 +1369,7 @@ system and gives an overview of their function and contents.
newly installed packages to an image, which might be most suitable for
read-only filesystems that cannot be upgraded. See the
:term:`LICENSE_CREATE_PACKAGE` variable for additional information.
- You can also reference the ":ref:`dev-manual/common-tasks:providing license text`"
+ You can also reference the ":ref:`dev-manual/licenses:providing license text`"
section in the Yocto Project Development Tasks Manual for
information on providing license text.
@@ -1326,7 +1385,7 @@ system and gives an overview of their function and contents.
newly installed packages to an image, which might be most suitable for
read-only filesystems that cannot be upgraded. See the
:term:`LICENSE_CREATE_PACKAGE` variable for additional information.
- You can also reference the ":ref:`dev-manual/common-tasks:providing license text`"
+ You can also reference the ":ref:`dev-manual/licenses:providing license text`"
section in the Yocto Project Development Tasks Manual for
information on providing license text.
@@ -1469,15 +1528,31 @@ system and gives an overview of their function and contents.
# This is windows only issue.
CVE_CHECK_IGNORE += "CVE-2020-15523"
+ :term:`CVE_CHECK_SHOW_WARNINGS`
+ Specifies whether or not the :ref:`cve-check <ref-classes-cve-check>`
+ class should generate warning messages on the console when unpatched
+ CVEs are found. The default is "1", but you may wish to set it to "0" if
+ you are already examining/processing the logs after the build has
+ completed and thus do not need the warning messages.
+
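+      For example, to silence the console warnings in ``local.conf`` while
+      still generating the CVE check results::
+
+         CVE_CHECK_SHOW_WARNINGS = "0"
+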
:term:`CVE_CHECK_SKIP_RECIPE`
The list of package names (:term:`PN`) for which
CVEs (Common Vulnerabilities and Exposures) are ignored.
+ :term:`CVE_DB_UPDATE_INTERVAL`
+ Specifies the CVE database update interval in seconds, as used by
+ ``cve-update-db-native``. The default value is "86400" i.e. once a day
+ (24*60*60). If the value is set to "0" then the update will be forced
+ every time. Alternatively, a negative value e.g. "-1" will disable
+ updates entirely.
+
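+      For example, to update the database only once a week rather than once
+      a day, you could set in ``local.conf``::
+
+         # 7 * 24 * 60 * 60 seconds
+         CVE_DB_UPDATE_INTERVAL = "604800"
+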
:term:`CVE_PRODUCT`
In a recipe, defines the name used to match the recipe name
against the name in the upstream `NIST CVE database <https://nvd.nist.gov/>`__.
- The default is ${:term:`BPN`}. If it does not match the name in the NIST CVE
+ The default is ${:term:`BPN`} (except for recipes that inherit the
+ :ref:`pypi <ref-classes-pypi>` class where it is set based upon
+ :term:`PYPI_PACKAGE`). If it does not match the name in the NIST CVE
database or matches with multiple entries in the database, the default
value needs to be changed.
@@ -1492,6 +1567,18 @@ system and gives an overview of their function and contents.
CVE_PRODUCT = "vendor:package"
+ :term:`CVE_VERSION`
+ In a recipe, defines the version used to match the recipe version
+ against the version in the `NIST CVE database <https://nvd.nist.gov/>`__
+      when using :ref:`cve-check <ref-classes-cve-check>`.
+
+ The default is ${:term:`PV`} but if recipes use custom version numbers
+ which do not map to upstream software component release versions and the versions
+ used in the CVE database, then this variable can be used to set the
+ version number for :ref:`cve-check <ref-classes-cve-check>`. Example::
+
+ CVE_VERSION = "2.39"
+
:term:`CVSDIR`
The directory in which files checked out under the CVS system are
stored.
@@ -1688,7 +1775,7 @@ system and gives an overview of their function and contents.
``${TMPDIR}/deploy``.
For more information on the structure of the Build Directory, see
- ":ref:`ref-manual/structure:the build directory - \`\`build/\`\``" section.
+ ":ref:`ref-manual/structure:the build directory --- \`\`build/\`\``" section.
For more detail on the contents of the ``deploy`` directory, see the
":ref:`overview-manual/concepts:images`",
":ref:`overview-manual/concepts:package feeds`", and
@@ -1732,7 +1819,7 @@ system and gives an overview of their function and contents.
<ref-classes-image>` class.
For more information on the structure of the Build Directory, see
- ":ref:`ref-manual/structure:the build directory - \`\`build/\`\``" section.
+ ":ref:`ref-manual/structure:the build directory --- \`\`build/\`\``" section.
For more detail on the contents of the ``deploy`` directory, see the
":ref:`overview-manual/concepts:images`" and
":ref:`overview-manual/concepts:application development sdk`" sections both in
@@ -2020,13 +2107,25 @@ system and gives an overview of their function and contents.
:term:`DOC_COMPRESS`
When inheriting the :ref:`compress_doc <ref-classes-compress_doc>`
class, this variable sets the compression policy used when the
- OpenEmbedded build system compresses man pages and info pages. By
+ OpenEmbedded build system compresses manual and info pages. By
default, the compression method used is gz (gzip). Other policies
available are xz and bz2.
For information on policies and on how to use this variable, see the
comments in the ``meta/classes/compress_doc.bbclass`` file.
+ :term:`DT_FILES_PATH`
+ When compiling out-of-tree device tree sources using a recipe that
+ inherits the :ref:`ref-classes-devicetree` class, this variable specifies
+ the path to the directory containing dts files to build.
+
+ Defaults to the :term:`S` directory.
+
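+      For example, a recipe inheriting :ref:`ref-classes-devicetree` whose
+      device tree sources live in a ``dts`` subdirectory of the source tree
+      (a hypothetical layout) could set::
+
+         DT_FILES_PATH = "${S}/dts"
+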
+ :term:`DT_PADDING_SIZE`
+ When inheriting the :ref:`ref-classes-devicetree` class, this variable
+ specifies the size of padding appended to the device tree blob, used as
+ extra space typically for additional properties during boot.
+
:term:`EFI_PROVIDER`
When building bootable images (i.e. where ``hddimg``, ``iso``, or
``wic.vmdk`` is in :term:`IMAGE_FSTYPES`), the
@@ -2043,11 +2142,10 @@ system and gives an overview of their function and contents.
less).
:term:`ERR_REPORT_DIR`
- When used with the :ref:`report-error <ref-classes-report-error>`
- class, specifies the path used for storing the debug files created by
- the :ref:`error reporting
- tool <dev-manual/common-tasks:using the error reporting tool>`, which
- allows you to submit build errors you encounter to a central
+ When used with the :ref:`ref-classes-report-error` class, specifies the
+ path used for storing the debug files created by the :ref:`error reporting
+ tool <dev-manual/error-reporting-tool:using the error reporting tool>`,
+ which allows you to submit build errors you encounter to a central
database. By default, the value of this variable is
``${``\ :term:`LOG_DIR`\ ``}/error-report``.
@@ -2195,6 +2293,12 @@ system and gives an overview of their function and contents.
:ref:`kernel-yocto <ref-classes-kernel-yocto>` class in
``meta/classes`` to see how the variable is used.
+ :term:`EXTERNAL_TOOLCHAIN`
+ When you intend to use an
+ :ref:`external toolchain <dev-manual/external-toolchain:optionally using an external toolchain>`,
+      this variable lets you specify the directory where this toolchain was
+ installed.
+
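+      For example, assuming the toolchain is installed under a hypothetical
+      ``/opt/external-toolchain`` directory, ``local.conf`` could contain::
+
+         EXTERNAL_TOOLCHAIN = "/opt/external-toolchain"
+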
:term:`EXTERNALSRC`
When inheriting the :ref:`externalsrc <ref-classes-externalsrc>`
class, this variable points to the source tree, which is outside of
@@ -2204,7 +2308,7 @@ system and gives an overview of their function and contents.
See the ":ref:`ref-classes-externalsrc`" section for details. You
can also find information on how to use this variable in the
- ":ref:`dev-manual/common-tasks:building software from an external source`"
+ ":ref:`dev-manual/building:building software from an external source`"
section in the Yocto Project Development Tasks Manual.
:term:`EXTERNALSRC_BUILD`
@@ -2217,11 +2321,11 @@ system and gives an overview of their function and contents.
See the ":ref:`ref-classes-externalsrc`" section for details. You
can also find information on how to use this variable in the
- ":ref:`dev-manual/common-tasks:building software from an external source`"
+ ":ref:`dev-manual/building:building software from an external source`"
section in the Yocto Project Development Tasks Manual.
:term:`EXTRA_AUTORECONF`
- For recipes inheriting the :ref:`autotools <ref-classes-autotools>`
+ For recipes inheriting the :ref:`ref-classes-autotools`
class, you can use :term:`EXTRA_AUTORECONF` to specify extra options to
pass to the ``autoreconf`` command that is executed during the
:ref:`ref-tasks-configure` task.
@@ -2244,31 +2348,31 @@ system and gives an overview of their function and contents.
Here are some examples of features you can add:
- - "dbg-pkgs" - Adds -dbg packages for all installed packages including
+ - "dbg-pkgs" --- adds -dbg packages for all installed packages including
symbol information for debugging and profiling.
- - "debug-tweaks" - Makes an image suitable for debugging. For example, allows root logins without passwords and
+ - "debug-tweaks" --- makes an image suitable for debugging. For example, allows root logins without passwords and
enables post-installation logging. See the 'allow-empty-password' and
'post-install-logging' features in the ":ref:`ref-features-image`"
section for more information.
- - "dev-pkgs" - Adds -dev packages for all installed packages. This is
+ - "dev-pkgs" --- adds -dev packages for all installed packages. This is
useful if you want to develop against the libraries in the image.
- - "read-only-rootfs" - Creates an image whose root filesystem is
+ - "read-only-rootfs" --- creates an image whose root filesystem is
read-only. See the
- ":ref:`dev-manual/common-tasks:creating a read-only root filesystem`"
+ ":ref:`dev-manual/read-only-rootfs:creating a read-only root filesystem`"
section in the Yocto Project Development Tasks Manual for more
information
- - "tools-debug" - Adds debugging tools such as gdb and strace.
- - "tools-sdk" - Adds development tools such as gcc, make,
+ - "tools-debug" --- adds debugging tools such as gdb and strace.
+ - "tools-sdk" --- adds development tools such as gcc, make,
pkgconfig and so forth.
- - "tools-testapps" - Adds useful testing tools
+ - "tools-testapps" --- adds useful testing tools
such as ts_print, aplay, arecord and so forth.
For a complete list of image features that ships with the Yocto
Project, see the ":ref:`ref-features-image`" section.
For an example that shows how to customize your image by using this
- variable, see the ":ref:`dev-manual/common-tasks:customizing images using custom \`\`image_features\`\` and \`\`extra_image_features\`\``"
+ variable, see the ":ref:`dev-manual/customizing-images:customizing images using custom \`\`image_features\`\` and \`\`extra_image_features\`\``"
section in the Yocto Project Development Tasks Manual.
:term:`EXTRA_IMAGECMD`
@@ -2317,6 +2421,20 @@ system and gives an overview of their function and contents.
variable specifies additional configuration options you want to pass
to the ``scons`` command line.
+ :term:`EXTRA_OEMESON`
+ Additional `Meson <https://mesonbuild.com/>`__ options. See the
+ :ref:`ref-classes-meson` class for additional information.
+
+ In addition to standard Meson options, such options correspond to
+ `Meson build options <https://mesonbuild.com/Build-options.html>`__
+ defined in the ``meson_options.txt`` file in the sources to build.
+ Here is an example::
+
+ EXTRA_OEMESON = "-Dpython=disabled -Dvalgrind=disabled"
+
+ Note that any custom value for the Meson ``--buildtype`` option
+ should be set through the :term:`MESON_BUILDTYPE` variable.
+
:term:`EXTRA_USERS_PARAMS`
When inheriting the :ref:`extrausers <ref-classes-extrausers>`
class, this variable provides image level user and group operations.
@@ -2613,7 +2731,7 @@ system and gives an overview of their function and contents.
You can find out more about the patching process in the
":ref:`overview-manual/concepts:patching`" section
in the Yocto Project Overview and Concepts Manual and the
- ":ref:`dev-manual/common-tasks:patching code`" section in
+ ":ref:`dev-manual/new-recipe:patching code`" section in
the Yocto Project Development Tasks Manual. See the
:ref:`ref-tasks-patch` task as well.
@@ -2745,7 +2863,7 @@ system and gives an overview of their function and contents.
Allows to specify an extra search path for ``.so`` files
in GLib related recipes using GObject introspection,
and which do not compile without this setting.
- See the ":ref:`dev-manual/common-tasks:enabling gobject introspection support`"
+ See the ":ref:`dev-manual/gobject-introspection:enabling gobject introspection support`"
section for details.
:term:`GITDIR`
@@ -2767,6 +2885,73 @@ system and gives an overview of their function and contents.
GLIBC_GENERATE_LOCALES = "en_GB.UTF-8 en_US.UTF-8"
+ :term:`GO_IMPORT`
+ When inheriting the :ref:`ref-classes-go` class, this mandatory variable
+ sets the import path for the Go package that will be created for the code
+ to build. If you have a ``go.mod`` file in the source directory, this
+ typically matches the path in the ``module`` line in this file.
+
+ Other Go programs importing this package will use this path.
+
+ Here is an example setting from the
+ :yocto_git:`go-helloworld_0.1.bb </poky/tree/meta/recipes-extended/go-examples/go-helloworld_0.1.bb>`
+ recipe::
+
+ GO_IMPORT = "golang.org/x/example"
+
+ :term:`GO_INSTALL`
+ When inheriting the :ref:`ref-classes-go` class, this optional variable
+ specifies which packages in the sources should be compiled and
+ installed in the Go build space by the
+ `go install <https://go.dev/ref/mod#go-install>`__ command.
+
+ Here is an example setting from the
+ :oe_git:`crucible </meta-openembedded/tree/meta-oe/recipes-support/crucible/>`
+ recipe::
+
+ GO_INSTALL = "\
+ ${GO_IMPORT}/cmd/crucible \
+ ${GO_IMPORT}/cmd/habtool \
+ "
+
+ By default, :term:`GO_INSTALL` is defined as::
+
+ GO_INSTALL ?= "${GO_IMPORT}/..."
+
+ The ``...`` wildcard means that it will catch all
+ packages found in the sources.
+
+ See the :term:`GO_INSTALL_FILTEROUT` variable for
+ filtering out unwanted packages from the ones
+ found from the :term:`GO_INSTALL` value.
+
+ :term:`GO_INSTALL_FILTEROUT`
+ When using the Go "vendor" mechanism to bring in dependencies for a Go
+ package, the default :term:`GO_INSTALL` setting, which uses the ``...``
+ wildcard, will include the vendored packages in the build, which produces
+ incorrect results.
+
+ There are also some Go packages that are structured poorly, so that the
+ ``...`` wildcard results in building example or test code that should not
+ be included in the build, or could fail to build.
+
+ This optional variable allows for filtering out a subset of the sources.
+ It defaults to excluding everything under the ``vendor`` subdirectory
+      under the package's main directory. This is the normal location for vendored
+ packages, but it can be overridden by a recipe to filter out other
+ subdirectories if needed.
+
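+      As a sketch of the idea, the default value roughly corresponds to::
+
+         GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
+
+      and a recipe could extend it to also skip a hypothetical ``examples``
+      subdirectory::
+
+         GO_INSTALL_FILTEROUT:append = " ${GO_IMPORT}/examples/"
+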
+ :term:`GO_WORKDIR`
+ When using Go Modules, the current working directory must be the directory
+ containing the ``go.mod`` file, or one of its subdirectories. When the
+ ``go`` tool is used, it will automatically look for the ``go.mod`` file
+ in the Go working directory or in any parent directory, but not in
+ subdirectories.
+
+ When using the :ref:`ref-classes-go-mod` class to use Go modules,
+ the optional :term:`GO_WORKDIR` variable, defaulting to the value
+      of :term:`GO_IMPORT`, lets you specify a different Go working directory.
+
:term:`GROUPADD_PARAM`
When inheriting the :ref:`useradd <ref-classes-useradd>` class,
this variable specifies for a package what parameters should be
@@ -2777,6 +2962,14 @@ system and gives an overview of their function and contents.
GROUPADD_PARAM:${PN} = "-r netdev"
+ More than one group can be added by separating each set of different
+ groups' parameters with a semicolon.
+
+ Here is an example adding multiple groups from the ``useradd-example.bb``
+ file in the ``meta-skeleton`` layer::
+
+ GROUPADD_PARAM:${PN} = "-g 880 group1; -g 890 group2"
+
For information on the standard Linux shell command
``groupadd``, see https://linux.die.net/man/8/groupadd.
@@ -3030,22 +3223,28 @@ system and gives an overview of their function and contents.
the same files into a ``boot`` directory within the target partition.
You can find information on how to use the Wic tool in the
- ":ref:`dev-manual/common-tasks:creating partitioned images using wic`"
+ ":ref:`dev-manual/wic:creating partitioned images using wic`"
section of the Yocto Project Development Tasks Manual. Reference
material for Wic is located in the
":doc:`/ref-manual/kickstart`" chapter.
- :term:`IMAGE_CLASSES`
- A list of classes that all images should inherit. You typically use
- this variable to specify the list of classes that register the
- different types of images the OpenEmbedded build system creates.
+ :term:`IMAGE_BUILDINFO_FILE`
+ When using the :ref:`ref-classes-image-buildinfo` class,
+ specifies the file in the image to write the build information into. The
+ default value is "``${sysconfdir}/buildinfo``".
- The default value for :term:`IMAGE_CLASSES` is ``image_types``. You can
- set this variable in your ``local.conf`` or in a distribution
- configuration file.
+ :term:`IMAGE_BUILDINFO_VARS`
+ When using the :ref:`ref-classes-image-buildinfo` class,
+ specifies the list of variables to include in the `Build Configuration`
+ section of the output file (as a space-separated list). Defaults to
+ ":term:`DISTRO` :term:`DISTRO_VERSION`".
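+
+      For example, to also record the image name in the build information
+      file, an image recipe or ``local.conf`` could set (a minimal sketch)::
+
+         IMAGE_BUILDINFO_VARS = "DISTRO DISTRO_VERSION IMAGE_BASENAME"
+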
- For more information, see ``meta/classes/image_types.bbclass`` in the
- :term:`Source Directory`.
+ :term:`IMAGE_CLASSES`
+ A list of classes that all images should inherit. This is typically used
+ to enable functionality across all image recipes.
+
+ Classes specified in :term:`IMAGE_CLASSES` must be located in the
+ ``classes-recipe/`` or ``classes/`` subdirectories.
:term:`IMAGE_CMD`
Specifies the command to create the image file for a specific image
@@ -3102,7 +3301,7 @@ system and gives an overview of their function and contents.
the same files into a ``boot`` directory within the target partition.
You can find information on how to use the Wic tool in the
- ":ref:`dev-manual/common-tasks:creating partitioned images using wic`"
+ ":ref:`dev-manual/wic:creating partitioned images using wic`"
section of the Yocto Project Development Tasks Manual. Reference
material for Wic is located in the
":doc:`/ref-manual/kickstart`" chapter.
@@ -3123,7 +3322,7 @@ system and gives an overview of their function and contents.
the ":ref:`ref-features-image`" section.
For an example that shows how to customize your image by using this
- variable, see the ":ref:`dev-manual/common-tasks:customizing images using custom \`\`image_features\`\` and \`\`extra_image_features\`\``"
+ variable, see the ":ref:`dev-manual/customizing-images:customizing images using custom \`\`image_features\`\` and \`\`extra_image_features\`\``"
section in the Yocto Project Development Tasks Manual.
:term:`IMAGE_FSTYPES`
@@ -3178,8 +3377,8 @@ system and gives an overview of their function and contents.
:term:`PACKAGE_INSTALL` variable, which
allows the initial RAM filesystem (initramfs) recipe to use a
fixed set of packages and not be affected by :term:`IMAGE_INSTALL`.
- For information on creating an initramfs, see the
- ":ref:`dev-manual/common-tasks:building an initial ram filesystem (initramfs) image`"
+ For information on creating an :term:`Initramfs`, see the
+ ":ref:`dev-manual/building:building an initial ram filesystem (Initramfs) image`"
section in the Yocto Project Development Tasks Manual.
- Using :term:`IMAGE_INSTALL` with the
@@ -3255,7 +3454,7 @@ system and gives an overview of their function and contents.
IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
:term:`IMAGE_NAME_SUFFIX`
- Suffix used for the image output filename - defaults to ``".rootfs"``
+ Suffix used for the image output filename --- defaults to ``".rootfs"``
to distinguish the image file from other files created during image
building; however if this suffix is redundant or not desired you can
clear the value of this variable (set the value to ""). For example,
@@ -3519,9 +3718,18 @@ system and gives an overview of their function and contents.
:term:`INCOMPATIBLE_LICENSE`
Specifies a space-separated list of license names (as they would
appear in :term:`LICENSE`) that should be excluded
- from the build. Recipes that provide no alternatives to listed
+ from the build (if set globally), or from an image (if set locally
+ in an image recipe).
+
+ When the variable is set globally, recipes that provide no alternatives to listed
incompatible licenses are not built. Packages that are individually
licensed with the specified incompatible licenses will be deleted.
+ Most of the time this does not allow a feasible build (because it becomes impossible
+ to satisfy build time dependencies), so the recommended way to
+ implement license restrictions is to set the variable in specific
+ image recipes where the restrictions must apply. That way there
+ are no build time restrictions, but the license check is still
+ performed when the image's filesystem is assembled from packages.
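+
+   For instance, here is a sketch of restricting GPL-3.0-style licenses
+   only for a particular image, in the corresponding image recipe::
+
+      INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0* AGPL-3.0*"
+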
There is some support for wildcards in this variable's value,
however it is restricted to specific licenses. Currently only
@@ -3618,6 +3826,21 @@ system and gives an overview of their function and contents.
even if the toolchain's binaries are strippable, there are other files
needed for the build that are not strippable.
+ :term:`INIT_MANAGER`
+ Specifies the system init manager to use. Available options are:
+
+ - ``sysvinit`` - System V init (default for poky)
+ - ``systemd`` - systemd
+ - ``mdev-busybox`` - mdev provided by busybox
+ - ``none`` - no init manager
+
+ More concretely, this is used to include
+ ``conf/distro/include/init-manager-${INIT_MANAGER}.inc`` into the global
+ configuration. You can have a look at the ``conf/distro/include/init-manager-*.inc``
+ files for more information, and also the
+ ":ref:`dev-manual/init-manager:selecting an initialization manager`"
+ section in the Yocto Project Development Tasks Manual.
+
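+      For example, to select systemd as the init manager, a distribution
+      configuration file or ``local.conf`` can contain::
+
+         INIT_MANAGER = "systemd"
+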
:term:`INITRAMFS_DEPLOY_DIR_IMAGE`
Indicates the deploy directory used by ``do_bundle_initramfs`` where the
:term:`INITRAMFS_IMAGE` will be fetched from.
@@ -3672,8 +3895,8 @@ system and gives an overview of their function and contents.
For more information, you can also see the
:term:`INITRAMFS_IMAGE_BUNDLE`
variable, which allows the generated image to be bundled inside the
- kernel image. Additionally, for information on creating an initramfs
- image, see the ":ref:`dev-manual/common-tasks:building an initial ram filesystem (initramfs) image`" section
+ kernel image. Additionally, for information on creating an :term:`Initramfs`
+ image, see the ":ref:`dev-manual/building:building an initial ram filesystem (Initramfs) image`" section
in the Yocto Project Development Tasks Manual.
:term:`INITRAMFS_IMAGE_BUNDLE`
@@ -3725,7 +3948,7 @@ system and gives an overview of their function and contents.
See the
:yocto_git:`local.conf.sample.extended </poky/tree/meta-poky/conf/local.conf.sample.extended>`
file for additional information. Also, for information on creating an
- initramfs, see the ":ref:`dev-manual/common-tasks:building an initial ram filesystem (initramfs) image`" section
+ :term:`Initramfs`, see the ":ref:`dev-manual/building:building an initial ram filesystem (Initramfs) image`" section
in the Yocto Project Development Tasks Manual.
:term:`INITRAMFS_LINK_NAME`
@@ -3750,8 +3973,8 @@ system and gives an overview of their function and contents.
This allows the kernel to bundle an :term:`INITRAMFS_IMAGE` coming from
a separate multiconfig, this is meant to be used in addition to :term:`INITRAMFS_DEPLOY_DIR_IMAGE`.
- For more information on how to bundle an initramfs image from a separate
- multiconfig see the ":ref:`dev-manual/common-tasks:Bundling an Initramfs Image From a Separate Multiconfig`"
+ For more information on how to bundle an :term:`Initramfs` image from a separate
+ multiconfig see the ":ref:`dev-manual/building:Bundling an Initramfs Image From a Separate Multiconfig`"
section in the Yocto Project Development Tasks Manual.
:term:`INITRAMFS_NAME`
@@ -3860,7 +4083,7 @@ system and gives an overview of their function and contents.
Values for this variable are set in the kernel's recipe file and the
kernel's append file. For example, if you are using the
- ``linux-yocto_4.12`` kernel, the kernel recipe file is the
+ ``linux-yocto_5.15`` kernel, the kernel recipe file is the
``meta/recipes-kernel/linux/linux-yocto_4.12.bb`` file. :term:`KBRANCH`
is set as follows in that kernel recipe file::
@@ -3873,13 +4096,13 @@ system and gives an overview of their function and contents.
BSP layer for a given machine. For example, the append file for the
Beaglebone, EdgeRouter, and generic versions of both 32 and 64-bit IA
machines (``meta-yocto-bsp``) is named
- ``meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.12.bbappend``.
+   ``meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.15.bbappend``.
Here are the related statements from that append file::
- KBRANCH:genericx86 = "standard/base"
- KBRANCH:genericx86-64 = "standard/base"
- KBRANCH:edgerouter = "standard/edgerouter"
- KBRANCH:beaglebone = "standard/beaglebone"
+ KBRANCH:genericx86 = "v5.15/standard/base"
+ KBRANCH:genericx86-64 = "v5.15/standard/base"
+ KBRANCH:edgerouter = "v5.15/standard/edgerouter"
+ KBRANCH:beaglebone-yocto = "v5.15/standard/beaglebone"
The :term:`KBRANCH` statements
identify the kernel branch to use when building for each supported
@@ -3998,9 +4221,18 @@ system and gives an overview of their function and contents.
There is legacy support for specifying the full path to the device
tree. However, providing just the ``.dtb`` file is preferred.
- In order to use this variable, the
- :ref:`kernel-devicetree <ref-classes-kernel-devicetree>` class must
- be inherited.
+ In order to use this variable, the :ref:`ref-classes-kernel-devicetree`
+ class must be inherited.
+
+ :term:`KERNEL_DEVICETREE_BUNDLE`
+      When set to "1", this variable lets you bundle the Linux kernel
+ and the Device Tree Binary together in a single file.
+
+ This feature is currently only supported on the "arm" (32 bit)
+ architecture.
+
+ This variable is set to "0" by default by the
+ :ref:`ref-classes-kernel-devicetree` class.
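+
+      For example, a machine configuration file for a 32-bit Arm board could
+      enable the bundling with::
+
+         KERNEL_DEVICETREE_BUNDLE = "1"
+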
:term:`KERNEL_DTB_LINK_NAME`
The link name of the kernel device tree binary (DTB). This variable
@@ -4025,10 +4257,25 @@ system and gives an overview of their function and contents.
KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
- The value of the :term:`KERNEL_ARTIFACT_NAME`
- variable, which is set in the same file, has the following value::
+ See :term:`KERNEL_ARTIFACT_NAME` for additional information.
- KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
+
+ :term:`KERNEL_DTBDEST`
+ This variable, used by the :ref:`ref-classes-kernel-devicetree`
+      class, lets you change the installation directory of the DTB
+ (Device Tree Binary) files.
+
+ It is set by default to "${KERNEL_IMAGEDEST}" by the
+ :ref:`ref-classes-kernel` class.
+
+ :term:`KERNEL_DTBVENDORED`
+      This variable, used by the :ref:`ref-classes-kernel-devicetree`
+      class, makes it possible to ignore vendor subdirectories when
+      installing DTB (Device Tree Binary) files, when it is set to "false".
+
+ To keep vendor subdirectories, set this variable to "true".
+
+ It is set by default to "false" by the :ref:`ref-classes-kernel` class.
:term:`KERNEL_DTC_FLAGS`
Specifies the ``dtc`` flags that are passed to the Linux kernel build
@@ -4143,9 +4390,12 @@ system and gives an overview of their function and contents.
when building the kernel and is passed to ``make`` as the target to
build.
- If you want to build an alternate kernel image type in addition to that
- specified by :term:`KERNEL_IMAGETYPE`, use the :term:`KERNEL_ALT_IMAGETYPE`
- variable.
+ To build additional kernel image types, use :term:`KERNEL_IMAGETYPES`.
+
+ :term:`KERNEL_IMAGETYPES`
+ Lists additional types of kernel images to build for a device in addition
+      to the image type specified in :term:`KERNEL_IMAGETYPE`. Usually set by the
+ machine configuration files.
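+
+      For example, a machine configuration file could request both a
+      ``zImage`` and a ``fitImage`` kernel image with something like
+      (a sketch)::
+
+         KERNEL_IMAGETYPES = "zImage fitImage"
+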
:term:`KERNEL_MODULE_AUTOLOAD`
Lists kernel modules that need to be auto-loaded during boot.
@@ -4183,6 +4433,14 @@ system and gives an overview of their function and contents.
provide those module configurations, see the
:term:`module_conf_* <module_conf>` variable.
+ :term:`KERNEL_PACKAGE_NAME`
+      Specifies the base name of the kernel packages, i.e. the "kernel"
+      prefix in package names such as "kernel-modules", "kernel-image" and
+      "kernel-dbg".
+
+ The default value for this variable is set to "kernel" by the
+ :ref:`ref-classes-kernel` class.
+
:term:`KERNEL_PATH`
The location of the kernel sources. This variable is set to the value
of the :term:`STAGING_KERNEL_DIR` within
@@ -4345,7 +4603,7 @@ system and gives an overview of their function and contents.
The OpenEmbedded build system produces a warning if the variable
is not set for any given layer.
- See the ":ref:`dev-manual/common-tasks:creating your own layer`"
+ See the ":ref:`dev-manual/layers:creating your own layer`"
section in the Yocto Project Development Tasks Manual.
:term:`LAYERVERSION`
@@ -4394,7 +4652,7 @@ system and gives an overview of their function and contents.
This variable must be defined for all recipes (unless
:term:`LICENSE` is set to "CLOSED").
- For more information, see the ":ref:`dev-manual/common-tasks:tracking license changes`"
+ For more information, see the ":ref:`dev-manual/licenses:tracking license changes`"
section in the Yocto Project Development Tasks Manual.
:term:`LICENSE`
@@ -4458,7 +4716,7 @@ system and gives an overview of their function and contents.
For related information on providing license text, see the
:term:`COPY_LIC_DIRS` variable, the
:term:`COPY_LIC_MANIFEST` variable, and the
- ":ref:`dev-manual/common-tasks:providing license text`"
+ ":ref:`dev-manual/licenses:providing license text`"
section in the Yocto Project Development Tasks Manual.
:term:`LICENSE_FLAGS`
@@ -4471,14 +4729,14 @@ system and gives an overview of their function and contents.
typically used to mark recipes that might require additional licenses
in order to be used in a commercial product. For more information,
see the
- ":ref:`dev-manual/common-tasks:enabling commercially licensed recipes`"
+ ":ref:`dev-manual/licenses:enabling commercially licensed recipes`"
section in the Yocto Project Development Tasks Manual.
:term:`LICENSE_FLAGS_ACCEPTED`
Lists license flags that when specified in
:term:`LICENSE_FLAGS` within a recipe should not
prevent that recipe from being built. For more information, see the
- ":ref:`dev-manual/common-tasks:enabling commercially licensed recipes`"
+ ":ref:`dev-manual/licenses:enabling commercially licensed recipes`"
section in the Yocto Project Development Tasks Manual.
:term:`LICENSE_PATH`
@@ -4757,6 +5015,17 @@ system and gives an overview of their function and contents.
:term:`MAINTAINER`
The email address of the distribution maintainer.
+ :term:`MESON_BUILDTYPE`
+ Value of the Meson ``--buildtype`` argument used by the
+ :ref:`ref-classes-meson` class. It defaults to ``debug`` if
+ :term:`DEBUG_BUILD` is set to "1", and ``plain`` otherwise.
+
+ See `Meson build options <https://mesonbuild.com/Builtin-options.html>`__
+ for the values you could set in a recipe. Values such as ``plain``,
+ ``debug``, ``debugoptimized``, ``release`` and ``minsize`` allow
+ you to specify the inclusion of debugging symbols and the compiler
+ optimizations (none, performance or size).
+
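+      For example, to build a recipe with debugging symbols while keeping
+      some compiler optimizations, a recipe could set::
+
+         MESON_BUILDTYPE = "debugoptimized"
+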
:term:`METADATA_BRANCH`
The branch currently checked out for the OpenEmbedded-Core layer (path
determined by :term:`COREBASE`).
@@ -4765,6 +5034,13 @@ system and gives an overview of their function and contents.
The revision currently checked out for the OpenEmbedded-Core layer (path
determined by :term:`COREBASE`).
+ :term:`MIME_XDG_PACKAGES`
+ The current implementation of the :ref:`mime-xdg <ref-classes-mime-xdg>`
+ class cannot detect ``.desktop`` files installed through absolute
+ symbolic links. Use this setting to make the class create post-install
+ and post-remove scripts for these packages anyway, to invoke the
+      ``update-desktop-database`` command.
+
:term:`MIRRORS`
Specifies additional paths from which the OpenEmbedded build system
gets source code. When the build system searches for source code, it
@@ -4773,9 +5049,8 @@ system and gives an overview of their function and contents.
:term:`PREMIRRORS`, the upstream source, and then
locations specified by :term:`MIRRORS` in that order.
- Assuming your distribution (:term:`DISTRO`) is "poky",
- the default value for :term:`MIRRORS` is defined in the
- ``conf/distro/poky.conf`` file in the ``meta-poky`` Git repository.
+ The default value for :term:`MIRRORS` is defined in the
+ ``meta/classes-global/mirrors.bbclass`` file in the core metadata layer.
:term:`MLPREFIX`
Specifies a prefix has been added to :term:`PN` to create a
@@ -5007,6 +5282,16 @@ system and gives an overview of their function and contents.
:term:`Source Directory` for details on how this class
applies these additional sed command arguments.
+ :term:`OECMAKE_GENERATOR`
+      A variable for the :ref:`ref-classes-cmake` class, allowing you to
+      choose which back-end CMake generates to build an application.
+
+ By default, this variable is set to ``Ninja``, which is faster than GNU
+ make, but if building is broken with Ninja, a recipe can use this
+ variable to use GNU make instead::
+
+ OECMAKE_GENERATOR = "Unix Makefiles"
+
:term:`OE_IMPORTS`
An internal variable used to tell the OpenEmbedded build system what
Python modules to import for every Python function run by the system.
@@ -5027,7 +5312,7 @@ system and gives an overview of their function and contents.
Controls how the OpenEmbedded build system spawns interactive
terminals on the host development system (e.g. using the BitBake
command with the ``-c devshell`` command-line option). For more
- information, see the ":ref:`dev-manual/common-tasks:using a development shell`" section in
+ information, see the ":ref:`dev-manual/development-shell:using a development shell`" section in
the Yocto Project Development Tasks Manual.
You can use the following values for the :term:`OE_TERMINAL` variable:
@@ -5050,6 +5335,20 @@ system and gives an overview of their function and contents.
For additional information on how this variable is used, see the
initialization script.
+ :term:`OEQA_REPRODUCIBLE_TEST_PACKAGE`
+ Set the package manager(s) for build reproducibility testing.
+ See :yocto_git:`reproducible.py </poky/tree/meta/lib/oeqa/selftest/cases/reproducible.py>`
+ and :doc:`/test-manual/reproducible-builds`.
+
+ :term:`OEQA_REPRODUCIBLE_TEST_TARGET`
+      Set the build target for build reproducibility testing. By default
+      all available recipes are compiled with "bitbake world"; see also :term:`EXCLUDE_FROM_WORLD`
+ and :doc:`/test-manual/reproducible-builds`.
+
+ :term:`OEQA_REPRODUCIBLE_TEST_SSTATE_TARGETS`
+ Set build targets which can be rebuilt using :ref:`shared state <overview-manual/concepts:shared state cache>`
+ when running build reproducibility tests. See :doc:`/test-manual/reproducible-builds`.
+
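+      As a sketch, these variables could be customized in ``local.conf`` to
+      shorten a reproducibility test run (the target and package manager
+      choices below are only illustrative)::
+
+         OEQA_REPRODUCIBLE_TEST_PACKAGE = "ipk"
+         OEQA_REPRODUCIBLE_TEST_TARGET = "core-image-minimal"
+         OEQA_REPRODUCIBLE_TEST_SSTATE_TARGETS = "core-image-minimal"
+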
:term:`OLDEST_KERNEL`
Declares the oldest version of the Linux kernel that the produced
binaries must support. This variable is passed into the build of the
@@ -5094,7 +5393,7 @@ system and gives an overview of their function and contents.
An easy way to see what overrides apply is to search for :term:`OVERRIDES`
in the output of the ``bitbake -e`` command. See the
- ":ref:`dev-manual/common-tasks:viewing variable values`" section in the Yocto
+ ":ref:`dev-manual/debugging:viewing variable values`" section in the Yocto
Project Development Tasks Manual for more information.
:term:`P`
@@ -5115,7 +5414,7 @@ system and gives an overview of their function and contents.
specific by using the package name as a suffix.
You can find out more about applying this variable in the
- ":ref:`dev-manual/common-tasks:adding custom metadata to packages`"
+ ":ref:`dev-manual/packages:adding custom metadata to packages`"
section in the Yocto Project Development Tasks Manual.
:term:`PACKAGE_ARCH`
@@ -5222,7 +5521,7 @@ system and gives an overview of their function and contents.
use of the :term:`INHIBIT_PACKAGE_DEBUG_SPLIT` variable.
You can find out more about debugging using GDB by reading the
- ":ref:`dev-manual/common-tasks:debugging with the gnu project debugger (gdb) remotely`" section
+ ":ref:`dev-manual/debugging:debugging with the gnu project debugger (gdb) remotely`" section
in the Yocto Project Development Tasks Manual.
:term:`PACKAGE_EXCLUDE`
@@ -5381,7 +5680,7 @@ system and gives an overview of their function and contents.
the :ref:`core-image-minimal-initramfs <ref-manual/images:images>`
image. When working with an initial RAM filesystem (initramfs) image,
use the :term:`PACKAGE_INSTALL` variable. For information on creating an
- initramfs, see the ":ref:`dev-manual/common-tasks:building an initial ram filesystem (initramfs) image`" section
+ :term:`Initramfs`, see the ":ref:`dev-manual/building:building an initial ram filesystem (Initramfs) image`" section
in the Yocto Project Development Tasks Manual.
:term:`PACKAGE_INSTALL_ATTEMPTONLY`
@@ -5404,7 +5703,7 @@ system and gives an overview of their function and contents.
:term:`PACKAGE_WRITE_DEPS`.
For information on running post-installation scripts, see the
- ":ref:`dev-manual/common-tasks:post-installation scripts`"
+ ":ref:`dev-manual/new-recipe:post-installation scripts`"
section in the Yocto Project Development Tasks Manual.
:term:`PACKAGECONFIG`
@@ -5432,25 +5731,23 @@ system and gives an overview of their function and contents.
omit any argument you like but must retain the separating commas. The
order is important and specifies the following:
- 1. Extra arguments that should be added to the configure script
- argument list (:term:`EXTRA_OECONF` or
- :term:`PACKAGECONFIG_CONFARGS`) if
- the feature is enabled.
+ #. Extra arguments that should be added to :term:`PACKAGECONFIG_CONFARGS`
+ if the feature is enabled.
- 2. Extra arguments that should be added to :term:`EXTRA_OECONF` or
- :term:`PACKAGECONFIG_CONFARGS` if the feature is disabled.
+ #. Extra arguments that should be added to :term:`PACKAGECONFIG_CONFARGS`
+ if the feature is disabled.
- 3. Additional build dependencies (:term:`DEPENDS`)
+ #. Additional build dependencies (:term:`DEPENDS`)
that should be added if the feature is enabled.
- 4. Additional runtime dependencies (:term:`RDEPENDS`)
+ #. Additional runtime dependencies (:term:`RDEPENDS`)
that should be added if the feature is enabled.
- 5. Additional runtime recommendations
+ #. Additional runtime recommendations
(:term:`RRECOMMENDS`) that should be added if
the feature is enabled.
- 6. Any conflicting (that is, mutually exclusive) :term:`PACKAGECONFIG`
+ #. Any conflicting (that is, mutually exclusive) :term:`PACKAGECONFIG`
settings for this feature.
Consider the following :term:`PACKAGECONFIG` block taken from the
@@ -5497,6 +5794,38 @@ system and gives an overview of their function and contents.
PACKAGECONFIG:append:pn-recipename = " f4"
+      Consider the following example of a :ref:`ref-classes-cmake` recipe
+      with a systemd service, in which :term:`PACKAGECONFIG` is used to turn
+      the systemd service into a feature that can easily be enabled or
+      disabled. The source tree contains the following files::
+
+ example.c
+ example.service
+ CMakeLists.txt
+
+ The ``CMakeLists.txt`` file contains::
+
+         if(WITH_SYSTEMD)
+             install(FILES ${PROJECT_SOURCE_DIR}/example.service DESTINATION /etc/systemd/system)
+         endif(WITH_SYSTEMD)
+
+      In order to enable the installation of ``example.service``, we need to
+      ensure that ``-DWITH_SYSTEMD=ON`` is passed to the ``cmake`` command.
+      Recipes that have a ``CMakeLists.txt`` generally inherit the
+      :ref:`ref-classes-cmake` class, which runs ``cmake`` with
+      :term:`EXTRA_OECMAKE`, to which :term:`PACKAGECONFIG_CONFARGS` is
+      appended. Since :term:`PACKAGECONFIG_CONFARGS` is automatically filled
+      with either the first or the second element of the
+      :term:`PACKAGECONFIG` flag value, the recipe would look like::
+
+ inherit cmake
+ PACKAGECONFIG = "systemd"
+ PACKAGECONFIG[systemd] = "-DWITH_SYSTEMD=ON,-DWITH_SYSTEMD=OFF"
+
+      As a side note, the recipe could also check whether ``systemd`` is
+      actually the :term:`INIT_MANAGER` in use::
+
+ PACKAGECONFIG = "${@'systemd' if d.getVar('INIT_MANAGER') == 'systemd' else ''}"
+
:term:`PACKAGECONFIG_CONFARGS`
A space-separated list of configuration options generated from the
:term:`PACKAGECONFIG` setting.
@@ -5555,7 +5884,7 @@ system and gives an overview of their function and contents.
For an example of how to use the :term:`PACKAGES_DYNAMIC` variable when
you are splitting packages, see the
- ":ref:`dev-manual/common-tasks:handling optional module packaging`"
+ ":ref:`dev-manual/packages:handling optional module packaging`"
section in the Yocto Project Development Tasks Manual.
:term:`PACKAGESPLITFUNCS`
@@ -5590,7 +5919,7 @@ system and gives an overview of their function and contents.
the ``do_compile`` task that result in race conditions, you can clear
the :term:`PARALLEL_MAKE` variable within the recipe as a workaround. For
information on addressing race conditions, see the
- ":ref:`dev-manual/common-tasks:debugging parallel make races`"
+ ":ref:`dev-manual/debugging:debugging parallel make races`"
section in the Yocto Project Development Tasks Manual.
For single socket systems (i.e. one CPU), you should not have to
@@ -5600,7 +5929,7 @@ system and gives an overview of their function and contents.
not set higher than "-j 20".
For more information on speeding up builds, see the
- ":ref:`dev-manual/common-tasks:speeding up a build`"
+ ":ref:`dev-manual/speeding-up-build:speeding up a build`"
section in the Yocto Project Development Tasks Manual.
:term:`PARALLEL_MAKEINST`
@@ -5620,7 +5949,7 @@ system and gives an overview of their function and contents.
the ``do_install`` task that result in race conditions, you can
clear the :term:`PARALLEL_MAKEINST` variable within the recipe as a
workaround. For information on addressing race conditions, see the
- ":ref:`dev-manual/common-tasks:debugging parallel make races`"
+ ":ref:`dev-manual/debugging:debugging parallel make races`"
section in the Yocto Project Development Tasks Manual.
:term:`PATCHRESOLVE`
@@ -5720,7 +6049,7 @@ system and gives an overview of their function and contents.
For examples of how this data is used, see the
":ref:`overview-manual/concepts:automatically added runtime dependencies`"
section in the Yocto Project Overview and Concepts Manual and the
- ":ref:`dev-manual/common-tasks:viewing package information with \`\`oe-pkgdata-util\`\``"
+ ":ref:`dev-manual/debugging:viewing package information with \`\`oe-pkgdata-util\`\``"
section in the Yocto Project Development Tasks Manual. For more
information on the shared, global-state directory, see
:term:`STAGING_DIR_HOST`.
@@ -5836,7 +6165,7 @@ system and gives an overview of their function and contents.
Because manually managing :term:`PR` can be cumbersome and error-prone,
an automated solution exists. See the
- ":ref:`dev-manual/common-tasks:working with a pr service`" section
+ ":ref:`dev-manual/packages:working with a pr service`" section
in the Yocto Project Development Tasks Manual for more information.
:term:`PREFERRED_PROVIDER`
@@ -5853,13 +6182,13 @@ system and gives an overview of their function and contents.
The :term:`PREFERRED_PROVIDER` variable is set with the name (:term:`PN`) of
the recipe you prefer to provide "virtual/kernel".
- Following are more examples::
+ Here are more examples::
PREFERRED_PROVIDER_virtual/xserver = "xserver-xf86"
PREFERRED_PROVIDER_virtual/libgl ?= "mesa"
For more
- information, see the ":ref:`dev-manual/common-tasks:using virtual providers`"
+ information, see the ":ref:`dev-manual/new-recipe:using virtual providers`"
section in the Yocto Project Development Tasks Manual.
.. note::
@@ -5939,9 +6268,8 @@ system and gives an overview of their function and contents.
source, and then locations specified by
:term:`MIRRORS` in that order.
- Assuming your distribution (:term:`DISTRO`) is "poky",
- the default value for :term:`PREMIRRORS` is defined in the
- ``conf/distro/poky.conf`` file in the ``meta-poky`` Git repository.
+ The default value for :term:`PREMIRRORS` is defined in the
+ ``meta/classes-global/mirrors.bbclass`` file in the core metadata layer.
Typically, you could add a specific server for the build system to
attempt before any others by adding something like the following to
@@ -6036,11 +6364,11 @@ system and gives an overview of their function and contents.
.. note::
- A corresponding mechanism for virtual runtime dependencies
- (packages) exists. However, the mechanism does not depend on any
- special functionality beyond ordinary variable assignments. For
- example, ``VIRTUAL-RUNTIME_dev_manager`` refers to the package of
- the component that manages the ``/dev`` directory.
+ A corresponding mechanism for virtual runtime dependencies (packages)
+ exists. However, the mechanism does not depend on any special
+ functionality beyond ordinary variable assignments. For example,
+ :term:`VIRTUAL-RUNTIME_dev_manager <VIRTUAL-RUNTIME>` refers to the
+ package of the component that manages the ``/dev`` directory.
Setting the "preferred provider" for runtime dependencies is as
simple as using the following assignment in a configuration file::
@@ -6059,7 +6387,7 @@ system and gives an overview of their function and contents.
You must
set the variable if you want to automatically start a local :ref:`PR
- service <dev-manual/common-tasks:working with a pr service>`. You can
+ service <dev-manual/packages:working with a pr service>`. You can
set :term:`PRSERV_HOST` to other values to use a remote PR service.
@@ -6073,7 +6401,7 @@ system and gives an overview of their function and contents.
:term:`PTEST_ENABLED`
Specifies whether or not :ref:`Package
- Test <dev-manual/common-tasks:testing packages with ptest>` (ptest)
+ Test <dev-manual/packages:testing packages with ptest>` (ptest)
functionality is enabled when building a recipe. You should not set
this variable directly. Enabling and disabling building Package Tests
at build time should be done by adding "ptest" to (or removing it
@@ -6089,6 +6417,14 @@ system and gives an overview of their function and contents.
:term:`PV` is the default value of the :term:`PKGV` variable.
+ :term:`PYPI_PACKAGE`
+ When inheriting the :ref:`pypi <ref-classes-pypi>` class, specifies the
+ `PyPI <https://pypi.org/>`__ package name to be built. The default value
+ is derived from :term:`BPN` (with any "python-" or "python3-" prefix
+ stripped off if present). However, it needs to be set explicitly when that
+ default does not match the PyPI package name, for example when the
+ package name has a prefix, underscores or uppercase letters.
+
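+ As a hypothetical example (the names below are placeholders, not real
+ recipes), a recipe named ``python3-foo-bar`` for a PyPI package published
+ as ``Foo_Bar`` would need to set::
+
+ PYPI_PACKAGE = "Foo_Bar"
+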
:term:`PYTHON_ABI`
When used by recipes that inherit the
:ref:`setuptools3 <ref-classes-setuptools3>` class, denotes the
@@ -6205,7 +6541,7 @@ system and gives an overview of their function and contents.
``baz``.
The names of the packages you list within :term:`RDEPENDS` must be the
- names of other packages - they cannot be recipe names. Although
+ names of other packages --- they cannot be recipe names. Although
package names and recipe names usually match, the important point
here is that you are providing package names within the :term:`RDEPENDS`
variable. For an example of the default list of packages created from
@@ -6276,6 +6612,22 @@ system and gives an overview of their function and contents.
BitBake User Manual for additional information on tasks and
dependencies.
+ :term:`RECIPE_MAINTAINER`
+ This variable defines the name and e-mail address of the maintainer of a
+ recipe. Such information can be used by human users submitting changes,
+ and by automated tools to send notifications, for example about
+ vulnerabilities or source updates.
+
+ The variable can be defined in a global distribution :oe_git:`maintainers.inc
+ </openembedded-core/tree/meta/conf/distro/include/maintainers.inc>` file::
+
+ meta/conf/distro/include/maintainers.inc:RECIPE_MAINTAINER:pn-sysvinit = "Ross Burton <ross.burton@arm.com>"
+
+ It can also be directly defined in a recipe,
+ for example in the ``libgpiod`` one::
+
+ RECIPE_MAINTAINER = "Bartosz Golaszewski <brgl@bgdev.pl>"
+
:term:`RECIPE_NO_UPDATE_REASON`
If a recipe should not be replaced by a more recent upstream version,
putting the reason why in this variable in a recipe allows
@@ -6283,6 +6635,39 @@ system and gives an overview of their function and contents.
in the ":ref:`ref-manual/devtool-reference:checking on the upgrade status of a recipe`"
section.
+ :term:`RECIPE_SYSROOT`
+ This variable points to the directory that holds all files populated from
+ recipes specified in :term:`DEPENDS`. As the name indicates,
+ think of this variable as a custom root (``/``) for the recipe that the
+ compiler uses to find the headers and other files needed to complete
+ its job.
+
+ This variable is related to :term:`STAGING_DIR_HOST` or :term:`STAGING_DIR_TARGET`
+ according to the type of the recipe and the build target.
+
+ To better understand this variable, consider the following examples:
+
+ - For ``#include <header.h>``, ``header.h`` should be in ``"${RECIPE_SYSROOT}/usr/include"``
+
+ - For ``-lexample``, ``libexample.so`` should be in ``"${RECIPE_SYSROOT}/lib"``
+ or other library sysroot directories.
+
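+ For instance, if a recipe drives a hand-written Makefile that expects a
+ ``SYSROOT`` variable (a hypothetical setup, shown only to illustrate how
+ this variable can be referenced), it could pass::
+
+ EXTRA_OEMAKE += "SYSROOT=${RECIPE_SYSROOT}"
+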
+ The default value is ``"${WORKDIR}/recipe-sysroot"``.
+ Do not modify it.
+
+ :term:`RECIPE_SYSROOT_NATIVE`
+ This is similar to :term:`RECIPE_SYSROOT` but the populated files are from
+ ``-native`` recipes. This allows a recipe built for the target machine to
+ use ``native`` tools.
+
+ This variable is related to :term:`STAGING_DIR_NATIVE`.
+
+ The default value is ``"${WORKDIR}/recipe-sysroot-native"``.
+ Do not modify it.
+
+ :term:`REPODIR`
+ See :term:`bitbake:REPODIR` in the BitBake manual.
+
:term:`REQUIRED_DISTRO_FEATURES`
When inheriting the
:ref:`features_check <ref-classes-features_check>`
@@ -6644,13 +7029,16 @@ system and gives an overview of their function and contents.
:term:`SDK_EXT_TYPE` is set to "full".
:term:`SDK_NAME`
- The base name for SDK output files. The name is derived from the
- :term:`DISTRO`, :term:`TCLIBC`,
- :term:`SDK_ARCH`,
- :term:`IMAGE_BASENAME`, and
- :term:`TUNE_PKGARCH` variables::
+ The base name for SDK output files. The default value (as set in
+ ``meta-poky/conf/distro/poky.conf``) is derived from the
+ :term:`DISTRO`,
+ :term:`TCLIBC`,
+ :term:`SDKMACHINE`,
+ :term:`IMAGE_BASENAME`,
+ :term:`TUNE_PKGARCH`, and
+ :term:`MACHINE` variables::
- SDK_NAME = "${DISTRO}-${TCLIBC}-${SDK_ARCH}-${IMAGE_BASENAME}-${TUNE_PKGARCH}"
+ SDK_NAME = "${DISTRO}-${TCLIBC}-${SDKMACHINE}-${IMAGE_BASENAME}-${TUNE_PKGARCH}-${MACHINE}"
:term:`SDK_OS`
Specifies the operating system for which the SDK will be built. The
@@ -6821,6 +7209,10 @@ system and gives an overview of their function and contents.
configuration will not take effect.
:term:`SDKPATH`
+ Defines the path used to collect the SDK components and build the
+ installer.
+
+ :term:`SDKPATHINSTALL`
Defines the path offered to the user for installation of the SDK that
is generated by the OpenEmbedded build system. The path appears as
the default location for installing the SDK when you run the SDK's
@@ -6830,7 +7222,7 @@ system and gives an overview of their function and contents.
:term:`SDKTARGETSYSROOT`
The full path to the sysroot used for cross-compilation within an SDK
as it will be when installed into the default
- :term:`SDKPATH`.
+ :term:`SDKPATHINSTALL`.
:term:`SECTION`
The section in which packages should be categorized. Package
@@ -7058,6 +7450,93 @@ system and gives an overview of their function and contents.
You can specify only a single URL in :term:`SOURCE_MIRROR_URL`.
+ :term:`SPDX_ARCHIVE_PACKAGED`
+ This option allows adding compressed archives of the files in the
+ generated target packages to the :term:`SPDX` output.
+
+ Such archives are available in
+ ``tmp/deploy/spdx/MACHINE/packages/packagename.tar.zst``
+ under the :term:`Build Directory`.
+
+ Enable this option as follows::
+
+ SPDX_ARCHIVE_PACKAGED = "1"
+
+ According to our tests on release 4.1 "langdale", building
+ ``core-image-minimal`` for the ``qemux86-64`` machine, enabling this
+ option multiplied the size of the ``tmp/deploy/spdx`` directory by a
+ factor of 13 (+1.6 GiB for this image), compared to just using the
+ :ref:`create-spdx <ref-classes-create-spdx>` class with no option.
+
+ Note that this option doesn't increase the size of :term:`SPDX`
+ files in ``tmp/deploy/images/MACHINE``.
+
+ :term:`SPDX_ARCHIVE_SOURCES`
+ This option allows adding compressed archives of the sources for
+ packages installed on the target to the :term:`SPDX` output. It currently
+ only works when :term:`SPDX_INCLUDE_SOURCES` is set.
+
+ This is one way of fulfilling "source code access" license
+ requirements.
+
+ Such source archives are available in
+ ``tmp/deploy/spdx/MACHINE/recipes/recipe-packagename.tar.zst``
+ under the :term:`Build Directory`.
+
+ Enable this option as follows::
+
+ SPDX_INCLUDE_SOURCES = "1"
+ SPDX_ARCHIVE_SOURCES = "1"
+
+ According to our tests on release 4.1 "langdale", building
+ ``core-image-minimal`` for the ``qemux86-64`` machine, enabling
+ these options multiplied the size of the ``tmp/deploy/spdx``
+ directory by a factor of 11 (+1.4 GiB for this image),
+ compared to just using the :ref:`create-spdx <ref-classes-create-spdx>`
+ class with no option.
+
+ Note that using this option only marginally increases the size
+ of the :term:`SPDX` output in ``tmp/deploy/images/MACHINE/``
+ (+ 0.07\% with the tested image), compared to just enabling
+ :term:`SPDX_INCLUDE_SOURCES`.
+
+ :term:`SPDX_INCLUDE_SOURCES`
+ This option allows adding a description of the source files used to build
+ the host tools and the target packages to the ``spdx.json`` files in
+ ``tmp/deploy/spdx/MACHINE/recipes/`` under the :term:`Build Directory`.
+ As a consequence, the ``spdx.json`` files under the ``by-namespace`` and
+ ``packages`` subdirectories in ``tmp/deploy/spdx/MACHINE`` are also
+ modified to include references to such source file descriptions.
+
+ Enable this option as follows::
+
+ SPDX_INCLUDE_SOURCES = "1"
+
+ According to our tests on release 4.1 "langdale", building
+ ``core-image-minimal`` for the ``qemux86-64`` machine, enabling
+ this option multiplied the total size of the ``tmp/deploy/spdx``
+ directory by a factor of 3 (+291 MiB for this image),
+ and the size of the ``IMAGE-MACHINE.spdx.tar.zst`` in
+ ``tmp/deploy/images/MACHINE`` by a factor of 130 (+15 MiB for this
+ image), compared to just using the
+ :ref:`create-spdx <ref-classes-create-spdx>` class with no option.
+
+ :term:`SPDX_NAMESPACE_PREFIX`
+ This option can be used to change the prefix of ``spdxDocument``
+ and the prefix of ``documentNamespace``. It is set by default to
+ ``http://spdx.org/spdxdoc``.
+
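+ For example, assuming your organization publishes its SPDX documents under
+ its own domain (``spdx.example.com`` below is only a placeholder), you
+ could set::
+
+ SPDX_NAMESPACE_PREFIX = "https://spdx.example.com/spdxdocs"
+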
+ :term:`SPDX_PRETTY`
+ This option makes the SPDX output more human-readable, using
+ indentation and newlines, instead of the default output in a
+ single line::
+
+ SPDX_PRETTY = "1"
+
+ The generated SPDX files are approximately 20% bigger, but
+ this option is recommended if you want to inspect the SPDX
+ output files with a text editor.
+
:term:`SPDXLICENSEMAP`
Maps commonly used license names to their SPDX counterparts found in
``meta/files/common-licenses/``. For the default :term:`SPDXLICENSEMAP`
@@ -7095,10 +7574,74 @@ system and gives an overview of their function and contents.
various ``SPL_*`` variables used by the OpenEmbedded build system.
See the BeagleBone machine configuration example in the
- ":ref:`dev-manual/common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`"
+ ":ref:`dev-manual/layers:adding a layer using the \`\`bitbake-layers\`\` script`"
section in the Yocto Project Board Support Package Developer's Guide
for additional information.
+ :term:`SPL_MKIMAGE_DTCOPTS`
+ Options for the device tree compiler, passed via the ``mkimage -D`` option
+ while creating a FIT image with the :ref:`ref-classes-uboot-sign`
+ class. If :term:`SPL_MKIMAGE_DTCOPTS` is not set then the
+ :ref:`ref-classes-uboot-sign` class will not pass the ``-D`` option
+ to ``mkimage``.
+
+ The default value is set to "" by the :ref:`ref-classes-uboot-config`
+ class.
+
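+ For example, to reserve extra padding in the generated device tree blobs
+ (the value below is only an illustration, not a recommended setting)::
+
+ SPL_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
+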
+ :term:`SPL_SIGN_ENABLE`
+ Enable signing of the U-Boot FIT image. The default value is "0".
+ This variable is used by the :ref:`ref-classes-uboot-sign` class.
+
+ :term:`SPL_SIGN_KEYDIR`
+ Location of the directory containing the RSA key and certificate used for
+ signing the U-Boot FIT image, used by the :ref:`ref-classes-uboot-sign`
+ class.
+
+ :term:`SPL_SIGN_KEYNAME`
+ The name of keys used by the :ref:`ref-classes-kernel-fitimage` class
+ for signing the U-Boot FIT image, stored in the :term:`SPL_SIGN_KEYDIR`
+ directory. For example, if a ``dev.key`` key and a ``dev.crt``
+ certificate are stored in the :term:`SPL_SIGN_KEYDIR` directory, you will
+ have to set :term:`SPL_SIGN_KEYNAME` to ``dev``.
+
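+ Here is a minimal sketch combining the signing variables above, assuming
+ keys named ``dev.key``/``dev.crt`` stored under a hypothetical
+ ``/path/to/keys`` directory::
+
+ UBOOT_FITIMAGE_ENABLE = "1"
+ SPL_SIGN_ENABLE = "1"
+ SPL_SIGN_KEYDIR = "/path/to/keys"
+ SPL_SIGN_KEYNAME = "dev"
+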
+ :term:`SPLASH`
+ This variable, used by the :ref:`ref-classes-image` class, allows
+ choosing splashscreen applications. Set it to the names of the packages
+ providing such applications. This variable is set by default to
+ ``psplash``.
+
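+ For example, to use a custom splashscreen application instead, assuming it
+ is packaged by a hypothetical ``my-splash`` recipe::
+
+ SPLASH = "my-splash"
+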
+ :term:`SPLASH_IMAGES`
+ This variable, used by the ``psplash`` recipe, allows customizing
+ the default splashscreen image.
+
+ Specified images in PNG format are converted to ``.h`` files by the recipe,
+ and are included in the ``psplash`` binary, so you won't find them in
+ the root filesystem.
+
+ To make such a change, it is recommended to customize the
+ ``psplash`` recipe in a custom layer. Here is an example structure for
+ an ``ACME`` board::
+
+ meta-acme/recipes-core/psplash
+ ├── files
+ │   └── logo-acme.png
+ └── psplash_%.bbappend
+
+ And here are the contents of the ``psplash_%.bbappend`` file in
+ this example::
+
+ SPLASH_IMAGES = "file://logo-acme.png;outsuffix=default"
+ FILESEXTRAPATHS:prepend := "${THISDIR}/files:"
+
+ You could even add specific configuration options for ``psplash``,
+ for example::
+
+ EXTRA_OECONF += "--disable-startup-msg --enable-img-fullscreen"
+
+ For information on append files, see the
+ ":ref:`dev-manual/layers:appending other layers metadata with your layer`"
+ section.
+
:term:`SRC_URI`
See the BitBake manual for the initial description for this variable:
@@ -7108,35 +7651,35 @@ system and gives an overview of their function and contents.
There are standard and recipe-specific options. Here are standard ones:
- - ``apply`` - Whether to apply the patch or not. The default
+ - ``apply`` --- whether to apply the patch or not. The default
action is to apply the patch.
- - ``striplevel`` - Which striplevel to use when applying the
+ - ``striplevel`` --- which striplevel to use when applying the
patch. The default level is 1.
- - ``patchdir`` - Specifies the directory in which the patch should
+ - ``patchdir`` --- specifies the directory in which the patch should
be applied. The default is ``${``\ :term:`S`\ ``}``.
Here are options specific to recipes building code from a revision
control system:
- - ``mindate`` - Apply the patch only if
+ - ``mindate`` --- apply the patch only if
:term:`SRCDATE` is equal to or greater than
``mindate``.
- - ``maxdate`` - Apply the patch only if :term:`SRCDATE` is not later
+ - ``maxdate`` --- apply the patch only if :term:`SRCDATE` is not later
than ``maxdate``.
- - ``minrev`` - Apply the patch only if :term:`SRCREV` is equal to or
+ - ``minrev`` --- apply the patch only if :term:`SRCREV` is equal to or
greater than ``minrev``.
- - ``maxrev`` - Apply the patch only if :term:`SRCREV` is not later
+ - ``maxrev`` --- apply the patch only if :term:`SRCREV` is not later
than ``maxrev``.
- - ``rev`` - Apply the patch only if :term:`SRCREV` is equal to
+ - ``rev`` --- apply the patch only if :term:`SRCREV` is equal to
``rev``.
- - ``notrev`` - Apply the patch only if :term:`SRCREV` is not equal to
+ - ``notrev`` --- apply the patch only if :term:`SRCREV` is not equal to
``rev``.
.. note::
@@ -7180,14 +7723,14 @@ system and gives an overview of their function and contents.
that if you want to build a fixed revision and you want to avoid
performing a query on the remote repository every time BitBake parses
your recipe, you should specify a :term:`SRCREV` that is a full revision
- identifier and not just a tag.
+ identifier (e.g. the full SHA hash in git) and not just a tag.
.. note::
For information on limitations when inheriting the latest revision
of software using :term:`SRCREV`, see the :term:`AUTOREV` variable
description and the
- ":ref:`dev-manual/common-tasks:automatically incrementing a package version number`"
+ ":ref:`dev-manual/packages:automatically incrementing a package version number`"
section, which is in the Yocto Project Development Tasks Manual.
:term:`SRCTREECOVEREDTASKS`
@@ -7217,6 +7760,32 @@ system and gives an overview of their function and contents.
:term:`SSTATE_DIR`
The directory for the shared state cache.
+ :term:`SSTATE_EXCLUDEDEPS_SYSROOT`
+ This variable allows to specify indirect dependencies to exclude
+ from sysroots, for example to avoid the situations when a dependency on
+ any ``-native`` recipe will pull in all dependencies of that recipe
+ in the recipe sysroot. This behaviour might not always be wanted,
+ for example when that ``-native`` recipe depends on build tools
+ that are not relevant for the current recipe.
+
+ This way, irrelevant dependencies are ignored; otherwise they could
+ prevent the reuse of prebuilt artifacts stored in the Shared
+ State Cache.
+
+ :term:`SSTATE_EXCLUDEDEPS_SYSROOT` is evaluated as two regular
+ expressions of recipe and dependency to ignore. An example
+ is the rule in :oe_git:`meta/conf/layer.conf </openembedded-core/tree/meta/conf/layer.conf>`::
+
+ # Nothing needs to depend on libc-initial
+ # base-passwd/shadow-sysroot don't need their dependencies
+ SSTATE_EXCLUDEDEPS_SYSROOT += "\
+ .*->.*-initial.* \
+ .*(base-passwd|shadow-sysroot)->.* \
+ "
+
+ The ``->`` substring represents the dependency between
+ the two regular expressions.
+
:term:`SSTATE_MIRROR_ALLOW_NETWORK`
If set to "1", allows fetches from mirrors that are specified in
:term:`SSTATE_MIRRORS` to work even when
@@ -7259,6 +7828,16 @@ system and gives an overview of their function and contents.
file://.* https://someserver.tld/share/sstate/PATH;downloadfilename=PATH \
file://.* file:///some-local-dir/sstate/PATH"
+ The Yocto Project actually shares the cache data objects built by its
+ autobuilder::
+
+ SSTATE_MIRRORS ?= "file://.* http://cdn.jsdelivr.net/yocto/sstate/all/PATH;downloadfilename=PATH"
+
+ Because these binary artifacts are built for the generic QEMU machines
+ supported by the various Poky releases, they are less likely to be
+ reusable in real projects building binaries optimized for a specific
+ CPU family.
+
:term:`SSTATE_SCAN_FILES`
Controls the list of files the OpenEmbedded build system scans for
hardcoded installation paths. The variable uses a space-separated
@@ -7377,10 +7956,15 @@ system and gives an overview of their function and contents.
for ``-native`` recipes, as they make use of host headers and
libraries.
+ See :term:`RECIPE_SYSROOT` and :term:`RECIPE_SYSROOT_NATIVE`.
+
:term:`STAGING_DIR_NATIVE`
Specifies the path to the sysroot directory used when building
components that run on the build host itself.
+ The default value is ``"${RECIPE_SYSROOT_NATIVE}"``;
+ see :term:`RECIPE_SYSROOT_NATIVE`.
+
:term:`STAGING_DIR_TARGET`
Specifies the path to the sysroot used for the system for which the
component generates code. For components that do not generate code,
@@ -7562,6 +8146,35 @@ system and gives an overview of their function and contents.
${libdir}/${BPN}/ptest \
"
+ Consider the following example in which you need to manipulate this variable.
+ Assume you have a recipe ``A`` that provides a shared library ``.so.*`` that is
+ installed into a custom folder other than "``${libdir}``"
+ or "``${base_libdir}``", let's say "``/opt/lib``".
+
+ .. note::
+
+ This is not a recommended way to deal with shared libraries, but this
+ is just to show the usefulness of setting :term:`SYSROOT_DIRS`.
+
+ When a recipe ``B`` :term:`DEPENDS` on ``A``, it means what is in
+ :term:`SYSROOT_DIRS` will be copied from :term:`D` of the recipe ``A``
+ into ``A``'s :term:`SYSROOT_DESTDIR` that is "``${WORKDIR}/sysroot-destdir``".
+
+ Now, since ``/opt/lib`` is not in :term:`SYSROOT_DIRS`, it will never be copied to
+ ``B``'s :term:`RECIPE_SYSROOT`, which is "``${WORKDIR}/recipe-sysroot``". So,
+ the linking process in recipe ``B`` will fail.
+
+ To fix this, you need to add ``/opt/lib`` to :term:`SYSROOT_DIRS`::
+
+ SYSROOT_DIRS:append = " /opt/lib"
+
+ .. note::
+ Even after adding ``/opt/lib`` to :term:`SYSROOT_DIRS`, the linking process will still fail
+ because the linker does not know about that location, since :term:`TARGET_LDFLAGS`
+ doesn't contain it (if your recipe is for the target). Therefore, you should also add::
+
+ TARGET_LDFLAGS:append = " -L${RECIPE_SYSROOT}/opt/lib"
+
:term:`SYSROOT_DIRS_NATIVE`
Extra directories staged into the sysroot by the
:ref:`ref-tasks-populate_sysroot` task for
@@ -7609,7 +8222,7 @@ system and gives an overview of their function and contents.
:ref:`systemd-boot <ref-classes-systemd-boot>` class sets the
:term:`SYSTEMD_BOOT_CFG` as follows::
- SYSTEMD_BOOT_CFG ?= "${:term:`S`}/loader.conf"
+ SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
For information on Systemd-boot, see the `Systemd-boot
documentation <https://www.freedesktop.org/wiki/Software/systemd/systemd-boot/>`__.
@@ -7663,8 +8276,7 @@ system and gives an overview of their function and contents.
SYSTEMD_SERVICE:${PN} = "connman.service"
:term:`SYSVINIT_ENABLED_GETTYS`
- When using
- :ref:`SysVinit <dev-manual/common-tasks:enabling system services>`,
+ When using :ref:`SysVinit <dev-manual/new-recipe:enabling system services>`,
specifies a space-separated list of the virtual terminals that should
run a `getty <https://en.wikipedia.org/wiki/Getty_%28Unix%29>`__
(allowing login), assuming :term:`USE_VT` is not set to
@@ -7946,7 +8558,7 @@ system and gives an overview of their function and contents.
file.
For more information on testing images, see the
- ":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+ ":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
:term:`TEST_SERIALCONTROL_CMD`
@@ -8019,7 +8631,7 @@ system and gives an overview of their function and contents.
TEST_SUITES = "test_A test_B"
For more information on testing images, see the
- ":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+ ":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual.
:term:`TEST_TARGET`
@@ -8038,7 +8650,7 @@ system and gives an overview of their function and contents.
You can provide the following arguments with :term:`TEST_TARGET`:
- *"qemu":* Boots a QEMU image and runs the tests. See the
- ":ref:`dev-manual/common-tasks:enabling runtime tests on qemu`" section
+ ":ref:`dev-manual/runtime-testing:enabling runtime tests on qemu`" section
in the Yocto Project Development Tasks Manual for more
information.
@@ -8054,7 +8666,7 @@ system and gives an overview of their function and contents.
``meta/lib/oeqa/controllers/simpleremote.py``.
For information on running tests on hardware, see the
- ":ref:`dev-manual/common-tasks:enabling runtime tests on hardware`"
+ ":ref:`dev-manual/runtime-testing:enabling runtime tests on hardware`"
section in the Yocto Project Development Tasks Manual.
:term:`TEST_TARGET_IP`
@@ -8091,9 +8703,9 @@ system and gives an overview of their function and contents.
For more information
on enabling, running, and writing these tests, see the
- ":ref:`dev-manual/common-tasks:performing automated runtime testing`"
+ ":ref:`dev-manual/runtime-testing:performing automated runtime testing`"
section in the Yocto Project Development Tasks Manual and the
- ":ref:`ref-classes-testimage*`" section.
+ ":ref:`ref-classes-testimage`" section.
:term:`THISDIR`
The directory in which the file BitBake is currently parsing is
@@ -8158,6 +8770,16 @@ system and gives an overview of their function and contents.
portion of an eSDK. This is similar to :term:`TOOLCHAIN_HOST_TASK`
applying to SDKs.
+ :term:`TOOLCHAIN_OPTIONS`
+ This variable holds extra options passed to the compiler and the linker
+ for non ``-native`` recipes, as they have to point to the recipe-specific
+ ``sysroot`` folder defined by :term:`RECIPE_SYSROOT`::
+
+ TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
+
+ Native recipes don't need this variable to be set, as they are
+ built for the host machine with the native compiler.
+
:term:`TOOLCHAIN_OUTPUTNAME`
This variable defines the name used for the toolchain output. The
:ref:`populate_sdk_base <ref-classes-populate-sdk-*>` class sets
@@ -8327,23 +8949,30 @@ system and gives an overview of their function and contents.
See the machine include files in the :term:`Source Directory`
for these features.
- :term:`UBOOT_CONFIG`
- Configures the :term:`UBOOT_MACHINE` and can
- also define :term:`IMAGE_FSTYPES` for individual
- cases.
-
- Following is an example from the ``meta-fsl-arm`` layer. ::
+ :term:`UBOOT_BINARY`
+ Specifies the name of the binary built by U-Boot.
- UBOOT_CONFIG ??= "sd"
- UBOOT_CONFIG[sd] = "mx6qsabreauto_config,sdcard"
- UBOOT_CONFIG[eimnor] = "mx6qsabreauto_eimnor_config"
- UBOOT_CONFIG[nand] = "mx6qsabreauto_nand_config,ubifs"
- UBOOT_CONFIG[spinor] = "mx6qsabreauto_spinor_config"
-
- In this example, "sd" is selected as the configuration of the possible four for the
- :term:`UBOOT_MACHINE`. The "sd" configuration defines
- "mx6qsabreauto_config" as the value for :term:`UBOOT_MACHINE`, while the
- "sdcard" specifies the :term:`IMAGE_FSTYPES` to use for the U-Boot image.
+ :term:`UBOOT_CONFIG`
+ Configures one or more U-Boot configurations to build. Each
+ configuration can define the :term:`UBOOT_MACHINE` and optionally the
+ :term:`IMAGE_FSTYPES` and the :term:`UBOOT_BINARY`.
+
+ Here is an example from the ``meta-freescale`` layer. ::
+
+ UBOOT_CONFIG ??= "sdcard-ifc-secure-boot sdcard-ifc sdcard-qspi lpuart qspi secure-boot nor"
+ UBOOT_CONFIG[nor] = "ls1021atwr_nor_defconfig"
+ UBOOT_CONFIG[sdcard-ifc] = "ls1021atwr_sdcard_ifc_defconfig,,u-boot-with-spl-pbl.bin"
+ UBOOT_CONFIG[sdcard-qspi] = "ls1021atwr_sdcard_qspi_defconfig,,u-boot-with-spl-pbl.bin"
+ UBOOT_CONFIG[lpuart] = "ls1021atwr_nor_lpuart_defconfig"
+ UBOOT_CONFIG[qspi] = "ls1021atwr_qspi_defconfig"
+ UBOOT_CONFIG[secure-boot] = "ls1021atwr_nor_SECURE_BOOT_defconfig"
+ UBOOT_CONFIG[sdcard-ifc-secure-boot] = "ls1021atwr_sdcard_ifc_SECURE_BOOT_defconfig,,u-boot-with-spl-pbl.bin"
+
+ In this example, all seven possible configurations are selected. Each
+ configuration specifies "..._defconfig" as :term:`UBOOT_MACHINE`, and
+ the "sd..." configurations define an individual name for
+ :term:`UBOOT_BINARY`. No configuration defines a second parameter for
+ :term:`IMAGE_FSTYPES` to use for the U-Boot image.
For more information on how the :term:`UBOOT_CONFIG` is handled, see the
:ref:`uboot-config <ref-classes-uboot-config>`
@@ -8367,6 +8996,64 @@ system and gives an overview of their function and contents.
creation, the :term:`UBOOT_ENTRYPOINT` variable is passed as a
command-line parameter to the ``uboot-mkimage`` utility.
+ :term:`UBOOT_FIT_DESC`
+ Specifies the description string encoded into a U-Boot fitImage. The default
+ value is set by the :ref:`ref-classes-uboot-sign` class as follows::
+
+ UBOOT_FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
+
+ :term:`UBOOT_FIT_GENERATE_KEYS`
+ Decides whether to generate the keys for signing the U-Boot fitImage if
+ they don't already exist. The keys are created in :term:`SPL_SIGN_KEYDIR`.
+ The default value is "0".
+
+ Enable this as follows::
+
+ UBOOT_FIT_GENERATE_KEYS = "1"
+
+ This variable is used in the :ref:`ref-classes-uboot-sign` class.
+
+ :term:`UBOOT_FIT_HASH_ALG`
+ Specifies the hash algorithm used in creating the U-Boot FIT Image.
+ It is set by default to ``sha256`` by the :ref:`ref-classes-uboot-sign`
+ class.
+
+ :term:`UBOOT_FIT_KEY_GENRSA_ARGS`
+ Arguments to ``openssl genrsa`` for generating an RSA private key for
+ signing the U-Boot FIT image. The default value of this variable
+ is set to "-F4" by the :ref:`ref-classes-uboot-sign` class.
+
+ :term:`UBOOT_FIT_KEY_REQ_ARGS`
+ Arguments to ``openssl req`` for generating a certificate for signing
+ the U-Boot FIT image. The default value, set by the
+ :ref:`ref-classes-uboot-sign` class, is "-batch -new": "-batch" for
+ non-interactive mode and "-new" for generating new keys.
+
+ :term:`UBOOT_FIT_KEY_SIGN_PKCS`
+ Format for the public key certificate used for signing the U-Boot FIT
+ image. The default value is set to "x509" by the
+ :ref:`ref-classes-uboot-sign` class.
+
+ :term:`UBOOT_FIT_SIGN_ALG`
+ Specifies the signature algorithm used in creating the U-Boot FIT Image.
+ This variable is set by default to "rsa2048" by the
+ :ref:`ref-classes-uboot-sign` class.
+
+ :term:`UBOOT_FIT_SIGN_NUMBITS`
+ Size of the private key used in signing the U-Boot FIT image, in number
+ of bits. The default value for this variable is set to "2048"
+ by the :ref:`ref-classes-uboot-sign` class.
+
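+ As an illustration, the signature defaults above could be overridden to
+ use larger RSA keys, assuming the U-Boot configuration supports them::
+
+ UBOOT_FIT_SIGN_ALG = "rsa4096"
+ UBOOT_FIT_SIGN_NUMBITS = "4096"
+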
+ :term:`UBOOT_FITIMAGE_ENABLE`
+ This variable allows generating a FIT image for U-Boot, which is one
+ of the ways to implement a verified boot process.
+
+ Its default value is "0", so set it to "1" to enable this functionality::
+
+ UBOOT_FITIMAGE_ENABLE = "1"
+
+ See the :ref:`ref-classes-uboot-sign` class for details.
+
:term:`UBOOT_LOADADDRESS`
Specifies the load address for the U-Boot image. During U-Boot image
creation, the :term:`UBOOT_LOADADDRESS` variable is passed as a
@@ -8550,16 +9237,15 @@ system and gives an overview of their function and contents.
specifically set. Typically, you would set :term:`USE_DEVFS` to "0" for a
statically populated ``/dev`` directory.
- See the ":ref:`dev-manual/common-tasks:selecting a device manager`" section in
+ See the ":ref:`dev-manual/device-manager:selecting a device manager`" section in
the Yocto Project Development Tasks Manual for information on how to
use this variable.
:term:`USE_VT`
When using
- :ref:`SysVinit <dev-manual/common-tasks:enabling system services>`,
- determines whether or not to run a
- `getty <https://en.wikipedia.org/wiki/Getty_%28Unix%29>`__ on any
- virtual terminals in order to enable logging in through those
+ :ref:`SysVinit <dev-manual/new-recipe:enabling system services>`,
+ determines whether or not to run a :wikipedia:`getty <Getty_(Unix)>`
+ on any virtual terminals in order to enable logging in through those
terminals.
The default value used for :term:`USE_VT` is "1" when no default value is
@@ -8704,6 +9390,33 @@ system and gives an overview of their function and contents.
Additionally, you should also set the
:term:`USERADD_ERROR_DYNAMIC` variable.
+ :term:`VIRTUAL-RUNTIME`
+ :term:`VIRTUAL-RUNTIME` is a commonly used prefix for defining virtual
+ packages for runtime usage, typically for use in :term:`RDEPENDS`
+ or in image definitions.
+
+ An example is ``VIRTUAL-RUNTIME_base-utils``, which makes it possible
+ to use either BusyBox-based utilities::
+
+ VIRTUAL-RUNTIME_base-utils = "busybox"
+
+ or their full featured implementations from GNU Coreutils
+ and other projects::
+
+ VIRTUAL-RUNTIME_base-utils = "packagegroup-core-base-utils"
+
+ Here are two examples using this virtual runtime package. The
+ first one is in :yocto_git:`initramfs-framework_1.0.bb
+ </poky/tree/meta/recipes-core/initrdscripts/initramfs-framework_1.0.bb?h=scarthgap>`::
+
+ RDEPENDS:${PN} += "${VIRTUAL-RUNTIME_base-utils}"
+
+ The second example is in the :yocto_git:`core-image-initramfs-boot
+ </poky/tree/meta/recipes-core/images/core-image-initramfs-boot.bb?h=scarthgap>`
+ image definition::
+
+ PACKAGE_INSTALL = "${INITRAMFS_SCRIPTS} ${VIRTUAL-RUNTIME_base-utils} base-passwd"
+
:term:`VOLATILE_LOG_DIR`
Specifies the persistence of the target's ``/var/log`` directory,
which is used to house postinstall target log files.
@@ -8724,7 +9437,7 @@ system and gives an overview of their function and contents.
OpenEmbedded build system to create a partitioned image
(``image.wic``). For information on how to create a partitioned
image, see the
- ":ref:`dev-manual/common-tasks:creating partitioned images using wic`"
+ ":ref:`dev-manual/wic:creating partitioned images using wic`"
section in the Yocto Project Development Tasks Manual. For details on
the kickstart file format, see the ":doc:`/ref-manual/kickstart`" Chapter.
@@ -8744,7 +9457,7 @@ system and gives an overview of their function and contents.
With the :term:`WKS_FILE_DEPENDS` variable, you have the possibility to
specify a list of additional dependencies (e.g. native tools,
bootloaders, and so forth), that are required to build Wic images.
- Following is an example::
+ Here is an example::
WKS_FILE_DEPENDS = "some-native-tool"
@@ -8752,6 +9465,19 @@ system and gives an overview of their function and contents.
previous example, some-native-tool would be replaced with an actual
native tool on which the build would depend.
+ :term:`WKS_FILES`
+ Specifies a list of candidate Wic kickstart files to be used by the
+ OpenEmbedded build system to create a partitioned image. Only the
+ first one that is found, from left to right, will be used.
+
+ This is only useful when there are multiple ``.wks`` files that can be
+ used to produce an image. A typical case is when multiple layers are
+ used for different hardware platforms, each supplying a different
+ ``.wks`` file. In this case, you specify all possible ones through
+ :term:`WKS_FILES`.
+
+ If only one ``.wks`` file is used, set :term:`WKS_FILE` instead.
+
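+ Here is a hypothetical example (the file names are placeholders) listing
+ two candidate files, the first one found in the metadata being used::
+
+ WKS_FILES = "my-board.wks generic-fallback.wks"
+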
:term:`WORKDIR`
The pathname of the work directory in which the OpenEmbedded build
system builds a recipe. This directory is located within the
@@ -8767,8 +9493,8 @@ system and gives an overview of their function and contents.
- :term:`TMPDIR`: The top-level build output directory
- :term:`MULTIMACH_TARGET_SYS`: The target system identifier
- :term:`PN`: The recipe name
- - :term:`EXTENDPE`: The epoch - (if :term:`PE` is not specified, which
- is usually the case for most recipes, then `EXTENDPE` is blank)
+ - :term:`EXTENDPE`: The epoch --- if :term:`PE` is not specified, which
+ is usually the case for most recipes, then `EXTENDPE` is blank.
- :term:`PV`: The recipe version
- :term:`PR`: The recipe revision
diff --git a/documentation/ref-manual/varlocality.rst b/documentation/ref-manual/varlocality.rst
index 5f7dba8775..e2c086ffa0 100644
--- a/documentation/ref-manual/varlocality.rst
+++ b/documentation/ref-manual/varlocality.rst
@@ -113,7 +113,7 @@ This section lists variables that are required for recipes.
- :term:`LIC_FILES_CHKSUM`
-- :term:`SRC_URI` - used in recipes that fetch local or remote files.
+- :term:`SRC_URI` --- used in recipes that fetch local or remote files.
.. _ref-varlocality-recipe-dependencies:
diff --git a/documentation/sdk-manual/appendix-obtain.rst b/documentation/sdk-manual/appendix-obtain.rst
index 841abac5aa..d384b4916a 100644
--- a/documentation/sdk-manual/appendix-obtain.rst
+++ b/documentation/sdk-manual/appendix-obtain.rst
@@ -25,27 +25,20 @@ Follow these steps to locate and hand-install the toolchain:
download the installer appropriate for your build host, target
hardware, and image type.
- The installer files (``*.sh``) follow this naming convention::
+ The installer files (``*.sh``) follow this naming convention:
+ ``poky-glibc-host_system-core-image-type-arch-toolchain[-ext]-release.sh``:
- poky-glibc-host_system-core-image-type-arch-toolchain[-ext]-release.sh
+ - ``host_system``: string representing your development system: ``i686`` or ``x86_64``
- Where:
- host_system is a string representing your development system:
- "i686" or "x86_64"
+ - ``type``: string representing the image: ``sato`` or ``minimal``
- type is a string representing the image:
- "sato" or "minimal"
+ - ``arch``: string representing the target architecture such as ``cortexa57-qemuarm64``
- arch is a string representing the target architecture:
- "aarch64", "armv5e", "core2-64", "cortexa8hf-neon", "i586", "mips32r2",
- "mips64", or "ppc7400"
-
- release is the version of Yocto Project.
-
- NOTE:
- The standard SDK installer does not have the "-ext" string as
- part of the filename.
+ - ``release``: version of the Yocto Project.
+ .. note::
+ The standard SDK installer does not have the ``-ext`` string as
+ part of the filename.
The toolchains provided by the Yocto
Project are based off of the ``core-image-sato`` and
@@ -53,16 +46,16 @@ Follow these steps to locate and hand-install the toolchain:
developing against those images.
For example, if your build host is a 64-bit x86 system and you need
- an extended SDK for a 64-bit core2 target, go into the ``x86_64``
+ an extended SDK for a 64-bit core2 QEMU target, go into the ``x86_64``
folder and download the following installer::
- poky-glibc-x86_64-core-image-sato-core2-64-toolchain-ext-&DISTRO;.sh
+ poky-glibc-x86_64-core-image-sato-core2-64-qemux86-64-toolchain-&DISTRO;.sh
-4. *Run the Installer:* Be sure you have execution privileges and run
- the installer. Following is an example from the ``Downloads``
+#. *Run the Installer:* Be sure you have execution privileges and run
+ the installer. Here is an example from the ``Downloads``
directory::
- $ ~/Downloads/poky-glibc-x86_64-core-image-sato-core2-64-toolchain-ext-&DISTRO;.sh
+ $ ~/Downloads/poky-glibc-x86_64-core-image-sato-core2-64-qemux86-64-toolchain-&DISTRO;.sh
During execution of the script, you choose the root location for the
toolchain. See the
@@ -162,12 +155,12 @@ build the SDK installer. Follow these steps:
variable inside your ``local.conf`` file before building the
SDK installer. Doing so ensures that the eventual SDK
installation process installs the appropriate library packages
- as part of the SDK. Following is an example using ``libc``
+ as part of the SDK. Here is an example using ``libc``
static development libraries: TOOLCHAIN_TARGET_TASK:append = "
libc-staticdev"
-7. *Run the Installer:* You can now run the SDK installer from
- ``tmp/deploy/sdk`` in the Build Directory. Following is an example::
+#. *Run the Installer:* You can now run the SDK installer from
+ ``tmp/deploy/sdk`` in the :term:`Build Directory`. Here is an example::
$ cd poky/build/tmp/deploy/sdk
$ ./poky-glibc-x86_64-core-image-sato-core2-64-toolchain-ext-&DISTRO;.sh
@@ -206,21 +199,14 @@ Follow these steps to extract the root filesystem:
also contain flattened root filesystem image files (``*.ext4``),
which you can use with QEMU directly.
- The pre-built root filesystem image files follow these naming
- conventions::
-
- core-image-profile-arch.tar.bz2
+ The pre-built root filesystem image files follow the
+ ``core-image-profile-machine.tar.bz2`` naming convention:
- Where:
- profile is the filesystem image's profile:
- lsb, lsb-dev, lsb-sdk, minimal, minimal-dev, minimal-initramfs,
- sato, sato-dev, sato-sdk, sato-sdk-ptest. For information on
- these types of image profiles, see the "Images" chapter in
- the Yocto Project Reference Manual.
+ - ``profile``: filesystem image's profile, such as ``minimal``,
+ ``minimal-dev`` or ``sato``. For information on these types of image
+ profiles, see the "Images" chapter in the Yocto Project Reference Manual.
- arch is a string representing the target architecture:
- beaglebone-yocto, beaglebone-yocto-lsb, edgerouter, edgerouter-lsb,
- genericx86, genericx86-64, genericx86-64-lsb, genericx86-lsb and qemu*.
+ - ``machine``: same string as the name of the parent download directory.
The root filesystems
provided by the Yocto Project are based off of the
@@ -239,7 +225,7 @@ Follow these steps to extract the root filesystem:
This script is located in the top-level directory in which you
installed the toolchain (e.g. ``poky_sdk``).
- Following is an example based on the toolchain installed in the
+ Here is an example based on the toolchain installed in the
":ref:`sdk-manual/appendix-obtain:locating pre-built sdk installers`" section::
$ source poky_sdk/environment-setup-core2-64-poky-linux
@@ -247,7 +233,7 @@ Follow these steps to extract the root filesystem:
3. *Extract the Root Filesystem:* Use the ``runqemu-extract-sdk``
command and provide the root filesystem image.
- Following is an example command that extracts the root filesystem
+ Here is an example command that extracts the root filesystem
from a previously built root filesystem image that was downloaded
from the :yocto_dl:`Index of Releases </releases/yocto/yocto-&DISTRO;/machines/>`.
This command extracts the root filesystem into the ``core2-64-sato``
diff --git a/documentation/sdk-manual/extensible.rst b/documentation/sdk-manual/extensible.rst
index c5970f74fa..7edb3eb4b4 100644
--- a/documentation/sdk-manual/extensible.rst
+++ b/documentation/sdk-manual/extensible.rst
@@ -102,16 +102,7 @@ must be writable for whichever users need to use the SDK.
The following command shows how to run the installer given a toolchain
tarball for a 64-bit x86 development host system and a 64-bit x86 target
architecture. The example assumes the SDK installer is located in
-``~/Downloads/`` and has execution rights.
-
-.. note::
-
- If you do not have write permissions for the directory into which you
- are installing the SDK, the installer notifies you and exits. For
- that case, set up the proper permissions in the directory and run the
- installer again.
-
-::
+``~/Downloads/`` and has execution rights::
$ ./Downloads/poky-glibc-x86_64-core-image-minimal-core2-64-toolchain-ext-2.5.sh
Poky (Yocto Project Reference Distro) Extensible SDK installer version 2.5
@@ -132,6 +123,13 @@ architecture. The example assumes the SDK installer is located in
Each time you wish to use the SDK in a new shell session, you need to source the environment setup script e.g.
$ . /home/scottrif/poky_sdk/environment-setup-core2-64-poky-linux
+.. note::
+
+ If you do not have write permissions for the directory into which you
+ are installing the SDK, the installer notifies you and exits. For
+ that case, set up the proper permissions in the directory and run the
+ installer again.
+
Running the Extensible SDK Environment Setup Script
===================================================
@@ -154,11 +152,9 @@ script is for an IA-based target machine using i586 tuning::
SDK environment now set up; additionally you may now run devtool to perform development tasks.
Run devtool --help for further details.
-Running the setup script defines many environment variables needed in
-order to use the SDK (e.g. ``PATH``,
-:term:`CC`,
-:term:`LD`, and so forth). If you want to
-see all the environment variables the script exports, examine the
+Running the setup script defines many environment variables needed in order to
+use the SDK (e.g. ``PATH``, :term:`CC`, :term:`LD`, and so forth). If you want
+to see all the environment variables the script exports, examine the
installation file itself.
Using ``devtool`` in Your SDK Workflow
@@ -172,11 +168,8 @@ system.
.. note::
- The use of
- devtool
- is not limited to the extensible SDK. You can use
- devtool
- to help you easily develop any project whose build output must be
+ The use of ``devtool`` is not limited to the extensible SDK. You can use
+ ``devtool`` to help you easily develop any project whose build output must be
part of an image built using the build system.
The ``devtool`` command line is organized similarly to
@@ -186,15 +179,10 @@ all the commands.
.. note::
- See the "
- devtool
-  Quick Reference
- " in the Yocto Project Reference Manual for a
- devtool
- quick reference.
+ See the ":doc:`/ref-manual/devtool-reference`"
+ section in the Yocto Project Reference Manual.
-Three ``devtool`` subcommands provide entry-points into
-development:
+Three ``devtool`` subcommands provide entry-points into development:
- *devtool add*: Assists in adding new software to be built.
@@ -233,9 +221,9 @@ shows common development flows you would use with the ``devtool add``
command:
.. image:: figures/sdk-devtool-add-flow.png
- :align: center
+ :width: 100%
-1. *Generating the New Recipe*: The top part of the flow shows three
+#. *Generating the New Recipe*: The top part of the flow shows three
scenarios by which you could use ``devtool add`` to generate a recipe
based on existing source code.
@@ -252,7 +240,7 @@ command:
- *Left*: The left scenario in the figure represents a common
situation where the source code does not exist locally and needs
to be extracted. In this situation, the source code is extracted
- to the default workspace - you do not want the files in some
+ to the default workspace --- you do not want the files in some
specific location outside of the workspace. Thus, everything you
need will be located in the workspace::
@@ -267,13 +255,12 @@ command:
- *Middle*: The middle scenario in the figure also represents a
situation where the source code does not exist locally. In this
case, the code is again upstream and needs to be extracted to some
- local area - this time outside of the default workspace.
+ local area --- this time outside of the default workspace.
.. note::
- If required,
- devtool
- always creates a Git repository locally during the extraction.
+ If required, ``devtool`` always creates a Git repository locally
+ during the extraction.
Furthermore, the first positional argument ``srctree`` in this case
identifies where the ``devtool add`` command will locate the
@@ -282,8 +269,7 @@ command:
$ devtool add recipe srctree fetchuri
- In summary,
- the source code is pulled from fetchuri and extracted into the
+ In summary, the source code is pulled from fetchuri and extracted into the
location defined by ``srctree`` as a local Git repository.
Within workspace, ``devtool`` creates a recipe named recipe along
@@ -302,28 +288,26 @@ command:
recipe for the code and places the recipe into the workspace.
Because the extracted source code already exists, ``devtool`` does
- not try to relocate the source code into the workspace - only the
+ not try to relocate the source code into the workspace --- only the
new recipe is placed in the workspace.
Aside from a recipe folder, the command also creates an associated
append folder and places an initial ``*.bbappend`` file within.
-2. *Edit the Recipe*: You can use ``devtool edit-recipe`` to open up the
+#. *Edit the Recipe*: You can use ``devtool edit-recipe`` to open up the
editor as defined by the ``$EDITOR`` environment variable and modify
the file::
$ devtool edit-recipe recipe
- From within the editor, you
- can make modifications to the recipe that take effect when you build
- it later.
+ From within the editor, you can make modifications to the recipe that
+ take effect when you build it later.
-3. *Build the Recipe or Rebuild the Image*: The next step you take
+#. *Build the Recipe or Rebuild the Image*: The next step you take
depends on what you are going to do with the new code.
If you need to eventually move the build output to the target
- hardware, use the following ``devtool`` command:
- :;
+ hardware, use the following ``devtool`` command::
$ devtool build recipe
@@ -334,7 +318,7 @@ command:
$ devtool build-image image
-4. *Deploy the Build Output*: When you use the ``devtool build`` command
+#. *Deploy the Build Output*: When you use the ``devtool build`` command
to build out your recipe, you probably want to see if the resulting
build output works as expected on the target hardware.
@@ -348,20 +332,22 @@ command:
development machine.
You can deploy your build output to that target hardware by using the
- ``devtool deploy-target`` command: $ devtool deploy-target recipe
- target The target is a live target machine running as an SSH server.
+ ``devtool deploy-target`` command::
+
+ $ devtool deploy-target recipe target
+
+ The target is a live target machine running as an SSH server.
You can, of course, also deploy the image you build to actual
hardware by using the ``devtool build-image`` command. However,
``devtool`` does not provide a specific command that allows you to
deploy the image to actual hardware.
-5. *Finish Your Work With the Recipe*: The ``devtool finish`` command
+#. *Finish Your Work With the Recipe*: The ``devtool finish`` command
creates any patches corresponding to commits in the local Git
repository, moves the new recipe to a more permanent layer, and then
resets the recipe so that the recipe is built normally rather than
- from the workspace.
- ::
+ from the workspace::
$ devtool finish recipe layer
@@ -379,11 +365,9 @@ command:
.. note::
- You can use the
- devtool reset
- command to put things back should you decide you do not want to
- proceed with your work. If you do use this command, realize that
- the source tree is preserved.
+ You can use the ``devtool reset`` command to put things back should you
+ decide you do not want to proceed with your work. If you do use this
+ command, realize that the source tree is preserved.
Use ``devtool modify`` to Modify the Source of an Existing Component
--------------------------------------------------------------------
@@ -401,9 +385,9 @@ diagram shows common development flows for the ``devtool modify``
command:
.. image:: figures/sdk-devtool-modify-flow.png
- :align: center
+ :width: 100%
-1. *Preparing to Modify the Code*: The top part of the flow shows three
+#. *Preparing to Modify the Code*: The top part of the flow shows three
scenarios by which you could use ``devtool modify`` to prepare to
work on source files. Each scenario assumes the following:
@@ -430,11 +414,9 @@ command:
$ devtool modify recipe
- Once
- ``devtool``\ locates the recipe, ``devtool`` uses the recipe's
- :term:`SRC_URI` statements to
- locate the source code and any local patch files from other
- developers.
+ Once ``devtool`` locates the recipe, ``devtool`` uses the recipe's
+ :term:`SRC_URI` statements to locate the source code and any local
+ patch files from other developers.
With this scenario, there is no ``srctree`` argument. Consequently, the
default behavior of the ``devtool modify`` command is to extract
@@ -470,11 +452,7 @@ command:
.. note::
- You cannot provide a URL for
- srctree
- using the
- devtool
- command.
+ You cannot provide a URL for ``srctree`` using the ``devtool`` command.
As with all extractions, the command uses the recipe's :term:`SRC_URI`
statements to locate the source files and any associated patch
@@ -512,11 +490,11 @@ command:
append file for the recipe in the ``devtool`` workspace. The
recipe and the source code remain in their original locations.
-2. *Edit the Source*: Once you have used the ``devtool modify`` command,
+#. *Edit the Source*: Once you have used the ``devtool modify`` command,
you are free to make changes to the source files. You can use any
editor you like to make and save your source code modifications.
-3. *Build the Recipe or Rebuild the Image*: The next step you take
+#. *Build the Recipe or Rebuild the Image*: The next step you take
depends on what you are going to do with the new code.
If you need to eventually move the build output to the target
@@ -527,9 +505,11 @@ command:
On the other hand, if you want an image to contain the recipe's
packages from the workspace for immediate deployment onto a device
(e.g. for testing purposes), you can use the ``devtool build-image``
- command: $ devtool build-image image
+ command::
+
+ $ devtool build-image image
-4. *Deploy the Build Output*: When you use the ``devtool build`` command
+#. *Deploy the Build Output*: When you use the ``devtool build`` command
to build out your recipe, you probably want to see if the resulting
build output works as expected on target hardware.
@@ -554,13 +534,12 @@ command:
``devtool`` does not provide a specific command to deploy the image
to actual hardware.
-5. *Finish Your Work With the Recipe*: The ``devtool finish`` command
+#. *Finish Your Work With the Recipe*: The ``devtool finish`` command
creates any patches corresponding to commits in the local Git
repository, updates the recipe to point to them (or creates a
``.bbappend`` file to do so, depending on the specified destination
layer), and then resets the recipe so that the recipe is built
- normally rather than from the workspace.
- ::
+ normally rather than from the workspace::
$ devtool finish recipe layer
@@ -568,8 +547,7 @@ command:
Any changes you want to turn into patches must be staged and
committed within the local Git repository before you use the
- devtool finish
- command.
+ ``devtool finish`` command.
Because there is no need to move the recipe, ``devtool finish``
either updates the original recipe in the original layer or the
@@ -584,11 +562,9 @@ command:
.. note::
- You can use the
- devtool reset
- command to put things back should you decide you do not want to
- proceed with your work. If you do use this command, realize that
- the source tree is preserved.
+ You can use the ``devtool reset`` command to put things back should you
+ decide you do not want to proceed with your work. If you do use this
+ command, realize that the source tree is preserved.
Use ``devtool upgrade`` to Create a Version of the Recipe that Supports a Newer Version of the Software
-------------------------------------------------------------------------------------------------------
@@ -602,27 +578,25 @@ counterparts.
.. note::
- Several methods exist by which you can upgrade recipes -
- ``devtool upgrade``
- happens to be one. You can read about all the methods by which you
- can upgrade recipes in the
- :ref:`dev-manual/common-tasks:upgrading recipes` section
- of the Yocto Project Development Tasks Manual.
+ Several methods exist by which you can upgrade recipes ---
+ ``devtool upgrade`` happens to be one. You can read about all the methods by
+ which you can upgrade recipes in the
+ :ref:`dev-manual/upgrading-recipes:upgrading recipes` section of the Yocto
+ Project Development Tasks Manual.
-The ``devtool upgrade`` command is flexible enough to allow you to
-specify source code revision and versioning schemes, extract code into
-or out of the ``devtool``
-:ref:`devtool-the-workspace-layer-structure`,
-and work with any source file forms that the
-:ref:`bitbake:bitbake-user-manual/bitbake-user-manual-fetching:fetchers` support.
+The ``devtool upgrade`` command is flexible enough to allow you to specify
+source code revision and versioning schemes, extract code into or out of the
+``devtool`` :ref:`devtool-the-workspace-layer-structure`, and work with any
+source file forms that the
+:ref:`bitbake-user-manual/bitbake-user-manual-fetching:fetchers` support.
The following diagram shows the common development flow used with the
``devtool upgrade`` command:
.. image:: figures/sdk-devtool-upgrade-flow.png
- :align: center
+ :width: 100%
-1. *Initiate the Upgrade*: The top part of the flow shows the typical
+#. *Initiate the Upgrade*: The top part of the flow shows the typical
scenario by which you use the ``devtool upgrade`` command. The
following conditions exist:
@@ -674,7 +648,7 @@ The following diagram shows the common development flow used with the
are incorporated into the build the next time you build the software
just as are other changes you might have made to the source.
-2. *Resolve any Conflicts created by the Upgrade*: Conflicts could happen
+#. *Resolve any Conflicts created by the Upgrade*: Conflicts could happen
after upgrading the software to a new version. Conflicts occur
if your recipe specifies some patch files in :term:`SRC_URI` that
conflict with changes made in the new version of the software. For
@@ -685,7 +659,7 @@ The following diagram shows the common development flow used with the
conflicts created through use of a newer or different version of the
software.
-3. *Build the Recipe or Rebuild the Image*: The next step you take
+#. *Build the Recipe or Rebuild the Image*: The next step you take
depends on what you are going to do with the new code.
If you need to eventually move the build output to the target
@@ -700,7 +674,7 @@ The following diagram shows the common development flow used with the
$ devtool build-image image
-4. *Deploy the Build Output*: When you use the ``devtool build`` command
+#. *Deploy the Build Output*: When you use the ``devtool build`` command
or ``bitbake`` to build your recipe, you probably want to see if the
resulting build output works as expected on target hardware.
@@ -714,15 +688,18 @@ The following diagram shows the common development flow used with the
development machine.
You can deploy your build output to that target hardware by using the
- ``devtool deploy-target`` command: $ devtool deploy-target recipe
- target The target is a live target machine running as an SSH server.
+ ``devtool deploy-target`` command::
+
+ $ devtool deploy-target recipe target
+
+ The target is a live target machine running as an SSH server.
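+
+   For instance, assuming a hypothetical target reachable over SSH at
+   ``192.168.7.2``, the command might be::
+
+      $ devtool deploy-target recipe root@192.168.7.2
+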
You can, of course, also deploy the image you build using the
``devtool build-image`` command to actual hardware. However,
``devtool`` does not provide a specific command that allows you to do
this.
-5. *Finish Your Work With the Recipe*: The ``devtool finish`` command
+#. *Finish Your Work With the Recipe*: The ``devtool finish`` command
creates any patches corresponding to commits in the local Git
repository, moves the new recipe to a more permanent layer, and then
resets the recipe so that the recipe is built normally rather than
@@ -734,8 +711,7 @@ The following diagram shows the common development flow used with the
If you specify a destination layer that is the same as the original
source, then the old version of the recipe and associated files are
- removed prior to adding the new version.
- ::
+ removed prior to adding the new version::
$ devtool finish recipe layer
@@ -750,11 +726,9 @@ The following diagram shows the common development flow used with the
.. note::
- You can use the
- devtool reset
- command to put things back should you decide you do not want to
- proceed with your work. If you do use this command, realize that
- the source tree is preserved.
+ You can use the ``devtool reset`` command to put things back should you
+ decide you do not want to proceed with your work. If you do use this
+ command, realize that the source tree is preserved.
A Closer Look at ``devtool add``
================================
@@ -822,10 +796,9 @@ run ``devtool add`` again and provide the name or the version.
Dependency Detection and Mapping
--------------------------------
-The ``devtool add`` command attempts to detect build-time dependencies
-and map them to other recipes in the system. During this mapping, the
-command fills in the names of those recipes as part of the
-:term:`DEPENDS` variable within the
+The ``devtool add`` command attempts to detect build-time dependencies and map
+them to other recipes in the system. During this mapping, the command fills in
+the names of those recipes as part of the :term:`DEPENDS` variable within the
recipe. If a dependency cannot be mapped, ``devtool`` places a comment
in the recipe indicating such. The inability to map a dependency can
result from naming not being recognized or because the dependency simply
@@ -842,10 +815,8 @@ following to your recipe::
.. note::
- The
- devtool add
- command often cannot distinguish between mandatory and optional
- dependencies. Consequently, some of the detected dependencies might
+ The ``devtool add`` command often cannot distinguish between mandatory and
+ optional dependencies. Consequently, some of the detected dependencies might
in fact be optional. When in doubt, consult the documentation or the
configure script for the software the recipe is building for further
details. In some cases, you might find you can substitute the
@@ -855,16 +826,14 @@ following to your recipe::
License Detection
-----------------
-The ``devtool add`` command attempts to determine if the software you
-are adding is able to be distributed under a common, open-source
-license. If so, the command sets the
-:term:`LICENSE` value accordingly.
+The ``devtool add`` command attempts to determine if the software you are
+adding is able to be distributed under a common, open-source license. If
+so, the command sets the :term:`LICENSE` value accordingly.
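+
+For illustration only, for a hypothetical package released under the MIT
+license, the generated recipe might then contain::
+
+   LICENSE = "MIT"
+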
You should double-check the value added by the command against the
documentation or source files for the software you are building and, if
necessary, update that :term:`LICENSE` value.
-The ``devtool add`` command also sets the
-:term:`LIC_FILES_CHKSUM`
+The ``devtool add`` command also sets the :term:`LIC_FILES_CHKSUM`
value to point to all files that appear to be license-related. Realize
that license statements often appear in comments at the top of source
files or within the documentation. In such cases, the command does not
@@ -944,10 +913,9 @@ mind:
Adding Native Tools
-------------------
-Often, you need to build additional tools that run on the :term:`Build
-Host` as opposed to
-the target. You should indicate this requirement by using one of the
-following methods when you run ``devtool add``:
+Often, you need to build additional tools that run on the :term:`Build Host`
+as opposed to the target. You should indicate this requirement by using one of
+the following methods when you run ``devtool add``:
- Specify the name of the recipe such that it ends with "-native".
Specifying the name like this produces a recipe that only builds for
@@ -971,8 +939,7 @@ Adding Node.js Modules
----------------------
You can use the ``devtool add`` command two different ways to add
-Node.js modules: 1) Through ``npm`` and, 2) from a repository or local
-source.
+Node.js modules: through ``npm`` or from a repository or local source.
Use the following form to add Node.js modules through ``npm``::
@@ -987,7 +954,7 @@ these behaviors ensure the reproducibility and integrity of the build.
.. note::
- - You must use quotes around the URL. The ``devtool add`` does not
+ - You must use quotes around the URL. ``devtool add`` does not
require the quotes, but the shell considers ";" as a splitter
between multiple commands. Thus, without the quotes,
``devtool add`` does not receive the other parts, which results in
@@ -1002,9 +969,8 @@ repository or local source tree. To add modules this way, use
$ devtool add https://github.com/diversario/node-ssdp
-In this example, ``devtool``
-fetches the specified Git repository, detects the code as Node.js code,
-fetches dependencies using ``npm``, and sets
+In this example, ``devtool`` fetches the specified Git repository, detects the
+code as Node.js code, fetches dependencies using ``npm``, and sets
:term:`SRC_URI` accordingly.
Working With Recipes
@@ -1013,17 +979,17 @@ Working With Recipes
When building a recipe using the ``devtool build`` command, the typical
build progresses as follows:
-1. Fetch the source
+#. Fetch the source
-2. Unpack the source
+#. Unpack the source
-3. Configure the source
+#. Configure the source
-4. Compile the source
+#. Compile the source
-5. Install the build output
+#. Install the build output
-6. Package the installed output
+#. Package the installed output
For recipes in the workspace, fetching and unpacking is disabled as the
source tree has already been prepared and is persistent. Each of these
@@ -1038,9 +1004,8 @@ does not include complete instructions for building the software.
Instead, common functionality is encapsulated in classes inherited with
the ``inherit`` directive. This technique leaves the recipe to describe
just the things that are specific to the software being built. There is
-a :ref:`base <ref-classes-base>` class that
-is implicitly inherited by all recipes and provides the functionality
-that most recipes typically need.
+a :ref:`ref-classes-base` class that is implicitly inherited by all recipes
+and provides the functionality that most recipes typically need.
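+
+As an illustration, a hypothetical recipe for an Autotools-based project does
+not spell out the configure, compile and install logic itself; it simply
+inherits the corresponding class::
+
+   inherit autotools
+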
The remainder of this section presents information useful when working
with recipes.
@@ -1066,9 +1031,9 @@ links created within the source tree:
``${``\ :term:`D`\ ``}``.
- ``sysroot-destdir/``: Contains a subset of files installed within
- ``do_install`` that have been put into the shared sysroot. For
+ :ref:`ref-tasks-install` that have been put into the shared sysroot. For
more information, see the
- ":ref:`dev-manual/common-tasks:sharing files between recipes`" section.
+ ":ref:`dev-manual/new-recipe:sharing files between recipes`" section.
- ``packages-split/``: Contains subdirectories for each package
produced by the recipe. For more information, see the
@@ -1082,18 +1047,13 @@ Setting Configure Arguments
If the software your recipe is building uses GNU autoconf, then a fixed
set of arguments is passed to it to enable cross-compilation plus any
-extras specified by
-:term:`EXTRA_OECONF` or
-:term:`PACKAGECONFIG_CONFARGS`
+extras specified by :term:`EXTRA_OECONF` or :term:`PACKAGECONFIG_CONFARGS`
set within the recipe. If you wish to pass additional options, add them
to :term:`EXTRA_OECONF` or :term:`PACKAGECONFIG_CONFARGS`. Other supported build
-tools have similar variables (e.g.
-:term:`EXTRA_OECMAKE` for
-CMake, :term:`EXTRA_OESCONS`
-for Scons, and so forth). If you need to pass anything on the ``make``
-command line, you can use :term:`EXTRA_OEMAKE` or the
-:term:`PACKAGECONFIG_CONFARGS`
-variables to do so.
+tools have similar variables (e.g. :term:`EXTRA_OECMAKE` for CMake,
+:term:`EXTRA_OESCONS` for Scons, and so forth). If you need to pass anything on
+the ``make`` command line, you can use :term:`EXTRA_OEMAKE` or the
+:term:`PACKAGECONFIG_CONFARGS` variables to do so.
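+
+For illustration only, a hypothetical recipe could pass an extra configure
+option and an extra ``make`` variable like this::
+
+   EXTRA_OECONF += "--disable-static"
+   EXTRA_OEMAKE += "VERBOSE=1"
+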
You can use the ``devtool configure-help`` command to help you set the
arguments listed in the previous paragraph. The command determines the
@@ -1117,8 +1077,7 @@ the build host.
Recipes should never write files directly into the sysroot. Instead,
files should be installed into standard locations during the
-:ref:`ref-tasks-install` task within
-the ``${``\ :term:`D`\ ``}`` directory. A
+:ref:`ref-tasks-install` task within the ``${``\ :term:`D`\ ``}`` directory. A
subset of these files automatically goes into the sysroot. The reason
for this limitation is that almost all files that go into the sysroot
are cataloged in manifests in order to ensure they can be removed later
@@ -1134,14 +1093,12 @@ the target device, it is important to understand packaging because the
contents of the image are expressed in terms of packages and not
recipes.
-During the :ref:`ref-tasks-package`
-task, files installed during the
-:ref:`ref-tasks-install` task are
-split into one main package, which is almost always named the same as
-the recipe, and into several other packages. This separation exists
-because not all of those installed files are useful in every image. For
-example, you probably do not need any of the documentation installed in
-a production image. Consequently, for each recipe the documentation
+During the :ref:`ref-tasks-package` task, files installed during the
+:ref:`ref-tasks-install` task are split into one main package, which is almost
+always named the same as the recipe, and into several other packages. This
+separation exists because not all of those installed files are useful in every
+image. For example, you probably do not need any of the documentation installed
+in a production image. Consequently, for each recipe the documentation
files are separated into a ``-doc`` package. Recipes that package
software containing optional modules or plugins might undergo additional
package splitting as well.
@@ -1149,8 +1106,7 @@ package splitting as well.
After building a recipe, you can see where files have gone by looking in
the ``oe-workdir/packages-split`` directory, which contains a
subdirectory for each package. Apart from some advanced cases, the
-:term:`PACKAGES` and
-:term:`FILES` variables controls
+:term:`PACKAGES` and :term:`FILES` variables control
splitting. The :term:`PACKAGES` variable lists all of the packages to be
produced, while the :term:`FILES` variable specifies which files to include
in each package by using an override to specify the package. For
@@ -1192,16 +1148,11 @@ target machine.
.. note::
- The
- devtool deploy-target
- and
- devtool undeploy-target
- commands do not currently interact with any package management system
- on the target device (e.g. RPM or OPKG). Consequently, you should not
- intermingle
- devtool deploy-target
- and package manager operations on the target device. Doing so could
- result in a conflicting set of files.
+ The ``devtool deploy-target`` and ``devtool undeploy-target`` commands do
+ not currently interact with any package management system on the target
+ device (e.g. RPM or OPKG). Consequently, you should not intermingle
+ ``devtool deploy-target`` and package manager operations on the target
+ device. Doing so could result in a conflicting set of files.
Installing Additional Items Into the Extensible SDK
===================================================
@@ -1215,9 +1166,12 @@ need to link to libGL but you are not sure which recipe provides libGL.
You can use the following command to find out::
   $ devtool search libGL
+   mesa                  A free implementation of the OpenGL API
+
+Once you know the recipe
+(i.e. ``mesa`` in this example), you can install it.
-A free implementation of the OpenGL API Once you know the recipe
-(i.e. ``mesa`` in this example), you can install it::
+::
$ devtool sdk-install mesa
@@ -1244,13 +1198,13 @@ To update your installed SDK, use ``devtool`` as follows::
$ devtool sdk-update
-The previous command assumes your SDK provider has set the
-default update URL for you through the :term:`SDK_UPDATE_URL`
-variable as described in the
+The previous command assumes your SDK provider has set the default update URL
+for you through the :term:`SDK_UPDATE_URL` variable as described in the
":ref:`sdk-manual/appendix-customizing:Providing Updates to the Extensible SDK After Installation`"
section. If the SDK provider has not set that default URL, you need to
-specify it yourself in the command as follows: $ devtool sdk-update
-path_to_update_directory
+specify it yourself in the command as follows::
+
+ $ devtool sdk-update path_to_update_directory
.. note::
@@ -1267,15 +1221,15 @@ those customers need an SDK that has custom libraries. In such a case,
you can produce a derivative SDK based on the currently installed SDK
fairly easily by following these steps:
-1. If necessary, install an extensible SDK that you want to use as a
+#. If necessary, install an extensible SDK that you want to use as a
base for your derivative SDK.
-2. Source the environment script for the SDK.
+#. Source the environment script for the SDK.
-3. Add the extra libraries or other components you want by using the
+#. Add the extra libraries or other components you want by using the
``devtool add`` command.
-4. Run the ``devtool build-sdk`` command.
+#. Run the ``devtool build-sdk`` command.
The previous steps take the recipes added to the workspace and construct
a new SDK installer that contains those recipes and the resulting binary
diff --git a/documentation/sdk-manual/intro.rst b/documentation/sdk-manual/intro.rst
index 802d3f3d42..4310e133fb 100644
--- a/documentation/sdk-manual/intro.rst
+++ b/documentation/sdk-manual/intro.rst
@@ -66,7 +66,7 @@ The SDK development environment consists of the following:
In summary, the extensible and standard SDK share many features.
However, the extensible SDK has powerful development tools to help you
-more quickly develop applications. Following is a table that summarizes
+more quickly develop applications. Here is a table that summarizes
the primary differences between the standard and extensible SDK types
when considering which to build:
diff --git a/documentation/sdk-manual/working-projects.rst b/documentation/sdk-manual/working-projects.rst
index 276daa9bb6..296d7d8464 100644
--- a/documentation/sdk-manual/working-projects.rst
+++ b/documentation/sdk-manual/working-projects.rst
@@ -172,19 +172,19 @@ variables and Makefile variables during development.
The main point of this section is to explain the following three cases
regarding variable behavior:
-- *Case 1 - No Variables Set in the Makefile Map to Equivalent
+- *Case 1 --- No Variables Set in the Makefile Map to Equivalent
Environment Variables Set in the SDK Setup Script:* Because matching
variables are not specifically set in the ``Makefile``, the variables
retain their values based on the environment setup script.
-- *Case 2 - Variables Are Set in the Makefile that Map to Equivalent
+- *Case 2 --- Variables Are Set in the Makefile that Map to Equivalent
Environment Variables from the SDK Setup Script:* Specifically
setting matching variables in the ``Makefile`` during the build
results in the environment settings of the variables being
overwritten. In this case, the variables you set in the ``Makefile``
are used.
-- *Case 3 - Variables Are Set Using the Command Line that Map to
+- *Case 3 --- Variables Are Set Using the Command Line that Map to
Equivalent Environment Variables from the SDK Setup Script:*
Executing the ``Makefile`` from the command line results in the
environment variables being overwritten. In this case, the
@@ -286,14 +286,14 @@ example:
# CC=i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux
# CC="gcc"
all: main.o module.o
- ${CC} main.o module.o -o target_bin
+ ${CC} main.o module.o -o target_bin
main.o: main.c module.h
- ${CC} -I . -c main.c
+ ${CC} -I . -c main.c
    module.o: module.c module.h
-   ${CC} -I . -c module.c
+   ${CC} -I . -c module.c
clean:
- rm -rf *.o
- rm target_bin
+ rm -rf *.o
+ rm target_bin
4. *Make the Project:* Use the ``make`` command to create the binary
output file. Because variables are commented out in the Makefile, the
diff --git a/documentation/standards.md b/documentation/standards.md
index a2274f6d6e..d3b25adfab 100644
--- a/documentation/standards.md
+++ b/documentation/standards.md
@@ -5,6 +5,21 @@ documentation is created.
It is currently a work in progress.
+## Automatic style validation
+
+There is an ongoing effort to automate style validation
+through [Vale](https://vale.sh/). To try it, run:
+
+ $ make stylecheck
+
+Note that this just applies to text. Therefore, the syntax
+conventions described below still apply.
+
+If you wish to add a new word to an "accept.txt" file
+(./styles/config/vocabularies/<Vocab>/accept.txt),
+make sure the spelling and capitalization matches
+what Wikipedia or the project defining this word uses.
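+
+For example, to accept a hypothetical word "Zephyr", one way is to run
+the following from the `documentation` directory:
+
+    $ echo "Zephyr" >> styles/config/vocabularies/OpenSource/accept.txt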
+
## Text standards
This section has not been filled yet
diff --git a/documentation/styles/config/vocabularies/OpenSource/accept.txt b/documentation/styles/config/vocabularies/OpenSource/accept.txt
new file mode 100644
index 0000000000..e378fbf79b
--- /dev/null
+++ b/documentation/styles/config/vocabularies/OpenSource/accept.txt
@@ -0,0 +1,20 @@
+autovivification
+blkparse
+blktrace
+callee
+debugfs
+ftrace
+KernelShark
+Kprobe
+LTTng
+perf
+profiler
+subcommand
+subnode
+superset
+Sysprof
+systemd
+toolchain
+tracepoint
+Uprobe
+wget
diff --git a/documentation/styles/config/vocabularies/Yocto/accept.txt b/documentation/styles/config/vocabularies/Yocto/accept.txt
new file mode 100644
index 0000000000..ca622ba412
--- /dev/null
+++ b/documentation/styles/config/vocabularies/Yocto/accept.txt
@@ -0,0 +1,5 @@
+BitBake
+BSP
+crosstap
+OpenEmbedded
+Yocto
diff --git a/documentation/template/template.svg b/documentation/template/template.svg
index 43043e3afb..50715c08b0 100644
--- a/documentation/template/template.svg
+++ b/documentation/template/template.svg
@@ -1019,7 +1019,7 @@
id="tspan1183-1-8"
x="-52.348656"
y="518.42615"
- style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:37.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;stroke:none">Objets</tspan></text>
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:37.3333px;font-family:'Liberation Sans';-inkscape-font-specification:'Liberation Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;stroke:none">Objects</tspan></text>
<text
xml:space="preserve"
style="font-weight:bold;font-size:13.3333px;line-height:125%;font-family:'Nimbus Roman';-inkscape-font-specification:'Nimbus Roman, Bold';letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
diff --git a/documentation/test-manual/intro.rst b/documentation/test-manual/intro.rst
index 9c1a93cd40..bb267d2d88 100644
--- a/documentation/test-manual/intro.rst
+++ b/documentation/test-manual/intro.rst
@@ -14,15 +14,13 @@ release works as intended. All the project's testing infrastructure and
processes are publicly visible and available so that the community can
see what testing is being performed, how it's being done and the current
status of the tests and the project at any given time. It is intended
-that Other organizations can leverage off the process and testing
+that other organizations can leverage off the process and testing
environment used by the Yocto Project to create their own automated,
production test environment, building upon the foundations from the
project core.
-Currently, the Yocto Project Test Environment Manual has no projected
-release date. This manual is a work-in-progress and is being initially
-loaded with information from the README files and notes from key
-engineers:
+This manual is a work-in-progress and is being initially loaded with
+information from the README files and notes from key engineers:
- *yocto-autobuilder2:* This
:yocto_git:`README.md </yocto-autobuilder2/tree/README.md>`
@@ -39,7 +37,7 @@ engineers:
As a result, it can be used by any Continuous Improvement (CI) system
to run builds, support getting the correct code revisions, configure
builds and layers, run builds, and collect results. The code is
- independent of any CI system, which means the code can work `Buildbot <https://docs.buildbot.net/0.9.15.post1/>`__,
+ independent of any CI system, which means the code can work `Buildbot <https://docs.buildbot.net/current/>`__,
Jenkins, or others. This repository has a branch per release of the
project defining the tests to run on a per release basis.
@@ -54,8 +52,8 @@ the Autobuilder tests if things work. The Autobuilder builds all test
targets and runs all the tests.
The Yocto Project now uses standard upstream
-`Buildbot <https://docs.buildbot.net/0.9.15.post1/>`__ (version 9) to
-drive its integration and testing. Buildbot Nine has a plug-in interface
+Buildbot (`version 3.8 <https://docs.buildbot.net/3.8.0/>`__) to
+drive its integration and testing. Buildbot has a plug-in interface
that the Yocto Project customizes using code from the
``yocto-autobuilder2`` repository, adding its own console UI plugin. The
resulting UI plug-in allows you to visualize builds in a way suited to
@@ -84,8 +82,8 @@ topology that includes a controller and a cluster of workers:
.. image:: figures/ab-test-cluster.png
:align: center
-Yocto Project Tests - Types of Testing Overview
-===============================================
+Yocto Project Tests --- Types of Testing Overview
+=================================================
The Autobuilder tests different elements of the project by using
the following types of tests:
@@ -93,8 +91,8 @@ the following types of tests:
- *Build Testing:* Tests whether specific configurations build by
varying :term:`MACHINE`,
:term:`DISTRO`, other configuration
- options, and the specific target images being built (or world). Used
- to trigger builds of all the different test configurations on the
+ options, and the specific target images being built (or ``world``). This is
+ used to trigger builds of all the different test configurations on the
Autobuilder. Builds usually cover many different targets for
different architectures, machines, and distributions, as well as
different configurations, such as different init systems. The
@@ -121,7 +119,8 @@ the following types of tests:
$ bitbake image -c testsdkext
- The tests utilize the :ref:`testsdkext <ref-classes-testsdk>` class and the ``do_testsdkext`` task.
+ The tests use the :ref:`ref-classes-testsdk` class and the
+ ``do_testsdkext`` task.
- *Feature Testing:* Various scenario-based tests are run through the
:ref:`OpenEmbedded Self test (oe-selftest) <ref-manual/release-process:Testing and Quality Assurance>`. We test oe-selftest on each of the main distributions
@@ -131,8 +130,8 @@ the following types of tests:
$ bitbake image -c testimage
- The tests utilize the :ref:`testimage* <ref-classes-testimage*>`
- classes and the :ref:`ref-tasks-testimage` task.
+ The tests use the :ref:`ref-classes-testimage`
+ class and the :ref:`ref-tasks-testimage` task.
- *Layer Testing:* The Autobuilder has the possibility to test whether
   specific layers work with the rest of the system. The layers tested
@@ -142,7 +141,7 @@ the following types of tests:
- *Package Testing:* A Package Test (ptest) runs tests against packages
built by the OpenEmbedded build system on the target machine. See the
:ref:`Testing Packages With
- ptest <dev-manual/common-tasks:Testing Packages With ptest>` section
+ ptest <dev-manual/packages:Testing Packages With ptest>` section
in the Yocto Project Development Tasks Manual and the
":yocto_wiki:`Ptest </Ptest>`" Wiki page for more
information on Ptest.
@@ -151,7 +150,7 @@ the following types of tests:
$ bitbake image -c testsdk
- The tests utilize the :ref:`testsdk <ref-classes-testsdk>` class and
+ The tests use the :ref:`ref-classes-testsdk` class and
the ``do_testsdk`` task.
- *Unit Testing:* Unit tests on various components of the system run
@@ -179,7 +178,7 @@ Tests map into the codebase as follows:
$ bitbake-selftest
To skip tests that access the Internet, use the ``BB_SKIP_NETTESTS``
- variable when running "bitbake-selftest" as follows::
+ variable when running ``bitbake-selftest`` as follows::
$ BB_SKIP_NETTESTS=yes bitbake-selftest
@@ -191,31 +190,32 @@ Tests map into the codebase as follows:
Use this option when you wish to skip tests that access the network,
which are mostly necessary to test the fetcher modules. To specify
individual test modules to run, append the test module name to the
- "bitbake-selftest" command. For example, to specify the tests for the
- bb.data.module, run::
+ ``bitbake-selftest`` command. For example, to specify the tests for
+ ``bb.tests.data.DataExpansions``, run::
- $ bitbake-selftest bb.test.data.module
+ $ bitbake-selftest bb.tests.data.DataExpansions
You can also specify individual tests by defining the full name and module
plus the class path of the test, for example::
- $ bitbake-selftest bb.tests.data.TestOverrides.test_one_override
+ $ bitbake-selftest bb.tests.data.DataExpansions.test_one_var
- The tests are based on `Python
- unittest <https://docs.python.org/3/library/unittest.html>`__.
+ The tests are based on
+ `Python unittest <https://docs.python.org/3/library/unittest.html>`__.
- *oe-selftest:*
- These tests use OE to test the workflows, which include testing
specific features, behaviors of tasks, and API unit tests.
- - The tests can take advantage of parallelism through the "-j"
+ - The tests can take advantage of parallelism through the ``-j``
option, which can specify a number of threads to spread the tests
across. Note that all tests from a given class of tests will run
in the same thread. To parallelize large numbers of tests you can
split the class into multiple units.
- - The tests are based on Python unittest.
+ - The tests are based on
+ `Python unittest <https://docs.python.org/3/library/unittest.html>`__.
- The code for the tests resides in
``meta/lib/oeqa/selftest/cases/``.
@@ -225,18 +225,18 @@ Tests map into the codebase as follows:
$ oe-selftest -a
- To run a specific test, use the following command form where
- testname is the name of the specific test::
+ ``testname`` is the name of the specific test::
$ oe-selftest -r <testname>
- For example, the following command would run the tinfoil
- getVar API test::
+ For example, the following command would run the ``tinfoil``
+ ``getVar`` API test::
$ oe-selftest -r tinfoil.TinfoilTests.test_getvar
It is also possible to run a set
of tests. For example the following command will run all of the
- tinfoil tests::
+ ``tinfoil`` tests::
$ oe-selftest -r tinfoil
@@ -271,7 +271,7 @@ Tests map into the codebase as follows:
- These tests build an extended SDK (eSDK), install that eSDK, and
run tests against the eSDK.
- - The code for these tests resides in ``meta/lib/oeqa/esdk``.
+ - The code for these tests resides in ``meta/lib/oeqa/sdkext/cases/``.
- To run the tests, use the following command form::
@@ -298,13 +298,13 @@ Tests map into the codebase as follows:
Git repository.
Use the ``oe-build-perf-report`` command to generate text reports
- and HTML reports with graphs of the performance data. For
- examples, see
- :yocto_dl:`/releases/yocto/yocto-2.7/testresults/buildperf-centos7/perf-centos7.yoctoproject.org_warrior_20190414204758_0e39202.html`
+ and HTML reports with graphs of the performance data. See
+ :yocto_dl:`html </releases/yocto/yocto-4.3/testresults/buildperf-debian11/perf-debian11_nanbield_20231019191258_15b576c410.html>`
and
- :yocto_dl:`/releases/yocto/yocto-2.7/testresults/buildperf-centos7/perf-centos7.yoctoproject.org_warrior_20190414204758_0e39202.txt`.
+ :yocto_dl:`txt </releases/yocto/yocto-4.3/testresults/buildperf-debian11/perf-debian11_nanbield_20231019191258_15b576c410.txt>`
+ examples.
- - The tests are contained in ``lib/oeqa/buildperf/test_basic.py``.
+ - The tests are contained in ``meta/lib/oeqa/buildperf/test_basic.py``.
Test Examples
=============
@@ -312,16 +312,14 @@ Test Examples
This section provides example tests for each of the tests listed in the
:ref:`test-manual/intro:How Tests Map to Areas of Code` section.
-For oeqa tests, testcases for each area reside in the main test
-directory at ``meta/lib/oeqa/selftest/cases`` directory.
+- ``oe-selftest`` testcases reside in the ``meta/lib/oeqa/selftest/cases`` directory.
-For oe-selftest. bitbake testcases reside in the ``lib/bb/tests/``
-directory.
+- ``bitbake-selftest`` testcases reside in the ``bitbake/lib/bb/tests/`` directory.
``bitbake-selftest``
--------------------
-A simple test example from ``lib/bb/tests/data.py`` is::
+A simple test example from ``bitbake/lib/bb/tests/data.py`` is::
class DataExpansions(unittest.TestCase):
def setUp(self):
@@ -334,21 +332,24 @@ A simple test example from ``lib/bb/tests/data.py`` is::
val = self.d.expand("${foo}")
self.assertEqual(str(val), "value_of_foo")
-In this example, a ``DataExpansions`` class of tests is created,
-derived from standard python unittest. The class has a common ``setUp``
-function which is shared by all the tests in the class. A simple test is
-then added to test that when a variable is expanded, the correct value
-is found.
+In this example, a ``DataExpansions`` class of tests is created, derived from
+standard `Python unittest <https://docs.python.org/3/library/unittest.html>`__.
+The class has a common ``setUp`` function which is shared by all the tests in
+the class. A simple test is then added to test that when a variable is
+expanded, the correct value is found.
-Bitbake selftests are straightforward python unittest. Refer to the
-Python unittest documentation for additional information on writing
-these tests at: https://docs.python.org/3/library/unittest.html.
+BitBake selftests are straightforward
+`Python unittest <https://docs.python.org/3/library/unittest.html>`__.
+Refer to the `Python unittest documentation
+<https://docs.python.org/3/library/unittest.html>`__ for additional information
+on writing such tests.
``oe-selftest``
---------------
These tests are more complex due to the setup required behind the scenes
-for full builds. Rather than directly using Python's unittest, the code
+for full builds. Rather than directly using `Python unittest
+<https://docs.python.org/3/library/unittest.html>`__, the code
wraps most of the standard objects. The tests can be simple, such as
testing a command from within the OE build environment using the
following example::
@@ -385,14 +386,14 @@ so tests within a given test class should always run in the same build,
while tests in different classes or modules may be split into different
builds. There is no data store available for these tests since the tests
launch the ``bitbake`` command and exist outside of its context. As a
-result, common bitbake library functions (bb.\*) are also unavailable.
+result, common BitBake library functions (``bb.*``) are also unavailable.
``testimage``
-------------
These tests are run once an image is up and running, either on target
hardware or under QEMU. As a result, they are assumed to be running in a
-target image environment, as opposed to a host build environment. A
+target image environment, as opposed to in a host build environment. A
simple example from ``meta/lib/oeqa/runtime/cases/python.py`` contains
the following::
@@ -407,19 +408,19 @@ the following::
In this example, the ``OERuntimeTestCase`` class wraps
``unittest.TestCase``. Within the test, ``self.target`` represents the
-target system, where commands can be run on it using the ``run()``
+target system, where commands can be run using the ``run()``
method.
-To ensure certain test or package dependencies are met, you can use the
+To ensure certain tests or package dependencies are met, you can use the
``OETestDepends`` and ``OEHasPackage`` decorators. For example, the test
-in this example would only make sense if python3-core is installed in
+in this example would only make sense if ``python3-core`` is installed in
the image.
``testsdk_ext``
---------------
These tests are run against built extensible SDKs (eSDKs). The tests can
-assume that the eSDK environment has already been setup. An example from
+assume that the eSDK environment has already been set up. An example from
``meta/lib/oeqa/sdk/cases/devtool.py`` contains the following::
class DevtoolTest(OESDKExtTestCase):
@@ -466,9 +467,9 @@ following::
output = self._run(cmd)
self.assertEqual(output, "Hello, world\n")
-In this example, if nativesdk-python3-core has been installed into the SDK, the code runs
-the python3 interpreter with a basic command to check it is working
-correctly. The test would only run if python3 is installed in the SDK.
+In this example, if ``nativesdk-python3-core`` has been installed into the SDK,
+the code runs the ``python3`` interpreter with a basic command to check it is
+working correctly. The test would only run if Python3 is installed in the SDK.
``oe-build-perf-test``
----------------------
@@ -512,9 +513,9 @@ an isolated directory.
**Running "cleansstate" is not permitted.**
-This can delete files from SSTATE_DIR which would potentially break
-other builds running in parallel. If this is required, SSTATE_DIR must
-be set to an isolated directory. Alternatively, you can use the "-f"
+This can delete files from :term:`SSTATE_DIR` which would potentially break
+other builds running in parallel. If this is required, :term:`SSTATE_DIR` must
+be set to an isolated directory. Alternatively, you can use the ``-f``
option with the ``bitbake`` command to "taint" tasks by changing the
sstate checksums to ensure sstate cache items will not be reused.
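+
+For illustration, forcing (and thereby tainting) the ``do_compile`` task of a
+hypothetical recipe ``foo`` would look like::
+
+   $ bitbake -f -c compile foo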
diff --git a/documentation/test-manual/reproducible-builds.rst b/documentation/test-manual/reproducible-builds.rst
index 5977366c9e..6994989f7c 100644
--- a/documentation/test-manual/reproducible-builds.rst
+++ b/documentation/test-manual/reproducible-builds.rst
@@ -68,17 +68,6 @@ things we do within the build system to ensure reproducibility include:
- Filtering the tools available from the host's ``PATH`` to only a specific set
of tools, set using the :term:`HOSTTOOLS` variable.
-.. note::
-
- Because of an open bug in GCC, using ``DISTRO_FEATURES:append = " lto"`` or
- adding ``-flto`` (Link Time Optimization) to ``CFLAGS`` makes the resulting
- binary non-reproducible, in that it depends on the full absolute build path
- to ``recipe-sysroot-native``, so installing the Yocto Project in a different
- directory results in a different binary.
-
- This issue is addressed by
- :yocto_bugs:`bug 14481 - Programs built with -flto are not reproducible</show_bug.cgi?id=14481>`.
-
=========================================
Can we prove the project is reproducible?
=========================================
diff --git a/documentation/test-manual/test-process.rst b/documentation/test-manual/test-process.rst
index 4c3b32bfea..7bec5ba828 100644
--- a/documentation/test-manual/test-process.rst
+++ b/documentation/test-manual/test-process.rst
@@ -20,8 +20,8 @@ helps review and test patches and this is his testing tree).
We have two broad categories of test builds, including "full" and
"quick". On the Autobuilder, these can be seen as "a-quick" and
"a-full", simply for ease of sorting in the UI. Use our Autobuilder
-console view to see where me manage most test-related items, available
-at: :yocto_ab:`/typhoon/#/console`.
+:yocto_ab:`console view </typhoon/#/console>` to see where we manage most
+test-related items.
Builds are triggered manually when the test branches are ready. The
builds are monitored by the SWAT team. For additional information, see
@@ -34,24 +34,21 @@ which the result was required.
The Autobuilder does build the ``master`` branch once daily for several
reasons, in particular, to ensure the current ``master`` branch does
-build, but also to keep ``yocto-testresults``
-(:yocto_git:`/yocto-testresults/`),
-buildhistory
-(:yocto_git:`/poky-buildhistory/`), and
-our sstate up to date. On the weekend, there is a master-next build
+build, but also to keep :yocto_git:`yocto-testresults </yocto-testresults/>`,
+:yocto_git:`buildhistory </poky-buildhistory/>`, and
+our sstate up to date. On the weekend, there is a ``master-next`` build
instead to ensure the test results are updated for the less frequently
run targets.
-Performance builds (buildperf-\* targets in the console) are triggered
+Performance builds (``buildperf-*`` targets in the console) are triggered
separately every six hours and automatically push their results to the
-buildstats repository at:
-:yocto_git:`/yocto-buildstats/`.
+:yocto_git:`buildstats </yocto-buildstats/>` repository.
-The 'quick' targets have been selected to be the ones which catch the
-most failures or give the most valuable data. We run 'fast' ptests in
+The "quick" targets have been selected to be the ones which catch the
+most failures or give the most valuable data. We run "fast" ptests in
this case for example but not the ones which take a long time. The quick
-target doesn't include \*-lsb builds for all architectures, some world
-builds and doesn't trigger performance tests or ltp testing. The full
+target doesn't include ``*-lsb`` builds for all architectures, some ``world``
+builds, and doesn't trigger performance tests or ``ltp`` testing. The full
build includes all these things and is slower but more comprehensive.
Release Builds
@@ -67,12 +64,12 @@ that in :ref:`test-manual/test-process:day to day development`, in that the
a-full target of the Autobuilder is used but in addition the form is
configured to generate and publish artifacts and the milestone number,
version, release candidate number and other information is entered. The
-box to "generate an email to QA"is also checked.
+box to "generate an email to QA" is also checked.
-When the build completes, an email is sent out using the send-qa-email
-script in the ``yocto-autobuilder-helper`` repository to the list of
-people configured for that release. Release builds are placed into a
-directory in https://autobuilder.yocto.io/pub/releases on the
+When the build completes, an email is sent out using the ``send-qa-email``
+script in the :yocto_git:`yocto-autobuilder-helper </yocto-autobuilder-helper>`
+repository to the list of people configured for that release. Release builds
+are placed into a directory in https://autobuilder.yocto.io/pub/releases on the
Autobuilder which is included in the email. The process from here is
more manual and control is effectively passed to release engineering.
The next steps include:
@@ -80,14 +77,15 @@ The next steps include:
- QA teams respond to the email saying which tests they plan to run and
when the results will be available.
-- QA teams run their tests and share their results in the yocto-
- testresults-contrib repository, along with a summary of their
- findings.
+- QA teams run their tests and share their results in the
+ :yocto_git:`yocto-testresults-contrib </yocto-testresults-contrib>`
+ repository, along with a summary of their findings.
- Release engineering prepare the release as per their process.
- Test results from the QA teams are included into the release in
- separate directories and also uploaded to the yocto-testresults
+ separate directories and also uploaded to the
+ :yocto_git:`yocto-testresults </yocto-testresults>`
repository alongside the other test results for the given revision.
- The QA report in the final release is regenerated using resulttool to
diff --git a/documentation/test-manual/understand-autobuilder.rst b/documentation/test-manual/understand-autobuilder.rst
index b6809ce7bd..837109ad7d 100644
--- a/documentation/test-manual/understand-autobuilder.rst
+++ b/documentation/test-manual/understand-autobuilder.rst
@@ -9,8 +9,8 @@ Execution Flow within the Autobuilder
The "a-full" and "a-quick" targets are the usual entry points into the
Autobuilder and it makes sense to follow the process through the system
-starting there. This is best visualized from the Autobuilder Console
-view (:yocto_ab:`/typhoon/#/console`).
+starting there. This is best visualized from the :yocto_ab:`Autobuilder
+Console view </typhoon/#/console>`.
Each item along the top of that view represents some "target build" and
these targets are all run in parallel. The 'full' build will trigger the
@@ -18,9 +18,9 @@ majority of them, the "quick" build will trigger some subset of them.
The Autobuilder effectively runs whichever configuration is defined for
each of those targets on a separate buildbot worker. To understand the
configuration, you need to look at the entry on ``config.json`` file
-within the ``yocto-autobuilder-helper`` repository. The targets are
-defined in the ‘overrides' section, a quick example could be qemux86-64
-which looks like::
+within the :yocto_git:`yocto-autobuilder-helper </yocto-autobuilder-helper>`
+repository. The targets are defined in the ``overrides`` section, a quick
+example could be ``qemux86-64`` which looks like::
"qemux86-64" : {
"MACHINE" : "qemux86-64",
@@ -32,8 +32,8 @@ which looks like::
}
},
-And to expand that, you need the "arch-qemu" entry from
-the "templates" section, which looks like::
+And to expand that, you need the ``arch-qemu`` entry from
+the ``templates`` section, which looks like::
"arch-qemu" : {
"BUILDINFO" : true,
@@ -54,11 +54,11 @@ the "templates" section, which looks like::
}
},
-Combining these two entries you can see that "qemux86-64" is a three step build where the
-``bitbake BBTARGETS`` would be run, then ``bitbake SANITYTARGETS`` for each step; all for
-``MACHINE="qemx86-64"`` but with differing SDKMACHINE settings. In step
-1 an extra variable is added to the ``auto.conf`` file to enable wic
-image generation.
+Combining these two entries, you can see that ``qemux86-64`` is a three-step
+build where ``bitbake BBTARGETS`` would be run, then ``bitbake SANITYTARGETS``
+for each step; all for ``MACHINE="qemux86-64"`` but with differing
+:term:`SDKMACHINE` settings. In step 1, an extra variable is added to the
+``auto.conf`` file to enable wic image generation.
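+
+As a sketch only (the exact variable comes from ``config.json`` and is not
+shown here), such an ``auto.conf`` addition could look like::
+
+   IMAGE_FSTYPES:append = " wic"
+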
While not every detail of this is covered here, you can see how the
template mechanism allows quite complex configurations to be built up
@@ -88,9 +88,9 @@ roughly consist of:
#. *Obtain yocto-autobuilder-helper*
- This step clones the ``yocto-autobuilder-helper`` git repository.
- This is necessary to prevent the requirement to maintain all the
- release or project-specific code within Buildbot. The branch chosen
+ This step clones the :yocto_git:`yocto-autobuilder-helper </yocto-autobuilder-helper>`
+ git repository. This is necessary to avoid the requirement to maintain all
+ the release or project-specific code within Buildbot. The branch chosen
matches the release being built so we can support older releases and
still make changes in newer ones.
@@ -163,8 +163,9 @@ Autobuilder Worker Janitor
--------------------------
This is a process running on each Worker that performs two basic
-operations, including background file deletion at IO idle (see :ref:`test-manual/understand-autobuilder:Autobuilder Target Execution Overview`: Run clobberdir) and
-maintenance of a cache of cloned repositories to improve the speed
+operations, including background file deletion at IO idle (see
+"Run clobberdir" in :ref:`test-manual/understand-autobuilder:Autobuilder Target Execution Overview`)
+and maintenance of a cache of cloned repositories to improve the speed at which
the system can checkout repositories.
Shared DL_DIR
@@ -172,7 +173,7 @@ Shared DL_DIR
The Workers are all connected over NFS which allows DL_DIR to be shared
between them. This reduces network accesses from the system and allows
-the build to be sped up. Usage of the directory within the build system
+the build to be sped up. The usage of the directory within the build system
is designed to be able to be shared over NFS.
Shared SSTATE_DIR
@@ -180,8 +181,8 @@ Shared SSTATE_DIR
The Workers are all connected over NFS which allows the ``sstate``
directory to be shared between them. This means once a Worker has built
-an artifact, all the others can benefit from it. Usage of the directory
-within the directory is designed for sharing over NFS.
+an artifact, all the others can benefit from it. The usage of the directory
+within the build system is designed for sharing over NFS.
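+
+As a sketch only (the mount points are hypothetical), pointing every Worker at
+the same NFS locations is a matter of configuration, for example::
+
+   DL_DIR = "/nfs/yocto/downloads"
+   SSTATE_DIR = "/nfs/yocto/sstate"
+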
Resulttool
----------
@@ -192,7 +193,7 @@ in a given build and their status. Additional information, such as
failure logs or the time taken to run the tests, may also be included.
Resulttool is part of OpenEmbedded-Core and is used to manipulate these
-json results files. It has the ability to merge files together, display
+JSON results files. It has the ability to merge files together, display
reports of the test results and compare different result files.
For details, see :yocto_wiki:`/Resulttool`.
@@ -204,9 +205,13 @@ The ``scripts/run-config`` execution is where most of the work within
the Autobuilder happens. It runs through a number of steps; the first
are general setup steps that are run once and include:
-#. Set up any ``buildtools-tarball`` if configured.
+#. Set up any :term:`buildtools` tarball if configured.
-#. Call "buildhistory-init" if buildhistory is configured.
+#. Call ``buildhistory-init`` if :ref:`ref-classes-buildhistory` is configured.
For each step that is configured in ``config.json``, it will perform the
following:
@@ -250,15 +255,16 @@ Deploying Yocto Autobuilder
===========================
The most up to date information about how to setup and deploy your own
-Autobuilder can be found in README.md in the ``yocto-autobuilder2``
-repository.
+Autobuilder can be found in :yocto_git:`README.md </yocto-autobuilder2/tree/README.md>`
+in the :yocto_git:`yocto-autobuilder2 </yocto-autobuilder2>` repository.
-We hope that people can use the ``yocto-autobuilder2`` code directly but
-it is inevitable that users will end up needing to heavily customise the
-``yocto-autobuilder-helper`` repository, particularly the
-``config.json`` file as they will want to define their own test matrix.
+We hope that people can use the :yocto_git:`yocto-autobuilder2 </yocto-autobuilder2>`
+code directly but it is inevitable that users will end up needing to heavily
+customize the :yocto_git:`yocto-autobuilder-helper </yocto-autobuilder-helper>`
+repository, particularly the ``config.json`` file as they will want to define
+their own test matrix.
-The Autobuilder supports wo customization options:
+The Autobuilder supports two customization options:
- variable substitution
@@ -278,7 +284,7 @@ environment::
$ ABHELPER_JSON="config.json /some/location/local.json"
One issue users often run into is validation of the ``config.json`` files. A
-tip for minimizing issues from invalid json files is to use a Git
+tip for minimizing issues from invalid JSON files is to use a Git
``pre-commit-hook.sh`` script to verify the JSON file before committing
it. Create a symbolic link as follows::
diff --git a/documentation/test-manual/yocto-project-compatible.rst b/documentation/test-manual/yocto-project-compatible.rst
index 96c12ac083..65d924fad9 100644
--- a/documentation/test-manual/yocto-project-compatible.rst
+++ b/documentation/test-manual/yocto-project-compatible.rst
@@ -27,7 +27,7 @@ In the second version of the program, a script was added to make validation
easier and clearer, the script is called ``yocto-check-layer`` and is
available in :term:`OpenEmbedded-Core (OE-Core)`.
-See :ref:`dev-manual/common-tasks:making sure your layer is compatible with yocto project`
+See :ref:`dev-manual/layers:making sure your layer is compatible with yocto project`
for details.
========
diff --git a/documentation/toaster-manual/reference.rst b/documentation/toaster-manual/reference.rst
index 1bb9f98cca..927699d2a8 100644
--- a/documentation/toaster-manual/reference.rst
+++ b/documentation/toaster-manual/reference.rst
@@ -66,7 +66,7 @@ layers.
For general information on layers, see the
":ref:`overview-manual/yp-intro:the yocto project layer model`"
section in the Yocto Project Overview and Concepts Manual. For information on how
-to create layers, see the ":ref:`dev-manual/common-tasks:understanding and creating layers`"
+to create layers, see the ":ref:`dev-manual/layers:understanding and creating layers`"
section in the Yocto Project Development Tasks Manual.
Configuring Toaster to Hook Into Your Layer Index
diff --git a/documentation/toaster-manual/setup-and-use.rst b/documentation/toaster-manual/setup-and-use.rst
index 1e1a314d66..69155bdeb7 100644
--- a/documentation/toaster-manual/setup-and-use.rst
+++ b/documentation/toaster-manual/setup-and-use.rst
@@ -366,7 +366,7 @@ Perform the following steps to install Toaster:
/etc/apache2/conf.d/toaster.conf
- Following is a sample Apache configuration for Toaster you can follow:
+ Here is a sample Apache configuration for Toaster you can follow:
.. code-block:: apache
@@ -496,7 +496,7 @@ The Toaster web interface allows you to do the following:
Toaster Web Interface Videos
----------------------------
-Following are several videos that show how to use the Toaster GUI:
+Here are several videos that show how to use the Toaster GUI:
- *Build Configuration:* This
`video <https://www.youtube.com/watch?v=qYgDZ8YzV6w>`__ overviews and
diff --git a/documentation/transitioning-to-a-custom-environment.rst b/documentation/transitioning-to-a-custom-environment.rst
index f0035bd3af..41e93a3068 100644
--- a/documentation/transitioning-to-a-custom-environment.rst
+++ b/documentation/transitioning-to-a-custom-environment.rst
@@ -42,7 +42,7 @@ Transitioning to a custom environment for systems development
You might want to start with the build specification that Poky provides
(which is reference embedded distribution) and then add your newly chosen
layers to that. Here is the information :ref:`about adding layers
- <dev-manual/common-tasks:Understanding and Creating Layers>`.
+ <dev-manual/layers:Understanding and Creating Layers>`.
#. **Based on the layers you've chosen, make needed changes in your
configuration**.
@@ -58,7 +58,7 @@ Transitioning to a custom environment for systems development
releases. If you are using a Yocto Project release earlier than 2.4, use the
``yocto-layer create`` tool. The ``bitbake-layers`` tool also provides a number
of other useful layer-related commands. See
- :ref:`dev-manual/common-tasks:creating a general layer using the
+ :ref:`dev-manual/layers:creating a general layer using the
\`\`bitbake-layers\`\` script` section.
#. **Create your own layer for the BSP you're going to use**.
@@ -79,12 +79,12 @@ Transitioning to a custom environment for systems development
process of refinement. Start by getting each step of the build process
working beginning with fetching all the way through packaging. Next, run the
software on your target and refine further as needed. See :ref:`Writing a New
- Recipe <dev-manual/common-tasks:writing a new recipe>` in the
+ Recipe <dev-manual/new-recipe:writing a new recipe>` in the
Yocto Project Development Tasks Manual for more information.
#. **Now you're ready to create an image recipe**.
There are a number of ways to do this. However, it is strongly recommended
- that you have your own image recipe - don't try appending to existing image
+ that you have your own image recipe --- don't try appending to existing image
recipes. Recipes for images are trivial to create and you usually want to
fully customize their contents.
@@ -103,7 +103,7 @@ Transitioning to a custom environment for systems development
needs to change for your distribution. If you find yourself adding a lot of
configuration to your local.conf file aside from paths and other typical
local settings, it's time to :ref:`consider creating your own distribution
- <dev-manual/common-tasks:creating your own distribution>`.
+ <dev-manual/custom-distribution:creating your own distribution>`.
You can add product specifications that can customize the distribution if
needed in other layers. You can also add other functionality specific to the
diff --git a/documentation/what-i-wish-id-known.rst b/documentation/what-i-wish-id-known.rst
index dec5617173..c21810d49b 100644
--- a/documentation/what-i-wish-id-known.rst
+++ b/documentation/what-i-wish-id-known.rst
@@ -29,8 +29,9 @@ contact us with other suggestions.
#. **Get to know the layer index:**
All layers can be found in the :oe_layerindex:`layer index <>`. Layers which
have applied for Yocto Project Compatible status (structure continuity
- assurance and testing) can be found in the :yocto_home:`Yocto Project Compatible index
- </software-over/layer/>`. Generally check the Compatible layer index first,
+ assurance and testing) can be found in the :yocto_home:`Yocto Project
+ Compatible Layers </development/yocto-project-compatible-layers/>` page.
+ Generally check the Compatible layer index first,
and if you don't find the necessary layer check the general layer index. The
layer index is an original artifact from the Open Embedded Project. As such,
that index doesn't have the curating and testing that the Yocto Project
@@ -131,7 +132,7 @@ contact us with other suggestions.
say "bitbake foo" where "foo" is the name for a specific recipe. As you
become more advanced using the Yocto Project, and if builds are failing, it
can be useful to make sure the fetch itself works as desired. Here are some
- valuable links: :ref:`dev-manual/common-tasks:Using a Development
+ valuable links: :ref:`dev-manual/development-shell:Using a Development
Shell` for information on how to build and run a specific task using
devshell. Also, the :ref:`SDK manual shows how to build out a specific recipe
<sdk-manual/extensible:use \`\`devtool modify\`\` to modify the source of an existing component>`.
@@ -212,6 +213,13 @@ contact us with other suggestions.
OpenEmbedded build system. If you are interested in using this type of
interface to create images, see the :doc:`/toaster-manual/index`.
+ * **Discover the VSCode extension**: The `Yocto Project BitBake
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__
+ extension for the Visual Studio Code IDE provides language features and
+ commands for working with the Yocto Project. If you are interested in using
+ this extension, visit its `marketplace page
+ <https://marketplace.visualstudio.com/items?itemName=yocto-project.yocto-bitbake>`__.
+
* **Have Available the Yocto Project Reference Manual**: Unlike the rest of
the Yocto Project manual set, this manual is comprised of material suited
for reference rather than procedures. You can get build details, a closer
diff --git a/meta-poky/conf/distro/poky.conf b/meta-poky/conf/distro/poky.conf
index 4d969b9cfd..fcf481bc23 100644
--- a/meta-poky/conf/distro/poky.conf
+++ b/meta-poky/conf/distro/poky.conf
@@ -1,7 +1,7 @@
DISTRO = "poky"
DISTRO_NAME = "Poky (Yocto Project Reference Distro)"
#DISTRO_VERSION = "3.4+snapshot-${METADATA_REVISION}"
-DISTRO_VERSION = "4.0.1"
+DISTRO_VERSION = "4.0.18"
DISTRO_CODENAME = "kirkstone"
SDK_VENDOR = "-pokysdk"
SDK_VERSION = "${@d.getVar('DISTRO_VERSION').replace('snapshot-${METADATA_REVISION}', 'snapshot')}"
@@ -34,19 +34,13 @@ TCLIBCAPPEND = ""
SANITY_TESTED_DISTROS ?= " \
poky-3.3 \n \
poky-3.4 \n \
- ubuntu-16.04 \n \
ubuntu-18.04 \n \
ubuntu-20.04 \n \
- ubuntu-21.10 \n \
- fedora-34 \n \
- fedora-35 \n \
- centos-7 \n \
- centos-8 \n \
- debian-9 \n \
- debian-10 \n \
+ ubuntu-22.04 \n \
+ fedora-37 \n \
debian-11 \n \
opensuseleap-15.3 \n \
- almalinux-8.5 \n \
+ almalinux-8.8 \n \
"
# add poky sanity bbclass
INHERIT += "poky-sanity"
diff --git a/meta-poky/conf/local.conf.sample b/meta-poky/conf/local.conf.sample
index ae9c87f4b2..ef491d43bf 100644
--- a/meta-poky/conf/local.conf.sample
+++ b/meta-poky/conf/local.conf.sample
@@ -229,7 +229,7 @@ BB_DISKMON_DIRS ??= "\
# which will depend on your network.
# Note: For this to work you also need hash-equivalence passthrough to the matching server
#
-#BB_HASHSERVE_UPSTREAM = "typhoon.yocto.io:8687"
+#BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
#SSTATE_MIRRORS ?= "file://.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
#
diff --git a/meta-selftest/files/static-group b/meta-selftest/files/static-group
index b2e0e2f870..cbec6f1377 100644
--- a/meta-selftest/files/static-group
+++ b/meta-selftest/files/static-group
@@ -23,3 +23,5 @@ _apt:x:523:
weston-launch:x:524:
weston:x:525:
wayland:x:526:
+render:x:527:
+sgx:x:528:
diff --git a/meta-selftest/recipes-test/devtool/devtool-test-local/file3 b/meta-selftest/recipes-test/devtool/devtool-test-local/file3
new file mode 100644
index 0000000000..0f30e9eec4
--- /dev/null
+++ b/meta-selftest/recipes-test/devtool/devtool-test-local/file3
@@ -0,0 +1 @@
+The third file.
diff --git a/meta-selftest/recipes-test/devtool/devtool-test-local_6.03.bb b/meta-selftest/recipes-test/devtool/devtool-test-local_6.03.bb
index 463cfe0a7a..d0fd697978 100644
--- a/meta-selftest/recipes-test/devtool/devtool-test-local_6.03.bb
+++ b/meta-selftest/recipes-test/devtool/devtool-test-local_6.03.bb
@@ -7,9 +7,12 @@ SRC_URI = "http://downloads.yoctoproject.org/mirror/sources/syslinux-${PV}.tar.x
file://file1 \
file://file2"
+SRC_URI:append:class-native = " file://file3"
+
SRC_URI[md5sum] = "92a253df9211e9c20172796ecf388f13"
SRC_URI[sha256sum] = "26d3986d2bea109d5dc0e4f8c4822a459276cf021125e8c9f23c3cca5d8c850e"
S = "${WORKDIR}/syslinux-${PV}"
EXCLUDE_FROM_WORLD = "1"
+BBCLASSEXTEND = "native"
diff --git a/meta-selftest/recipes-test/devtool/devtool-test-localonly.bb b/meta-selftest/recipes-test/devtool/devtool-test-localonly.bb
index 3f7123cda0..e767619879 100644
--- a/meta-selftest/recipes-test/devtool/devtool-test-localonly.bb
+++ b/meta-selftest/recipes-test/devtool/devtool-test-localonly.bb
@@ -4,4 +4,7 @@ INHIBIT_DEFAULT_DEPS = "1"
SRC_URI = "file://file1 \
file://file2"
+SRC_URI:append:class-native = " file://file3"
+
EXCLUDE_FROM_WORLD = "1"
+BBCLASSEXTEND = "native"
diff --git a/meta-selftest/recipes-test/devtool/devtool-test-localonly/file3 b/meta-selftest/recipes-test/devtool/devtool-test-localonly/file3
new file mode 100644
index 0000000000..0f30e9eec4
--- /dev/null
+++ b/meta-selftest/recipes-test/devtool/devtool-test-localonly/file3
@@ -0,0 +1 @@
+The third file.
diff --git a/meta-selftest/recipes-test/images/oe-selftest-image.bb b/meta-selftest/recipes-test/images/oe-selftest-image.bb
index e295943ae5..317a0712aa 100644
--- a/meta-selftest/recipes-test/images/oe-selftest-image.bb
+++ b/meta-selftest/recipes-test/images/oe-selftest-image.bb
@@ -1,7 +1,7 @@
SUMMARY = "An image used during oe-selftest tests"
# libudev is needed to deploy mdadm via devtool
-IMAGE_INSTALL = "packagegroup-core-boot dropbear libudev"
+IMAGE_INSTALL = "packagegroup-core-boot packagegroup-core-ssh-dropbear libudev"
IMAGE_FEATURES = "debug-tweaks"
IMAGE_LINGUAS = " "
diff --git a/meta-selftest/recipes-test/license/incompatible-license-alias.bb b/meta-selftest/recipes-test/license/incompatible-license-alias.bb
index e0b4e13c26..1af99e7809 100644
--- a/meta-selftest/recipes-test/license/incompatible-license-alias.bb
+++ b/meta-selftest/recipes-test/license/incompatible-license-alias.bb
@@ -1,3 +1,5 @@
SUMMARY = "Recipe with an alias of an SPDX license"
DESCRIPTION = "Is licensed with an alias of an SPDX license to be used for testing"
LICENSE = "GPLv3"
+
+EXCLUDE_FROM_WORLD = "1"
diff --git a/meta-selftest/recipes-test/license/incompatible-license.bb b/meta-selftest/recipes-test/license/incompatible-license.bb
index 282f5c2875..6fdc58fd30 100644
--- a/meta-selftest/recipes-test/license/incompatible-license.bb
+++ b/meta-selftest/recipes-test/license/incompatible-license.bb
@@ -1,3 +1,5 @@
SUMMARY = "Recipe with an SPDX license"
DESCRIPTION = "Is licensed with an SPDX license to be used for testing"
LICENSE = "GPL-3.0-only"
+
+EXCLUDE_FROM_WORLD = "1"
diff --git a/meta-selftest/recipes-test/license/incompatible-licenses.bb b/meta-selftest/recipes-test/license/incompatible-licenses.bb
index 9709892644..47bd8d7c00 100644
--- a/meta-selftest/recipes-test/license/incompatible-licenses.bb
+++ b/meta-selftest/recipes-test/license/incompatible-licenses.bb
@@ -1,3 +1,5 @@
SUMMARY = "Recipe with multiple SPDX licenses"
DESCRIPTION = "Is licensed with multiple SPDX licenses to be used for testing"
LICENSE = "GPL-2.0-only & GPL-3.0-only & LGPL-3.0-only"
+
+EXCLUDE_FROM_WORLD = "1"
diff --git a/meta-selftest/recipes-test/license/incompatible-nonspdx-license.bb b/meta-selftest/recipes-test/license/incompatible-nonspdx-license.bb
index 35af0966ef..142d73158e 100644
--- a/meta-selftest/recipes-test/license/incompatible-nonspdx-license.bb
+++ b/meta-selftest/recipes-test/license/incompatible-nonspdx-license.bb
@@ -1,3 +1,5 @@
SUMMARY = "Recipe with a non-SPDX license"
DESCRIPTION = "Is licensed with a non-SPDX license to be used for testing"
LICENSE = "FooLicense"
+
+EXCLUDE_FROM_WORLD = "1"
diff --git a/meta-skeleton/recipes-skeleton/useradd/useradd-example.bb b/meta-skeleton/recipes-skeleton/useradd/useradd-example.bb
index 3f4c42d714..cff624e2f9 100644
--- a/meta-skeleton/recipes-skeleton/useradd/useradd-example.bb
+++ b/meta-skeleton/recipes-skeleton/useradd/useradd-example.bb
@@ -33,8 +33,8 @@ USERADD_PACKAGES = "${PN} ${PN}-user3"
USERADD_PARAM:${PN} = "-u 1200 -d /home/user1 -r -s /bin/bash user1; -u 1201 -d /home/user2 -r -s /bin/bash user2"
# user3 will be managed in the useradd-example-user3 package:
-# As an example, we use the -P option to set clear text password for user3
-USERADD_PARAM:${PN}-user3 = "-u 1202 -d /home/user3 -r -s /bin/bash -P 'user3' user3"
+# As an example, we use the -p option to set password ('user3') for user3
+USERADD_PARAM:${PN}-user3 = "-u 1202 -d /home/user3 -r -s /bin/bash -p '\$6\$XAWr.8nc\$bUE4pYYaVb8n6BbnBitU0zeJMtfhTpFpiOBLL9zRl4e4YQo88UU4r/1kjRzmTimCy.BvDh4xoFwVqcO.pihLa1' user3"
# GROUPADD_PARAM works the same way, which you set to the options
# you'd normally pass to the groupadd command. This will create
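
Illustration (not part of the patch): the change above replaces useradd's -P option (clear-text password) with -p, which expects an already-encrypted hash such as the SHA-512 crypt string shown. A minimal Python sketch of generating an equivalent hash, assuming the build host provides the standard (Unix-only, now deprecated) crypt module; remember to escape the $ characters when embedding the result in a BitBake variable:

    import crypt

    # Produce a SHA-512 crypt(3) hash for the example password "user3".
    # The output starts with $6$ and is what useradd -p expects.
    hashed = crypt.crypt("user3", crypt.mksalt(crypt.METHOD_SHA512))
    print(hashed)
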
diff --git a/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.10.bbappend b/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.10.bbappend
index 4f4ec0f210..c6032a84d8 100644
--- a/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.10.bbappend
+++ b/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.10.bbappend
@@ -7,17 +7,17 @@ KMACHINE:genericx86 ?= "common-pc"
KMACHINE:genericx86-64 ?= "common-pc-64"
KMACHINE:beaglebone-yocto ?= "beaglebone"
-SRCREV_machine:genericx86 ?= "c0b313d988a16b25c1ee730bfe7393c462ee8a5c"
-SRCREV_machine:genericx86-64 ?= "c0b313d988a16b25c1ee730bfe7393c462ee8a5c"
-SRCREV_machine:edgerouter ?= "4ab94e777d8b41ee1ee4c279259e9733bc8049b1"
-SRCREV_machine:beaglebone-yocto ?= "941cc9c3849f96f7eaf109b1e35e05ba366aca56"
+SRCREV_machine:genericx86 ?= "7abf3b31ec4e4fc9564b7a8db6844d9b4d71a1b2"
+SRCREV_machine:genericx86-64 ?= "7abf3b31ec4e4fc9564b7a8db6844d9b4d71a1b2"
+SRCREV_machine:edgerouter ?= "7c9332d91089ee63581be6cd3e7197c9d3e9a883"
+SRCREV_machine:beaglebone-yocto ?= "3c44f12b9de336579d00ac0105852f4cbf7e8b7d"
COMPATIBLE_MACHINE:genericx86 = "genericx86"
COMPATIBLE_MACHINE:genericx86-64 = "genericx86-64"
COMPATIBLE_MACHINE:edgerouter = "edgerouter"
COMPATIBLE_MACHINE:beaglebone-yocto = "beaglebone-yocto"
-LINUX_VERSION:genericx86 = "5.10.99"
-LINUX_VERSION:genericx86-64 = "5.10.99"
-LINUX_VERSION:edgerouter = "5.10.63"
-LINUX_VERSION:beaglebone-yocto = "5.10.63"
+LINUX_VERSION:genericx86 = "5.10.149"
+LINUX_VERSION:genericx86-64 = "5.10.149"
+LINUX_VERSION:edgerouter = "5.10.130"
+LINUX_VERSION:beaglebone-yocto = "5.10.130"
diff --git a/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.15.bbappend b/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.15.bbappend
index 85d02a46e4..c6d145fdb9 100644
--- a/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.15.bbappend
+++ b/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.15.bbappend
@@ -7,17 +7,17 @@ KMACHINE:genericx86 ?= "common-pc"
KMACHINE:genericx86-64 ?= "common-pc-64"
KMACHINE:beaglebone-yocto ?= "beaglebone"
-SRCREV_machine:genericx86 ?= "ebfb1822e9f9726d8c587fc0f60cfed43fa0873e"
-SRCREV_machine:genericx86-64 ?= "ebfb1822e9f9726d8c587fc0f60cfed43fa0873e"
-SRCREV_machine:edgerouter ?= "b978686694c3e41968821d6cc2a2a371fd9c2fb0"
-SRCREV_machine:beaglebone-yocto ?= "4c875cf1376178dfab4913aa1350cab50bb093d3"
+SRCREV_machine:genericx86 ?= "7c82dac028864e8a608e70d3ac2dbc05b3cd1e14"
+SRCREV_machine:genericx86-64 ?= "7c82dac028864e8a608e70d3ac2dbc05b3cd1e14"
+SRCREV_machine:edgerouter ?= "23b867c1a618572a36b6283f55746a5162e08cc7"
+SRCREV_machine:beaglebone-yocto ?= "4fca0c437367cf4d4fe158d74e0ae880a3f99d3c"
COMPATIBLE_MACHINE:genericx86 = "genericx86"
COMPATIBLE_MACHINE:genericx86-64 = "genericx86-64"
COMPATIBLE_MACHINE:edgerouter = "edgerouter"
COMPATIBLE_MACHINE:beaglebone-yocto = "beaglebone-yocto"
-LINUX_VERSION:genericx86 = "5.15.36"
-LINUX_VERSION:genericx86-64 = "5.15.36"
-LINUX_VERSION:edgerouter = "5.15.36"
-LINUX_VERSION:beaglebone-yocto = "5.15.36"
+LINUX_VERSION:genericx86 = "5.15.150"
+LINUX_VERSION:genericx86-64 = "5.15.150"
+LINUX_VERSION:edgerouter = "5.15.150"
+LINUX_VERSION:beaglebone-yocto = "5.15.150"
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 33070cd17f..4a5865d7b5 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -69,7 +69,6 @@ SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
-do_deploy_archives[dirs] = "${WORKDIR}"
# This is a convenience for the shell script to use it
@@ -460,7 +459,9 @@ def create_diff_gz(d, src_orig, src, ar_outdir):
def is_work_shared(d):
pn = d.getVar('PN')
- return bb.data.inherits_class('kernel', d) or pn.startswith('gcc-source')
+ return pn.startswith('gcc-source') or \
+ bb.data.inherits_class('kernel', d) or \
+ (bb.data.inherits_class('kernelsrc', d) and d.expand("${TMPDIR}/work-shared") in d.getVar('S'))
# Run do_unpack and do_patch
python do_unpack_and_patch() {
diff --git a/meta/classes/baremetal-image.bbclass b/meta/classes/baremetal-image.bbclass
index cb9e250350..3a96df1f2d 100644
--- a/meta/classes/baremetal-image.bbclass
+++ b/meta/classes/baremetal-image.bbclass
@@ -95,6 +95,17 @@ QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
+## Emulate image.bbclass
+# Handle inherits of any of the image classes we need
+IMAGE_CLASSES ??= ""
+IMGCLASSES = " ${IMAGE_CLASSES}"
+inherit ${IMGCLASSES}
+# Set defaults to satisfy IMAGE_FEATURES check
+IMAGE_FEATURES ?= ""
+IMAGE_FEATURES[type] = "list"
+IMAGE_FEATURES[validitems] += ""
+
+
# This next part is necessary to trick the build system into thinking
# it's building an image recipe so it generates the qemuboot.conf
addtask do_rootfs before do_image after do_install
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index bdb3ac33c6..ee26ee5597 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -12,7 +12,7 @@ inherit logging
OE_EXTRA_IMPORTS ?= ""
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust ${OE_EXTRA_IMPORTS}"
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust oe.go ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
PACKAGECONFIG_CONFARGS ??= ""
@@ -132,7 +132,7 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
# /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
# would return /usr/local/bin/ccache/gcc, but what we need is
# /usr/bin/gcc, this code can check and fix that.
- if "ccache" in srctool:
+ if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
srctool = bb.utils.which(path, tool, executable=True, direction=1)
if srctool:
os.symlink(srctool, desttool)
@@ -208,6 +208,7 @@ addtask do_deploy_source_date_epoch_setscene
addtask do_deploy_source_date_epoch before do_configure after do_patch
python create_source_date_epoch_stamp() {
+ # Version: 1
source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
}
@@ -596,9 +597,9 @@ python () {
for lic_exception in exceptions:
if ":" in lic_exception:
- lic_exception.split(":")[0]
+ lic_exception = lic_exception.split(":")[1]
if lic_exception in oe.license.obsolete_license_list():
- bb.fatal("Invalid license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)
+ bb.fatal("Obsolete license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)
pkgs = d.getVar('PACKAGES').split()
skipped_pkgs = {}
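
Illustration (not part of the patch): the setup_hosttools_dir change narrows the old substring test ("ccache" in srctool), which could misfire on any path merely containing "ccache", to a check that the resolved tool really is a symlink to a file named ccache. A rough standalone sketch of that predicate, using a hypothetical path:

    import os

    def resolves_to_ccache(srctool):
        # True only when srctool is a symlink whose target is named "ccache",
        # e.g. /usr/local/bin/ccache/gcc -> /usr/bin/ccache; a "ccache"
        # substring elsewhere in the path no longer triggers the re-lookup.
        return os.path.islink(srctool) and \
            os.path.basename(os.readlink(srctool)) == "ccache"
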
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index c3aca20443..f0407e1329 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -30,8 +30,9 @@ bin_package_do_install () {
bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
fi
cd ${S}
+ install -d ${D}${base_prefix}
tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
- | tar --no-same-owner -xpf - -C ${D}
+ | tar --no-same-owner -xpf - -C ${D}${base_prefix}
}
FILES:${PN} = "/"
diff --git a/meta/classes/cargo.bbclass b/meta/classes/cargo.bbclass
index 4a780a501f..6df3b19b00 100644
--- a/meta/classes/cargo.bbclass
+++ b/meta/classes/cargo.bbclass
@@ -49,7 +49,6 @@ oe_cargo_build () {
do_compile[progress] = "outof:\s+(\d+)/(\d+)"
cargo_do_compile () {
- oe_cargo_fix_env
oe_cargo_build
}
diff --git a/meta/classes/cargo_common.bbclass b/meta/classes/cargo_common.bbclass
index 90fad75415..c1bc142d85 100644
--- a/meta/classes/cargo_common.bbclass
+++ b/meta/classes/cargo_common.bbclass
@@ -45,12 +45,12 @@ cargo_common_do_configure () {
directory = "${CARGO_VENDORING_DIRECTORY}"
EOF
- if [ -z "${EXTERNALSRC}" ] && [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
+ if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
cat <<- EOF >> ${CARGO_HOME}/config
[source.crates-io]
replace-with = "bitbake"
- local-registry = "/nonexistant"
+ local-registry = "/nonexistent"
EOF
fi
@@ -88,7 +88,7 @@ cargo_common_do_configure () {
cat <<- EOF >> ${CARGO_HOME}/config
[build]
- # Use out of tree build destination to avoid poluting the source tree
+ # Use out of tree build destination to avoid polluting the source tree
target-dir = "${B}/target"
EOF
fi
@@ -101,6 +101,10 @@ cargo_common_do_configure () {
EOF
}
+do_compile:prepend () {
+ oe_cargo_fix_env
+}
+
oe_cargo_fix_env () {
export CC="${RUST_TARGET_CC}"
export CXX="${RUST_TARGET_CXX}"
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index d9bcddbdbb..7ec6ca58fc 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -85,9 +85,12 @@ def map_host_arch_to_uname_arch(host_arch):
return "ppc64"
return host_arch
+
cmake_do_generate_toolchain_file() {
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
+ else
+ cmake_sysroot="set( CMAKE_SYSROOT \"${RECIPE_SYSROOT}\" )"
fi
cat > ${WORKDIR}/toolchain.cmake <<EOF
# CMake system name must be something like "Linux".
@@ -120,6 +123,8 @@ set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
set( CMAKE_PROGRAM_PATH "/" )
+$cmake_sysroot
+
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index d319d66ab2..fd087c2a14 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -48,7 +48,7 @@ python do_menuconfig() {
# ensure that environment variables are overwritten with this tasks 'd' values
d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
- oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+ oe_terminal("sh -c 'make %s; if [ \\$? -ne 0 ]; then echo \"Command failed.\"; printf \"Press any key to continue... \"; read r; fi'" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
# FIXME this check can be removed when the minimum bitbake version has been bumped
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index 84fd3eeb38..803727da0e 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -59,6 +59,10 @@ FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
# Including image feature foo would replace the image features bar1 and bar2
IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
+# Do not install openssh complementary packages if either packagegroup-core-ssh-dropbear or dropbear
+# is installed, to avoid the openssh-dropbear conflict
+# see [Yocto #14858] for more information
+PACKAGE_EXCLUDE_COMPLEMENTARY:append = "${@bb.utils.contains_any('PACKAGE_INSTALL', 'packagegroup-core-ssh-dropbear dropbear', ' openssh', '' , d)}"
# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
# An error exception would be raised if both image features foo and bar1(or bar2) are included
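
Illustration (not part of the patch): the new PACKAGE_EXCLUDE_COMPLEMENTARY line relies on bb.utils.contains_any(), which returns its third argument if any of the listed items appears in the whitespace-split variable and its fourth argument otherwise. A simplified Python stand-in, without the BitBake datastore, to show the behaviour:

    def contains_any(value, checkvalues, truevalue, falsevalue):
        # Simplified model of bb.utils.contains_any(var, checkvalues,
        # truevalue, falsevalue, d): the real helper reads the variable
        # named var from the datastore d.
        if set(value.split()) & set(checkvalues.split()):
            return truevalue
        return falsevalue

    package_install = "packagegroup-core-boot packagegroup-core-ssh-dropbear"
    print(contains_any(package_install,
                       "packagegroup-core-ssh-dropbear dropbear",
                       " openssh", ""))    # prints " openssh"
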
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
index 37b6b569a1..349ecfe6ab 100644
--- a/meta/classes/create-spdx.bbclass
+++ b/meta/classes/create-spdx.bbclass
@@ -19,12 +19,12 @@ SPDX_TOOL_VERSION ??= "1.0"
SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
SPDX_INCLUDE_SOURCES ??= "0"
-SPDX_INCLUDE_PACKAGED ??= "0"
SPDX_ARCHIVE_SOURCES ??= "0"
SPDX_ARCHIVE_PACKAGED ??= "0"
SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+SPDX_PRETTY ??= "0"
SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
@@ -76,6 +76,11 @@ def recipe_spdx_is_native(d, recipe):
def is_work_shared_spdx(d):
return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+def get_json_indent(d):
+ if d.getVar("SPDX_PRETTY") == "1":
+ return 2
+ return None
+
python() {
import json
if d.getVar("SPDX_LICENSE_DATA"):
@@ -210,7 +215,7 @@ def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archiv
filepath = Path(subdir) / file
filename = str(filepath.relative_to(topdir))
- if filepath.is_file() and not filepath.is_symlink():
+ if not filepath.is_symlink() and filepath.is_file():
spdx_file = oe.spdx.SPDXFile()
spdx_file.SPDXID = get_spdxid(file_counter)
for t in get_types(filepath):
@@ -423,7 +428,6 @@ python do_create_spdx() {
deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
spdx_workdir = Path(d.getVar("SPDXWORK"))
- include_packaged = d.getVar("SPDX_INCLUDE_PACKAGED") == "1"
include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
@@ -445,12 +449,13 @@ python do_create_spdx() {
recipe.name = d.getVar("PN")
recipe.versionInfo = d.getVar("PV")
recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
- recipe.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ recipe.supplier = d.getVar("SPDX_SUPPLIER")
if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
recipe.annotations.append(create_annotation(d, "isNative"))
for s in d.getVar('SRC_URI').split():
if not s.startswith("file://"):
+ s = s.split(';')[0]
recipe.downloadLocation = s
break
else:
@@ -515,7 +520,7 @@ python do_create_spdx() {
dep_recipes = collect_dep_recipes(d, doc, recipe)
- doc_sha1 = oe.sbom.write_doc(d, doc, "recipes")
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes", indent=get_json_indent(d))
dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
recipe_ref = oe.spdx.SPDXExternalDocumentRef()
@@ -555,7 +560,7 @@ python do_create_spdx() {
spdx_package.name = pkg_name
spdx_package.versionInfo = d.getVar("PV")
spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
- spdx_package.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
package_doc.packages.append(spdx_package)
@@ -571,6 +576,7 @@ python do_create_spdx() {
pkgdest / package,
lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
lambda filepath: ["BINARY"],
+ ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
archive=archive,
)
@@ -579,7 +585,7 @@ python do_create_spdx() {
add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
- oe.sbom.write_doc(d, package_doc, "packages")
+ oe.sbom.write_doc(d, package_doc, "packages", indent=get_json_indent(d))
}
# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
@@ -743,7 +749,7 @@ python do_create_runtime_spdx() {
)
seen_deps.add(dep)
- oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy)
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy, indent=get_json_indent(d))
}
addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
@@ -787,6 +793,7 @@ def spdx_get_src(d):
bb.build.exec_func('do_unpack', d)
# Copy source of kernel to spdx_workdir
if is_work_shared_spdx(d):
+ share_src = d.getVar('WORKDIR')
d.setVar('WORKDIR', spdx_workdir)
d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
@@ -794,8 +801,8 @@ def spdx_get_src(d):
if bb.data.inherits_class('kernel',d):
share_src = d.getVar('STAGING_KERNEL_DIR')
cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
- cmd_copy_kernel_result = os.popen(cmd_copy_share).read()
- bb.note("cmd_copy_kernel_result = " + cmd_copy_kernel_result)
+ cmd_copy_shared_res = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
git_path = src_dir + "/.git"
if os.path.exists(git_path):
@@ -895,7 +902,7 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
image.name = d.getVar("PN")
image.versionInfo = d.getVar("PV")
image.SPDXID = rootfs_spdxid
- image.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ image.supplier = d.getVar("SPDX_SUPPLIER")
doc.packages.append(image)
@@ -938,7 +945,7 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
image_spdx_path = rootfs_deploydir / (rootfs_name + ".spdx.json")
with image_spdx_path.open("wb") as f:
- doc.to_json(f, sort_keys=True)
+ doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
num_threads = int(d.getVar("BB_NUMBER_THREADS"))
@@ -996,7 +1003,11 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
index["documents"].sort(key=lambda x: x["filename"])
- index_str = io.BytesIO(json.dumps(index, sort_keys=True).encode("utf-8"))
+ index_str = io.BytesIO(json.dumps(
+ index,
+ sort_keys=True,
+ indent=get_json_indent(d),
+ ).encode("utf-8"))
info = tarfile.TarInfo()
info.name = "index.json"
@@ -1010,4 +1021,4 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
spdx_index_path = rootfs_deploydir / (rootfs_name + ".spdx.index.json")
with spdx_index_path.open("w") as f:
- json.dump(index, f, sort_keys=True)
+ json.dump(index, f, sort_keys=True, indent=get_json_indent(d))
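
Illustration (not part of the patch): SPDX_PRETTY works by feeding an indent value into the JSON serializers through the new get_json_indent() helper; indent=None keeps the documents compact, indent=2 pretty-prints them. A small sketch with a plain dict standing in for the SPDX document objects:

    import json

    def get_json_indent(pretty):
        # Mirrors the helper added above: 2-space indent when SPDX_PRETTY
        # is "1", otherwise None (compact, single-line output).
        return 2 if pretty else None

    doc = {"spdxVersion": "SPDX-2.2", "name": "example"}
    print(json.dumps(doc, sort_keys=True, indent=get_json_indent(False)))
    print(json.dumps(doc, sort_keys=True, indent=get_json_indent(True)))
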
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 50b9247f46..f554150d94 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -26,7 +26,7 @@ CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2.db"
CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
@@ -42,8 +42,8 @@ CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
-CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
-CVE_CHECK_MANIFEST_JSON ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
+CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
@@ -98,6 +98,8 @@ def generate_json_report(d, out_path, link_path):
cve_check_merge_jsons(summary, data)
filename = f.readline()
+ summary["package"].sort(key=lambda d: d['name'])
+
with open(out_path, "w") as f:
json.dump(summary, f, indent=2)
@@ -139,22 +141,23 @@ python do_cve_check () {
"""
from oe.cve_check import get_patched_cves
- if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- try:
- patched_cves = get_patched_cves(d)
- except FileNotFoundError:
- bb.fatal("Failure in searching patches")
- ignored, patched, unpatched, status = check_cves(d, patched_cves)
- if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
- cve_data = get_cve_info(d, patched + unpatched + ignored)
- cve_write_data(d, patched, unpatched, ignored, cve_data, status)
- else:
- bb.note("No CVE database found, skipping CVE check")
+ with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
+ try:
+ patched_cves = get_patched_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ ignored, patched, unpatched, status = check_cves(d, patched_cves)
+ if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
+ cve_data = get_cve_info(d, patched + unpatched + ignored)
+ cve_write_data(d, patched, unpatched, ignored, cve_data, status)
+ else:
+ bb.note("No CVE database found, skipping CVE check")
}
addtask cve_check before do_build
-do_cve_check[depends] = "cve-update-db-native:do_fetch"
+do_cve_check[depends] = "cve-update-nvd2-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -166,7 +169,7 @@ python cve_check_cleanup () {
}
addhandler cve_check_cleanup
-cve_check_cleanup[eventmask] = "bb.cooker.CookerExit"
+cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
python cve_check_write_rootfs_manifest () {
"""
@@ -195,7 +198,7 @@ python cve_check_write_rootfs_manifest () {
recipies.add(pkg_data["PN"])
bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
+ deploy_dir = d.getVar("IMGDEPLOYDIR")
link_name = d.getVar("IMAGE_LINK_NAME")
json_data = {"version":"1", "package": []}
@@ -253,7 +256,7 @@ def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from oe.cve_check import Version
+ from oe.cve_check import Version, convert_cve_version
pn = d.getVar("PN")
real_pv = d.getVar("PV")
@@ -290,7 +293,8 @@ def check_cves(d, patched_cves):
vendor = "%"
# Find all relevant CVE IDs.
- for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
+ for cverow in cve_cursor:
cve = cverow[0]
if cve in cve_ignore:
@@ -309,12 +313,16 @@ def check_cves(d, patched_cves):
vulnerable = False
ignored = False
- for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
+ for row in product_cursor:
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
if cve in cve_ignore:
ignored = True
+ version_start = convert_cve_version(version_start)
+ version_end = convert_cve_version(version_end)
+
if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
else:
@@ -353,10 +361,12 @@ def check_cves(d, patched_cves):
bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
cves_unpatched.append(cve)
break
+ product_cursor.close()
if not vulnerable:
bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
patched_cves.add(cve)
+ cve_cursor.close()
if not cves_in_product:
bb.note("No CVE records found for product %s, pn %s" % (product, pn))
@@ -381,14 +391,15 @@ def get_cve_info(d, cves):
conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
- for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
+ for row in cursor:
cve_data[row[0]] = {}
cve_data[row[0]]["summary"] = row[1]
cve_data[row[0]]["scorev2"] = row[2]
cve_data[row[0]]["scorev3"] = row[3]
cve_data[row[0]]["modified"] = row[4]
cve_data[row[0]]["vector"] = row[5]
-
+ cursor.close()
conn.close()
return cve_data
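
Illustration (not part of the patch): besides switching to the NVD 2 database and taking a shared lock around the whole check via bb.utils.fileslocked(..., shared=True), the patch replaces anonymous query iteration with named sqlite3 cursors that are closed explicitly. A self-contained sketch of that cursor pattern, with a made-up table for illustration:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE NVD (ID TEXT, SUMMARY TEXT)")
    conn.execute("INSERT INTO NVD VALUES ('CVE-0000-0001', 'example entry')")

    cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", ("CVE-0000-0001",))
    for row in cursor:
        print(row[0], row[1])
    # Closing the cursor promptly releases its resources rather than waiting
    # for garbage collection, which matters when many queries run in a loop.
    cursor.close()
    conn.close()
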
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index 247d04478c..26c01c080a 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -2,8 +2,6 @@ inherit terminal
DEVSHELL = "${SHELL}"
-PATH:prepend:task-devshell = "${COREBASE}/scripts/git-intercept:"
-
python do_devshell () {
if d.getVarFlag("do_devshell", "manualfakeroot"):
d.prependVar("DEVSHELL", "pseudo ")
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index b2f216f361..a209730240 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -60,7 +60,11 @@ python () {
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
- d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
+ d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
+
+ if d.getVar('SRCREV', "INVALID") != "INVALID":
+ # Ensure SRCREV has been processed before accessing SRC_URI
+ bb.fetch.get_srcrev(d)
local_srcuri = []
fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
@@ -68,7 +72,7 @@ python () {
url_data = fetch.ud[url]
parm = url_data.parm
if (url_data.type == 'file' or
- url_data.type == 'npmsw' or
+ url_data.type == 'npmsw' or url_data.type == 'crate' or
'type' in parm and parm['type'] == 'kmeta'):
local_srcuri.append(url)
@@ -76,6 +80,8 @@ python () {
# Dummy value because the default function can't be called with blank SRC_URI
d.setVar('SRCPV', '999')
+ # sstate is never going to work for external source trees, disable it
+ d.setVar('SSTATE_SKIP_CREATION', '1')
if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
d.setVar('CONFIGUREOPT_DEPTRACK', '')
@@ -83,23 +89,22 @@ python () {
tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
for task in tasks:
- if task.endswith("_setscene"):
- # sstate is never going to work for external source trees, disable it
- bb.build.deltask(task, d)
- elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
+ if os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
- for funcname in [task, "base_" + task, "kernel_" + task]:
+ for v in d.keys():
+ cleandirs = d.getVarFlag(v, "cleandirs", False)
+ if cleandirs:
# We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
- cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(funcname, 'cleandirs', False) or '')
+ cleandirs = oe.recipeutils.split_var_value(cleandirs)
setvalue = False
for cleandir in cleandirs[:]:
if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
cleandirs.remove(cleandir)
setvalue = True
if setvalue:
- d.setVarFlag(funcname, 'cleandirs', ' '.join(cleandirs))
+ d.setVarFlag(v, 'cleandirs', ' '.join(cleandirs))
fetch_tasks = ['do_fetch', 'do_unpack']
# If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
@@ -209,8 +214,8 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
- top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
- stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(d.getVar("TOPDIR"),
+ subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
if git_dir == top_git_dir:
git_dir = None
except subprocess.CalledProcessError:
@@ -227,15 +232,16 @@ def srctree_hash_files(d, srcdir=None):
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
- submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
- for line in submodule_helper.splitlines():
- module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
- if os.path.isdir(module_dir):
- proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
- proc.communicate()
- proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
- stdout, _ = proc.communicate()
- git_sha1 += stdout.decode("utf-8")
+ if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
+ submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
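
Illustration (not part of the patch): srctree_hash_files() now discovers submodules by parsing .gitmodules with `git config --file .gitmodules --get-regexp path` instead of the removed `git submodule--helper list`. A rough sketch of extracting the submodule paths that way, assuming a checkout that actually contains a non-empty .gitmodules:

    import os
    import subprocess

    def submodule_paths(s_dir):
        # Mirror the patch's guard: skip entirely when .gitmodules is absent
        # or empty (git config exits non-zero when no entries match).
        gitmodules = os.path.join(s_dir, ".gitmodules")
        if not (os.path.exists(gitmodules) and os.path.getsize(gitmodules) > 0):
            return []
        out = subprocess.check_output(
            ["git", "config", "--file", ".gitmodules", "--get-regexp", "path"],
            cwd=s_dir).decode("utf-8")
        # Each line looks like: "submodule.<name>.path <path>"
        return [line.rsplit(maxsplit=1)[1] for line in out.splitlines()]
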
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index 442bfc7392..34688591f4 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -7,6 +7,7 @@ PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
FONT_PACKAGES ??= "${PN}"
+FONT_PACKAGES:class-native = ""
FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
FONTCONFIG_CACHE_PARAMS ?= "-v"
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
index 9b53dfba7a..731ea575bd 100644
--- a/meta/classes/fs-uuid.bbclass
+++ b/meta/classes/fs-uuid.bbclass
@@ -4,7 +4,7 @@
def get_rootfs_uuid(d):
import subprocess
rootfs = d.getVar('ROOTFS')
- output = subprocess.check_output(['tune2fs', '-l', rootfs])
+ output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True)
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
uuid = line.split()[-1]
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
index 9a5bd9a232..99ac472080 100644
--- a/meta/classes/gnomebase.bbclass
+++ b/meta/classes/gnomebase.bbclass
@@ -1,5 +1,5 @@
def gnome_verdir(v):
- return ".".join(v.split(".")[:-1])
+ return ".".join(v.split(".")[:-1]) or v
GNOME_COMPRESS_TYPE ?= "xz"
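
Illustration (not part of the patch): gnome_verdir() previously returned an empty string for single-component versions; the added "or v" falls back to the full version string in that case. Quick check of the behaviour:

    def gnome_verdir(v):
        # "1.2.3" -> "1.2", "43.1" -> "43", but "42" -> "42" instead of ""
        return ".".join(v.split(".")[:-1]) or v

    assert gnome_verdir("1.2.3") == "1.2"
    assert gnome_verdir("42") == "42"
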
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index f3d83febbf..d944722309 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -122,7 +122,7 @@ go_do_install() {
tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
tar -C ${D}${libdir}/go --no-same-owner -xf -
- if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then
install -d ${D}${bindir}
install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
fi
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
index 92fec16b82..394c0c5d84 100644
--- a/meta/classes/goarch.bbclass
+++ b/meta/classes/goarch.bbclass
@@ -61,31 +61,10 @@ SECURITY_NOPIE_CFLAGS ??= ""
CCACHE_DISABLE ?= "1"
def go_map_arch(a, d):
- import re
- if re.match('i.86', a):
- return '386'
- elif a == 'x86_64':
- return 'amd64'
- elif re.match('arm.*', a):
- return 'arm'
- elif re.match('aarch64.*', a):
- return 'arm64'
- elif re.match('mips64el.*', a):
- return 'mips64le'
- elif re.match('mips64.*', a):
- return 'mips64'
- elif a == 'mips':
- return 'mips'
- elif a == 'mipsel':
- return 'mipsle'
- elif re.match('p(pc|owerpc)(64le)', a):
- return 'ppc64le'
- elif re.match('p(pc|owerpc)(64)', a):
- return 'ppc64'
- elif a == 'riscv64':
- return 'riscv64'
- else:
+ arch = oe.go.map_arch(a)
+ if not arch:
raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
+ return arch
def go_map_arm(a, d):
if a.startswith("arm"):
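
Illustration (not part of the patch): go_map_arch() now delegates to the oe.go module (added to OE_IMPORTS earlier in this patch) and only keeps the SkipRecipe handling. The exact contents of oe.go are not shown in this diff; a plausible sketch of such a map_arch() helper, reconstructed from the mapping removed above, might look like:

    import re

    def map_arch(a):
        # Map a target architecture name to its Go GOARCH value; return None
        # (falsy) for unsupported architectures so callers can skip the recipe.
        if re.match('i.86', a):
            return '386'
        elif a == 'x86_64':
            return 'amd64'
        elif re.match('aarch64.*', a):
            return 'arm64'
        elif re.match('arm.*', a):
            return 'arm'
        elif a == 'riscv64':
            return 'riscv64'
        return None
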
diff --git a/meta/classes/gobject-introspection-data.bbclass b/meta/classes/gobject-introspection-data.bbclass
index 2ef684626a..d90cdb4839 100644
--- a/meta/classes/gobject-introspection-data.bbclass
+++ b/meta/classes/gobject-introspection-data.bbclass
@@ -5,3 +5,8 @@
# so that qemu use can be avoided when necessary.
GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+
+do_compile:prepend() {
+ # This prevents g-ir-scanner from writing cache data to $HOME
+ export GI_SCANNER_DISABLE_CACHE=1
+}
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index 6808339b90..f999b891f3 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -3,7 +3,7 @@ FILES:${PN} += "${datadir}/icons/hicolor"
GTKIC_VERSION ??= '3'
GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
-GTKIC_CMD = "${@ 'gtk-update-icon-cache-3.0.0' if d.getVar('GTKIC_VERSION') == '4' else 'gtk4-update-icon-cache' }"
+GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0' }"
#gtk+3/gtk4 require GTK3DISTROFEATURES, DEPENDS on it make all the
#recipes inherit this class require GTK3DISTROFEATURES
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index 2c948190cf..c0c1fb31ac 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -30,7 +30,7 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
+ ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_').split('.')[0]) if d.getVar('ROOTFS') else ''} \
"
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 2139a7e576..00413d56d1 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -177,8 +177,7 @@ python () {
IMAGE_POSTPROCESS_COMMAND ?= ""
-# some default locales
-IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
+IMAGE_LINGUAS ??= ""
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
@@ -314,7 +313,7 @@ fakeroot python do_image_qa () {
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
except Exception as e:
- qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
+ qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e)
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -441,7 +440,7 @@ python () {
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
localdata.delVar('IMAGE_VERSION_SUFFIX')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude') or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index 960dab1a60..9d5f8c68a4 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -130,10 +130,11 @@ IMAGE_CMD:cpio () {
if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
ln -sf /sbin/init ${WORKDIR}/cpio_append/init
+ touch -h -r ${IMAGE_ROOTFS}/sbin/init ${WORKDIR}/cpio_append/init
else
- touch ${WORKDIR}/cpio_append/init
+ touch -r ${IMAGE_ROOTFS} ${WORKDIR}/cpio_append/init
fi
- (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ (cd ${WORKDIR}/cpio_append && echo ./init | cpio --reproducible -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
fi
fi
}
@@ -187,7 +188,10 @@ multiubi_mkfs() {
fi
}
+MULTIUBI_ARGS = "MKUBIFS_ARGS UBINIZE_ARGS"
+
IMAGE_CMD:multiubi () {
+ ${@' '.join(['%s_%s="%s";' % (arg, name, d.getVar('%s_%s' % (arg, name))) for arg in d.getVar('MULTIUBI_ARGS').split() for name in d.getVar('MULTIUBI_BUILD').split()])}
# Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
for name in ${MULTIUBI_BUILD}; do
eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
index e3863c88a9..8497916d48 100644
--- a/meta/classes/image_types_wic.bbclass
+++ b/meta/classes/image_types_wic.bbclass
@@ -83,7 +83,9 @@ do_image_wic[recrdeptask] += "do_deploy"
do_image_wic[deptask] += "do_image_complete"
WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
-WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
+WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
+# Unified kernel images need objcopy
+WKS_FILE_DEPENDS_DEFAULT += "virtual/${MLPREFIX}${TARGET_PREFIX}binutils"
WKS_FILE_DEPENDS_BOOTLOADERS = ""
WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 9ca84bace9..dfda70bad6 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -444,12 +444,14 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
Check for build paths inside target files and error if paths are not
explicitly ignored.
"""
+ import stat
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
return
- # Ignore symlinks
- if os.path.islink(path):
+ # Ignore symlinks/devs/fifos
+ mode = os.lstat(path).st_mode
+ if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
return
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
@@ -550,7 +552,10 @@ python populate_lic_qa_checksum() {
import hashlib
lineno = 0
license = []
- m = hashlib.new('MD5', usedforsecurity=False)
+ try:
+ m = hashlib.new('MD5', usedforsecurity=False)
+ except TypeError:
+ m = hashlib.new('MD5')
for line in f:
lineno += 1
if (lineno >= beginline):
@@ -970,7 +975,7 @@ def package_qa_check_host_user(path, name, d, elf, messages):
dest = d.getVar('PKGDEST')
pn = d.getVar('PN')
- home = os.path.join(dest, 'home')
+ home = os.path.join(dest, name, 'home')
if path == home or path.startswith(home + os.sep):
return
@@ -1194,11 +1199,12 @@ python do_qa_patch() {
import re
from oe import patch
+ coremeta_path = os.path.join(d.getVar('COREBASE'), 'meta', '')
for url in patch.src_patches(d):
(_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
# skip patches not in oe-core
- if '/meta/' not in fullpath:
+ if not os.path.abspath(fullpath).startswith(coremeta_path):
continue
kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index 07ec242e63..62c8211621 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -61,8 +61,7 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
-TOOLCHAIN = "gcc"
-
+TOOLCHAIN ?= "gcc"
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index b4338da1b1..18ab6b4c4f 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -6,7 +6,12 @@ python () {
d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
}
-FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+# recursively search for devicetree files
+FILES:${KERNEL_PACKAGE_NAME}-devicetree = " \
+ /${KERNEL_DTBDEST}/**/*.dtb \
+ /${KERNEL_DTBDEST}/**/*.dtbo \
+"
+
FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
@@ -67,12 +72,16 @@ do_compile:append() {
}
do_install:append() {
+ install -d ${D}/${KERNEL_DTBDEST}
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- dtb_ext=${dtb##*.}
- dtb_base_name=`basename $dtb .$dtb_ext`
dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
- install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
+ if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
+ dtb_ext=${dtb##*.}
+ dtb_base_name=`basename $dtb .$dtb_ext`
+ dtb=$dtb_base_name.$dtb_ext
+ fi
+ install -Dm 0644 $dtb_path ${D}/${KERNEL_DTBDEST}/$dtb
done
}
@@ -82,7 +91,10 @@ do_deploy:append() {
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
install -d $deployDir
- install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
+ if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
+ dtb=$dtb_base_name.$dtb_ext
+ fi
+ install -m 0644 ${D}/${KERNEL_DTBDEST}/$dtb $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
fi
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 7e09b075ff..27e17db951 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -67,6 +67,9 @@ FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
FIT_SUPPORTED_INITRAMFS_FSTYPES ?= "cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio"
+# Allow the user to select the default DTB for the FIT image when multiple DTBs exist.
+FIT_CONF_DEFAULT_DTB ?= ""
+
# Keys used to sign individually image nodes.
# The keys to sign image nodes must be different from those used to sign
# configuration nodes, otherwise the "required" property, from
@@ -148,7 +151,7 @@ fitimage_emit_section_kernel() {
kernel-$2 {
description = "Linux kernel";
data = /incbin/("$3");
- type = "kernel";
+ type = "${UBOOT_MKIMAGE_KERNEL_TYPE}";
arch = "${UBOOT_ARCH}";
os = "linux";
compression = "$4";
@@ -346,6 +349,7 @@ fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
conf_sign_algo="${FIT_SIGN_ALG}"
+ conf_padding_algo="${FIT_PAD_ALG}"
if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -368,6 +372,7 @@ fitimage_emit_section_config() {
bootscr_line=""
setup_line=""
default_line=""
+ default_dtb_image="${FIT_CONF_DEFAULT_DTB}"
# conf node name is selected based on dtb ID if it is present,
# otherwise it's selected based on kernel ID
@@ -410,7 +415,17 @@ fitimage_emit_section_config() {
# default node is selected based on dtb ID if it is present,
# otherwise it's selected based on kernel ID
if [ -n "$dtb_image" ]; then
- default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ # Select the default node as the user-specified dtb when
+ # multiple dtbs exist.
+ if [ -n "$default_dtb_image" ]; then
+ if [ -s "${EXTERNAL_KERNEL_DEVICETREE}/$default_dtb_image" ]; then
+ default_line="default = \"${FIT_CONF_PREFIX}$default_dtb_image\";"
+ else
+ bbwarn "Couldn't find a valid user specified dtb in ${EXTERNAL_KERNEL_DEVICETREE}/$default_dtb_image"
+ fi
+ else
+ default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ fi
else
default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
fi
@@ -465,6 +480,7 @@ EOF
signature-1 {
algo = "$conf_csum,$conf_sign_algo";
key-name-hint = "$conf_sign_keyname";
+ padding = "$conf_padding_algo";
$sign_line
};
EOF
@@ -527,6 +543,10 @@ fitimage_assemble() {
fi
DTB=$(echo "$DTB" | tr '/' '_')
+
+ # Skip DTB if we've picked it up previously
+ echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
+
DTBS="$DTBS $DTB"
fitimage_emit_section_dtb $1 $DTB $DTB_PATH
done
@@ -534,8 +554,13 @@ fitimage_assemble() {
if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
dtbcount=1
- for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
+ for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtb' -printf '%P\n' | sort) \
+ $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtbo' -printf '%P\n' | sort); do
DTB=$(echo "$DTB" | tr '/' '_')
+
+ # Skip DTB/DTBO if we've picked it up previously
+ echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
+
DTBS="$DTBS $DTB"
fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
done
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
index 2daa068298..1bc98e042d 100644
--- a/meta/classes/kernel-uboot.bbclass
+++ b/meta/classes/kernel-uboot.bbclass
@@ -2,6 +2,9 @@
FIT_KERNEL_COMP_ALG ?= "gzip"
FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
+# Kernel image type passed to mkimage (i.e. kernel kernel_noload...)
+UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel"
+
uboot_prep_kimage() {
if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
@@ -15,6 +18,12 @@ uboot_prep_kimage() {
linux_comp="none"
else
vmlinux_path="vmlinux"
+ # Use vmlinux.initramfs for linux.bin when INITRAMFS_IMAGE_BUNDLE is set,
+ # As per the implementation in kernel.bbclass.
+ # See do_bundle_initramfs function
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ] && [ -e vmlinux.initramfs ]; then
+ vmlinux_path="vmlinux.initramfs"
+ fi
linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}"
linux_comp="${FIT_KERNEL_COMP_ALG}"
fi
diff --git a/meta/classes/kernel-uimage.bbclass b/meta/classes/kernel-uimage.bbclass
index cedb4fa070..2e661ea916 100644
--- a/meta/classes/kernel-uimage.bbclass
+++ b/meta/classes/kernel-uimage.bbclass
@@ -30,6 +30,6 @@ do_uboot_mkimage() {
awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
- uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
+ uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
rm -f linux.bin
}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index afccffcf17..4f8e391428 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -206,7 +206,7 @@ do_kernel_metadata() {
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
for f in ${feat_dirs}; do
- if [ -d "${WORKDIR}/$f/meta" ]; then
+ if [ -d "${WORKDIR}/$f/kernel-meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
includes="$includes -I${WORKDIR}/../oe-local-files/$f"
@@ -322,7 +322,11 @@ do_patch() {
meta_dir=$(kgit --meta)
(cd ${meta_dir}; ln -sf patch.queue series)
if [ -f "${meta_dir}/series" ]; then
- kgit-s2q --gen -v --patches .kernel-meta/
+ kgit_extra_args=""
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
+ kgit_extra_args="--commit-sha author"
+ fi
+ kgit-s2q --gen -v $kgit_extra_args --patches .kernel-meta/
if [ $? -ne 0 ]; then
bberror "Could not apply patches for ${KMACHINE}."
bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
@@ -496,7 +500,7 @@ python do_config_analysis() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
@@ -504,7 +508,7 @@ python do_config_analysis() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
@@ -565,7 +569,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
@@ -587,7 +591,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
@@ -606,7 +610,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
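
The repeated bb.fatal() changes above add the failing command line to the error message. A standalone sketch of the same pattern, using only the Python standard library and a placeholder command (the symbol_why.py invocations themselves are unchanged by this patch):

    import subprocess

    try:
        # placeholder command standing in for the symbol_why.py calls above
        out = subprocess.check_output(["sh", "-c", "echo oops; exit 1"]).decode("utf-8")
    except subprocess.CalledProcessError as e:
        # e.cmd carries the exact argument list, which makes the report actionable
        print("config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode("utf-8")))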
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 8299b394a7..dbd89057f3 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -176,13 +176,14 @@ do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILD
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
python do_symlink_kernsrc () {
s = d.getVar("S")
- if s[-1] == '/':
- # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
- s=s[:-1]
kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as
+ # directory name and fail
+ s = s[:-1]
if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
@@ -204,15 +205,14 @@ PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-export KBUILD_BUILD_VERSION = "1"
-export KBUILD_BUILD_USER ?= "oe-user"
-export KBUILD_BUILD_HOST ?= "oe-host"
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
# The directory where built kernel lies in the kernel tree
KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
KERNEL_IMAGEDEST ?= "boot"
+KERNEL_DTBDEST ?= "${KERNEL_IMAGEDEST}"
+KERNEL_DTBVENDORED ?= "0"
#
# configuration
@@ -231,8 +231,9 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
-EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}" PAHOLE=false"
+EXTRA_OEMAKE += ' CC="${KERNEL_CC}" LD="${KERNEL_LD}"'
+EXTRA_OEMAKE += ' HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"'
+EXTRA_OEMAKE += ' HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}"'
KERNEL_ALT_IMAGETYPE ??= ""
@@ -360,6 +361,10 @@ kernel_do_compile() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
@@ -375,7 +380,7 @@ kernel_do_compile() {
use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ oe_runmake ${PARALLEL_MAKE} ${typeformake} ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
}
@@ -391,6 +396,13 @@ addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+
+ # set up native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+ export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
+ export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
+ export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
+ export PKG_CONFIG_SYSROOT_DIR=""
+
if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
@@ -405,11 +417,15 @@ do_compile_kernelmodules() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules ${KERNEL_EXTRA_ARGS}
- # Module.symvers gets updated during the
+ # Module.symvers gets updated during the
# building of the kernel modules. We need to
# update this in the shared workdir since some
# external kernel modules has a dependency on
@@ -433,10 +449,10 @@ kernel_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
- # If the kernel/ directory is empty remove it to prevent QA issues
- rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
+ # Remove empty module directories to prevent QA issues
+ find "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" -type d -empty -delete
else
bbnote "no modules to install"
fi
@@ -532,6 +548,7 @@ do_shared_workdir () {
#
echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
+ echo "${KERNEL_LOCALVERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-localversion
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
@@ -584,14 +601,28 @@ do_shared_workdir () {
cp tools/objtool/objtool ${kerneldir}/tools/objtool/
fi
fi
+
+ # When building with CONFIG_MODVERSIONS=y and CONFIG_RANDSTRUCT=y we need
+ # to copy the build assets generated for the randstruct seed to
+ # STAGING_KERNEL_BUILDDIR, otherwise the out-of-tree modules build will
+ # generate those assets which will result in a different
+ # RANDSTRUCT_HASHED_SEED
+ if [ -d scripts/basic ]; then
+ mkdir -p ${kerneldir}/scripts
+ cp -r scripts/basic ${kerneldir}/scripts
+ fi
+
+ if [ -d scripts/gcc-plugins ]; then
+ mkdir -p ${kerneldir}/scripts
+ cp -r scripts/gcc-plugins ${kerneldir}/scripts
+ fi
+
}
# We don't need to stage anything, not the modules/firmware since those would clash with linux-firmware
-sysroot_stage_all () {
- :
-}
+SYSROOT_DIRS = ""
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} olddefconfig || oe_runmake -C ${S} O=${B} oldnoconfig"
python check_oldest_kernel() {
oldest_kernel = d.getVar('OLDEST_KERNEL')
@@ -607,12 +638,31 @@ python check_oldest_kernel() {
check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
do_configure[prefuncs] += "check_oldest_kernel"
+KERNEL_LOCALVERSION ??= ""
+
+# 6.3+ requires the variable LOCALVERSION to be set to not get a "+" in
+# the local version. Having it empty means nothing will be added, and any
+# value will be appended to the local kernel version. This replaces the
+# use of .scmversion file for setting a localversion without using
+# the CONFIG_LOCALVERSION option.
+#
+# Note: This class saves the value of localversion to a file
+# so other recipes like make-mod-scripts can restore it via the
+# helper function get_kernellocalversion_file
+export LOCALVERSION="${KERNEL_LOCALVERSION}"
+
kernel_do_configure() {
# fixes extra + in /lib/modules/2.6.37+
# $ scripts/setlocalversion . => +
# $ make kernelversion => 2.6.37
# $ make kernelrelease => 2.6.37+
- touch ${B}/.scmversion ${S}/.scmversion
+ # See kernel-arch.bbclass for post v6.3 removal of the extra
+ # + in localversion. .scmversion is no longer used, and the
+ # variable LOCALVERSION must be used
+ if [ ! -e ${B}/.scmversion -a ! -e ${S}/.scmversion ]; then
+ echo ${KERNEL_LOCALVERSION} > ${B}/.scmversion
+ echo ${KERNEL_LOCALVERSION} > ${S}/.scmversion
+ fi
if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
mv "${S}/.config" "${B}/.config"
@@ -634,9 +684,10 @@ do_savedefconfig() {
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
-inherit cml1
+inherit cml1 pkgconfig
-KCONFIG_CONFIG_COMMAND:append = " PAHOLE=false LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+# Need LD, HOSTLDFLAGS and more for config operations
+KCONFIG_CONFIG_COMMAND:append = " ${EXTRA_OEMAKE}"
EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
@@ -649,6 +700,7 @@ FILES:${KERNEL_PACKAGE_NAME}-image = ""
FILES:${KERNEL_PACKAGE_NAME}-dev = "/${KERNEL_IMAGEDEST}/System.map* /${KERNEL_IMAGEDEST}/Module.symvers* /${KERNEL_IMAGEDEST}/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION_NAME}"
FILES:${KERNEL_PACKAGE_NAME}-modules = ""
+FILES:${KERNEL_PACKAGE_NAME}-dbg = "/usr/lib/debug /usr/src/debug"
RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
index a951ba3325..a79bf18b09 100644
--- a/meta/classes/kernelsrc.bbclass
+++ b/meta/classes/kernelsrc.bbclass
@@ -5,6 +5,7 @@ do_patch[depends] += "virtual/kernel:do_shared_workdir"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
+LOCAL_VERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}"
inherit linux-kernel-base
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index 13ef8cdc0d..baab8fc9a9 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -45,6 +45,7 @@ PACKAGE_NO_GCONV ?= "0"
OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
+mkdir ${libdir}/locale
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index 4ebfc4fb92..b92838c030 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -223,7 +223,7 @@ def find_license_files(d):
bb.fatal('%s: %s' % (d.getVar('PF'), exc))
except SyntaxError:
oe.qa.handle_error("license-syntax",
- "%s: Failed to parse it's LICENSE field." % (d.getVar('PF')), d)
+ "%s: Failed to parse LICENSE: %s" % (d.getVar('PF'), d.getVar('LICENSE')), d)
# Add files from LIC_FILES_CHKSUM to list of license files
lic_chksum_paths = defaultdict(OrderedDict)
for path, data in sorted(lic_chksums.items()):
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index 3213ea758e..1c06a02951 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -229,7 +229,7 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
depends = list(set([dep[0] for dep
in list(taskdata.values())
if not dep[0].endswith("-native") and not dep[0] == pn]))
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
index ba59222c24..0e2a4a4abe 100644
--- a/meta/classes/linux-kernel-base.bbclass
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -33,9 +33,24 @@ def get_kernelversion_file(p):
except IOError:
return None
+def get_kernellocalversion_file(p):
+ fn = p + '/kernel-localversion'
+
+ try:
+ with open(fn, 'r') as f:
+ return f.readlines()[0].strip()
+ except IOError:
+ return ""
+
+ return ""
+
def linux_module_packages(s, d):
suffix = ""
return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
+export KBUILD_BUILD_VERSION = "1"
+export KBUILD_BUILD_USER ?= "oe-user"
+export KBUILD_BUILD_HOST ?= "oe-host"
+
# that's all
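
The KBUILD_* exports moved here keep the reproducible builder-identity defaults in one place. Since they use ?=, they can still be overridden; a hypothetical local.conf tweak:

    KBUILD_BUILD_USER = "builder"
    KBUILD_BUILD_HOST = "build.example.com"

KBUILD_BUILD_VERSION remains hard-set to "1" for reproducibility.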
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
index 19b54e0fdc..fb6660c1a2 100644
--- a/meta/classes/meson.bbclass
+++ b/meta/classes/meson.bbclass
@@ -105,6 +105,7 @@ nm = ${@meson_array('BUILD_NM', d)}
strip = ${@meson_array('BUILD_STRIP', d)}
readelf = ${@meson_array('BUILD_READELF', d)}
objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
+llvm-config = '${STAGING_BINDIR_NATIVE}/llvm-config'
pkgconfig = 'pkg-config-native'
${@rust_tool(d, "BUILD_SYS")}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index ffdccff5fb..3720c00ae5 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -61,8 +61,7 @@ osc://.*/.* http://sources.openembedded.org/ \
https?://.*/.* http://sources.openembedded.org/ \
ftp://.*/.* http://sources.openembedded.org/ \
npm://.*/?.* http://sources.openembedded.org/ \
-${CPAN_MIRROR} http://cpan.metacpan.org/ \
-${CPAN_MIRROR} http://search.cpan.org/CPAN/ \
+${CPAN_MIRROR} https://cpan.metacpan.org/ \
https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
"
@@ -84,6 +83,7 @@ BB_GIT_SHALLOW:pn-binutils-cross-${TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-testsuite = "1"
BB_GIT_SHALLOW:pn-binutils-crosssdk-${SDK_SYS} = "1"
+BB_GIT_SHALLOW:pn-binutils-native = "1"
BB_GIT_SHALLOW:pn-glibc = "1"
PREMIRRORS += "git://sourceware.org/git/glibc.git https://downloads.yoctoproject.org/mirror/sources/ \
git://sourceware.org/git/binutils-gdb.git https://downloads.yoctoproject.org/mirror/sources/"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
index 27bd69ff33..5b2fde8144 100644
--- a/meta/classes/module-base.bbclass
+++ b/meta/classes/module-base.bbclass
@@ -14,6 +14,7 @@ export CROSS_COMPILE = "${TARGET_PREFIX}"
export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
+export LOCALVERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-localversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
# kernel modules are generally machine specific
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 5859ca8d21..a0be559970 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -45,6 +45,7 @@ python multilib_virtclass_handler () {
e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index fc7422c5d7..56726301bd 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -23,6 +23,8 @@ TARGET_CFLAGS = "${BUILD_CFLAGS}"
TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
TARGET_FPU = ""
+TUNE_FEATURES = ""
+ABIEXTENSION = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_OS = "${BUILD_OS}"
@@ -153,7 +155,7 @@ python native_virtclass_handler () {
newdeps.append(dep.replace(pn, bpn) + "-native")
else:
newdeps.append(dep)
- d.setVar(varname, " ".join(newdeps), parsing=True)
+ d.setVar(varname, " ".join(newdeps))
map_dependencies("DEPENDS", e.data, selfref=False)
for pkg in e.data.getVar("PACKAGES", False).split():
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index f8e9607513..e46739e325 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -55,6 +55,7 @@ TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
TARGET_FPU = ""
EXTRA_OECONF_GCC_FLOAT = ""
+TUNE_FEATURES = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
index ba50fcac20..45e6b4fac7 100644
--- a/meta/classes/npm.bbclass
+++ b/meta/classes/npm.bbclass
@@ -19,7 +19,7 @@
inherit python3native
-DEPENDS:prepend = "nodejs-native "
+DEPENDS:prepend = "nodejs-native nodejs-oe-cache-native "
RDEPENDS:${PN}:append:class-target = " nodejs"
EXTRA_OENPM = ""
@@ -46,6 +46,7 @@ NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
NPM_PACKAGE = "${WORKDIR}/npm-package"
NPM_CACHE = "${WORKDIR}/npm-cache"
NPM_BUILD = "${WORKDIR}/npm-build"
+NPM_REGISTRY = "${WORKDIR}/npm-registry"
def npm_global_configs(d):
"""Get the npm global configuration"""
@@ -57,13 +58,36 @@ def npm_global_configs(d):
configs.append(("cache", d.getVar("NPM_CACHE")))
return configs
+## 'npm pack' runs the 'prepare' and 'prepack' scripts. Support for
+## 'ignore-scripts', which prevents this behavior, has been removed
+## from nodejs 16. Use a simple 'tar' invocation instead.
def npm_pack(env, srcdir, workdir):
- """Run 'npm pack' on a specified directory"""
- import shlex
- cmd = "npm pack %s" % shlex.quote(srcdir)
- args = [("ignore-scripts", "true")]
- tarball = env.run(cmd, args=args, workdir=workdir).strip("\n")
- return os.path.join(workdir, tarball)
+ """Emulate 'npm pack' on a specified directory"""
+ import subprocess
+ import os
+ import json
+
+ src = os.path.join(srcdir, 'package.json')
+ with open(src) as f:
+ j = json.load(f)
+
+ # base does not really matter and is for documentation purposes
+ # only. But the 'version' part must exist because other parts of
+ # the bbclass rely on it.
+ base = j['name'].split('/')[-1]
+ tarball = os.path.join(workdir, "%s-%s.tgz" % (base, j['version']));
+
+ # TODO: real 'npm pack' does not include directories while 'tar'
+ # does. But this does not seem to matter...
+ subprocess.run(['tar', 'czf', tarball,
+ '--exclude', './node-modules',
+ '--exclude-vcs',
+ '--transform', r's,^\./,package/,',
+ '--mtime', '1985-10-26T08:15:00.000Z',
+ '.'],
+ check = True, cwd = srcdir)
+
+ return (tarball, j)
python npm_do_configure() {
"""
@@ -86,27 +110,24 @@ python npm_do_configure() {
from bb.fetch2.npm import npm_unpack
from bb.fetch2.npmsw import foreach_dependencies
from bb.progress import OutOfProgressHandler
+ from oe.npm_registry import NpmRegistry
bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
env = NpmEnvironment(d, configs=npm_global_configs(d))
+ registry = NpmRegistry(d.getVar('NPM_REGISTRY'), d.getVar('NPM_CACHE'))
- def _npm_cache_add(tarball):
- """Run 'npm cache add' for a specified tarball"""
- cmd = "npm cache add %s" % shlex.quote(tarball)
- env.run(cmd)
+ def _npm_cache_add(tarball, pkg):
+ """Add tarball to local registry and register it in the
+ cache"""
+ registry.add_pkg(tarball, pkg)
def _npm_integrity(tarball):
"""Return the npm integrity of a specified tarball"""
sha512 = bb.utils.sha512_file(tarball)
return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
- def _npm_version(tarball):
- """Return the version of a specified tarball"""
- regex = r"-(\d+\.\d+\.\d+(-.*)?(\+.*)?)\.tgz"
- return re.search(regex, tarball).group(1)
-
def _npmsw_dependency_dict(orig, deptree):
"""
Return the sub dictionary in the 'orig' dictionary corresponding to the
@@ -163,11 +184,11 @@ python npm_do_configure() {
with tempfile.TemporaryDirectory() as tmpdir:
# Add the dependency to the npm cache
destdir = os.path.join(d.getVar("S"), destsuffix)
- tarball = npm_pack(env, destdir, tmpdir)
- _npm_cache_add(tarball)
+ (tarball, pkg) = npm_pack(env, destdir, tmpdir)
+ _npm_cache_add(tarball, pkg)
# Add its signature to the cached shrinkwrap
dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
- dep["version"] = _npm_version(tarball)
+ dep["version"] = pkg['version']
dep["integrity"] = _npm_integrity(tarball)
if params.get("dev", False):
dep["dev"] = True
@@ -184,7 +205,7 @@ python npm_do_configure() {
# Configure the main package
with tempfile.TemporaryDirectory() as tmpdir:
- tarball = npm_pack(env, d.getVar("S"), tmpdir)
+ (tarball, _) = npm_pack(env, d.getVar("S"), tmpdir)
npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
# Configure the cached manifest file and cached shrinkwrap file
@@ -257,7 +278,7 @@ python npm_do_compile() {
args.append(("build-from-source", "true"))
# Pack and install the main package
- tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
+ (tarball, _) = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
env.run(cmd, args=args)
}
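
The shrinkwrap entries above are now filled straight from the packed tarball: the version comes from package.json and the integrity field from a sha512 digest. A self-contained sketch of the integrity encoding (equivalent in spirit to _npm_integrity, but using hashlib instead of bb.utils so it runs outside BitBake):

    import base64
    import hashlib

    def npm_integrity(tarball_path):
        # npm expects "sha512-" followed by the base64 of the raw digest bytes
        with open(tarball_path, "rb") as f:
            digest_hex = hashlib.sha512(f.read()).hexdigest()
        return "sha512-" + base64.b64encode(bytes.fromhex(digest_hex)).decode()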
diff --git a/meta/classes/overlayfs-etc.bbclass b/meta/classes/overlayfs-etc.bbclass
index 91afee695c..40116e4c6e 100644
--- a/meta/classes/overlayfs-etc.bbclass
+++ b/meta/classes/overlayfs-etc.bbclass
@@ -34,6 +34,7 @@ OVERLAYFS_ETC_DEVICE ??= ""
OVERLAYFS_ETC_USE_ORIG_INIT_NAME ??= "1"
OVERLAYFS_ETC_MOUNT_OPTIONS ??= "defaults"
OVERLAYFS_ETC_INIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-etc-preinit.sh.in"
+OVERLAYFS_ETC_EXPOSE_LOWER ??= "0"
python create_overlayfs_etc_preinit() {
overlayEtcMountPoint = d.getVar("OVERLAYFS_ETC_MOUNT_POINT")
@@ -54,13 +55,15 @@ python create_overlayfs_etc_preinit() {
preinitPath = oe.path.join(d.getVar("IMAGE_ROOTFS"), d.getVar("base_sbindir"), "preinit")
initBaseName = oe.path.join(d.getVar("base_sbindir"), "init")
origInitNameSuffix = ".orig"
+ exposeLower = oe.types.boolean(d.getVar('OVERLAYFS_ETC_EXPOSE_LOWER'))
args = {
'OVERLAYFS_ETC_MOUNT_POINT': overlayEtcMountPoint,
'OVERLAYFS_ETC_MOUNT_OPTIONS': d.getVar('OVERLAYFS_ETC_MOUNT_OPTIONS'),
'OVERLAYFS_ETC_FSTYPE': overlayEtcFsType,
'OVERLAYFS_ETC_DEVICE': overlayEtcDevice,
- 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName
+ 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName,
+ 'OVERLAYFS_ETC_EXPOSE_LOWER': "true" if exposeLower else "false"
}
if useOrigInit:
diff --git a/meta/classes/overlayfs.bbclass b/meta/classes/overlayfs.bbclass
index f7069edd41..c3564b6ec1 100644
--- a/meta/classes/overlayfs.bbclass
+++ b/meta/classes/overlayfs.bbclass
@@ -96,7 +96,11 @@ python do_create_overlayfs_units() {
overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
for mountPoint in overlayMountPoints:
bb.debug(1, "Process variable flag %s" % mountPoint)
- for lower in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ lowerList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
+ if not lowerList:
+ bb.note("No mount points defined for %s flag, skipping" % (mountPoint))
+ continue
+ for lower in lowerList.split():
bb.debug(1, "Prepare mount unit for %s with data mount point %s" %
(lower, d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)))
prepareUnits(d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint), lower)
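
The added guard simply skips mount-point flags that have no writable paths configured. For reference, a hypothetical image configuration using the varflag layout this code iterates over (flag name and paths are made up):

    OVERLAYFS_MOUNT_POINT[data] = "/data"
    OVERLAYFS_WRITABLE_PATHS[data] = "/srv /usr/share/my-application"

A mount point declared without a matching OVERLAYFS_WRITABLE_PATHS entry now produces a note instead of a traceback.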
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index ef972740ce..30c7ccd8e7 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -11,4 +11,5 @@ https?://.*/.* ${SOURCE_MIRROR_URL} \
ftp://.*/.* ${SOURCE_MIRROR_URL} \
npm://.*/?.* ${SOURCE_MIRROR_URL} \
s3://.*/.* ${SOURCE_MIRROR_URL} \
+crate://.*/.* ${SOURCE_MIRROR_URL} \
"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index 62050a18b8..67351b2510 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -262,7 +262,7 @@ def files_from_filevars(filevars):
f = '.' + f
if not f.startswith("./"):
f = './' + f
- globbed = glob.glob(f)
+ globbed = glob.glob(f, recursive=True)
if globbed:
if [ f ] != globbed:
files += globbed
@@ -382,6 +382,11 @@ def splitdebuginfo(file, dvar, dv, d):
debugfile = dvar + dest
sources = []
+ if file.endswith(".ko") and file.find("/lib/modules/") != -1:
+ if oe.package.is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ return (file, sources)
+
# Split the file...
bb.utils.mkdirhier(os.path.dirname(debugfile))
#bb.note("Split %s -> %s" % (file, debugfile))
@@ -479,16 +484,31 @@ def inject_minidebuginfo(file, dvar, dv, d):
bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
return
+ # minidebuginfo does not make sense for ELF objects other than
+ # executables and shared libraries, so skip minidebuginfo
+ # generation for objects such as kernel modules.
+ for line in subprocess.check_output([readelf, '-h', debugfile], universal_newlines=True).splitlines():
+ if not line.strip().startswith("Type:"):
+ continue
+ elftype = line.split(":")[1].strip()
+ if not any(elftype.startswith(i) for i in ["EXEC", "DYN"]):
+ bb.debug(1, 'ELF file {} is not executable/shared, skipping minidebuginfo injection'.format(file))
+ return
+ break
+
# Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
# We will exclude all of these from minidebuginfo to save space.
remove_section_names = []
for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
- fields = line.split()
- if len(fields) < 8:
+ # strip the leading " [ 1]" section index to allow splitting on space
+ if ']' not in line:
+ continue
+ fields = line[line.index(']') + 1:].split()
+ if len(fields) < 7:
continue
name = fields[0]
type = fields[1]
- flags = fields[7]
+ flags = fields[6]
# .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
if name.startswith('.debug_'):
continue
@@ -553,13 +573,25 @@ def copydebugsources(debugsrcdir, sources, d):
strip = d.getVar("STRIP")
objcopy = d.getVar("OBJCOPY")
workdir = d.getVar("WORKDIR")
+ sdir = d.getVar("S")
+ sparentdir = os.path.dirname(os.path.dirname(sdir))
+ sbasedir = os.path.basename(os.path.dirname(sdir)) + "/" + os.path.basename(sdir)
workparentdir = os.path.dirname(os.path.dirname(workdir))
workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
+ # If S isn't based on WORKDIR we can infer our sources are located elsewhere,
+ # e.g. using externalsrc; use S as base for our dirs
+ if workdir in sdir or 'work-shared' in sdir:
+ basedir = workbasedir
+ parentdir = workparentdir
+ else:
+ basedir = sbasedir
+ parentdir = sparentdir
+
# If build path exists in sourcefile, it means toolchain did not use
# -fdebug-prefix-map to compile
if checkbuildpath(sourcefile, d):
- localsrc_prefix = workparentdir + "/"
+ localsrc_prefix = parentdir + "/"
else:
localsrc_prefix = "/usr/src/debug/"
@@ -581,7 +613,7 @@ def copydebugsources(debugsrcdir, sources, d):
processdebugsrc += "sed 's#%s##g' | "
processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
- cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
+ cmd = processdebugsrc % (sourcefile, basedir, localsrc_prefix, parentdir, dvar, debugsrcdir)
try:
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
@@ -591,9 +623,29 @@ def copydebugsources(debugsrcdir, sources, d):
# cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
# Work around this by manually finding and copying any symbolic links that made it through.
cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
- (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
+ (dvar, debugsrcdir, dvar, debugsrcdir, parentdir, dvar, debugsrcdir)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # debugsources.list may be polluted from the host if we used externalsrc:
+ # cpio uses copy-pass and may have just created a directory structure
+ # matching the one from the host. If that's the case, move those files to
+ # debugsrcdir to avoid host contamination.
+ # Empty dir structure will be deleted in the next step.
+
+ # Same check as above for externalsrc
+ if workdir not in sdir:
+ if os.path.exists(dvar + debugsrcdir + sdir):
+ # Special case for /build since we need to move into
+ # /usr/src/debug/build so rename sdir to build.build
+ if sdir == "/build" or sdir.find("/build/") == 0:
+ cmd = "mv %s%s%s %s%s%s" % (dvar, debugsrcdir, "/build", dvar, debugsrcdir, "/build.build")
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ sdir = sdir.replace("/build", "/build.build", 1)
+
+ cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar,debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
# The copy by cpio may have resulted in some empty directories! Remove these
cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
@@ -2126,18 +2178,18 @@ python package_do_pkgconfig () {
with open(file, 'r') as f:
lines = f.readlines()
for l in lines:
- m = var_re.match(l)
- if m:
- name = m.group(1)
- val = m.group(2)
- pd.setVar(name, pd.expand(val))
- continue
m = field_re.match(l)
if m:
hdr = m.group(1)
exp = pd.expand(m.group(2))
if hdr == 'Requires':
pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+ continue
+ m = var_re.match(l)
+ if m:
+ name = m.group(1)
+ val = m.group(2)
+ pd.setVar(name, pd.expand(val))
for pkg in packages.split():
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
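
The minidebuginfo section scan previously split the whole readelf line on whitespace, which miscounts fields when the "[ 1]" index itself contains a space. A standalone sketch of the fixed parsing, using a hand-written readelf -W -S line as input:

    # example line as printed by "readelf -W -S" (columns after the index: Name Type Address Off Size ES Flg Lk Inf Al)
    line = "  [ 1] .text             PROGBITS        0000000000001000 001000 000200 00  AX  0   0 16"
    fields = line[line.index(']') + 1:].split()
    name, section_type, flags = fields[0], fields[1], fields[6]
    assert (name, section_type, flags) == (".text", "PROGBITS", "AX")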
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index e9ff1f7e65..f403af5343 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -4,6 +4,7 @@ IMAGE_PKGTYPE ?= "rpm"
RPM="rpm"
RPMBUILD="rpmbuild"
+RPMBUILD_COMPMODE ?= "${@'w19T%d.zstdio' % int(d.getVar('ZSTD_THREADS'))}"
PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
@@ -193,8 +194,6 @@ python write_specfile () {
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
continue
path = path.replace("%", "%%%%%%%%")
- path = path.replace("[", "?")
- path = path.replace("]", "?")
# Treat all symlinks to directories as normal files.
# os.walk() lists them as directories.
@@ -214,8 +213,6 @@ python write_specfile () {
if dir == "CONTROL" or dir == "DEBIAN":
continue
dir = dir.replace("%", "%%%%%%%%")
- dir = dir.replace("[", "?")
- dir = dir.replace("]", "?")
# All packages own the directories their files are in...
target.append('%dir "' + path + '/' + dir + '"')
else:
@@ -230,8 +227,6 @@ python write_specfile () {
if file == "CONTROL" or file == "DEBIAN":
continue
file = file.replace("%", "%%%%%%%%")
- file = file.replace("[", "?")
- file = file.replace("]", "?")
if conffiles.count(path + '/' + file):
target.append('%config "' + path + '/' + file + '"')
else:
@@ -658,6 +653,7 @@ python do_package_rpm () {
# Setup the rpmbuild arguments...
rpmbuild = d.getVar('RPMBUILD')
+ rpmbuild_compmode = d.getVar('RPMBUILD_COMPMODE')
targetsys = d.getVar('TARGET_SYS')
targetvendor = d.getVar('HOST_VENDOR')
@@ -684,8 +680,8 @@ python do_package_rpm () {
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
cmd = cmd + " --define '_build_id_links none'"
- cmd = cmd + " --define '_binary_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
- cmd = cmd + " --define '_source_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
+ cmd = cmd + " --define '_source_payload %s'" % rpmbuild_compmode
+ cmd = cmd + " --define '_binary_payload %s'" % rpmbuild_compmode
cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
cmd = cmd + " --define '_buildhost reproducible'"
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 16f929bf59..fb00460172 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -53,6 +53,8 @@ TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_ARCHIVE_TYPE ?= "tar.xz"
SDK_XZ_COMPRESSION_LEVEL ?= "-9"
SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+SDK_ZIP_OPTIONS ?= "-y"
+
# To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz
python () {
@@ -60,7 +62,7 @@ python () {
d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
# SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
# recommended to cd into the input dir first to avoid archiving the buildpath
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
else:
d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
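
SDK_ZIP_OPTIONS keeps the previous -y behaviour (store symlinks as symlinks) as the default while allowing it to be extended. An illustrative local.conf combination (values are examples only):

    SDK_ARCHIVE_TYPE = "zip"
    SDK_ZIP_OPTIONS = "-y -9"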
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index e2019f9bbf..bdd86863c6 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -114,7 +114,7 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = " write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
@@ -227,7 +227,7 @@ python copy_buildsystem () {
# Write out config file for devtool
import configparser
- config = configparser.SafeConfigParser()
+ config = configparser.ConfigParser()
config.add_section('General')
config.set('General', 'bitbake_subdir', conf_bbpath)
config.set('General', 'init_path', conf_initpath)
@@ -363,7 +363,8 @@ python copy_buildsystem () {
f.write('BUILDCFG_HEADER = ""\n\n')
# Write METADATA_REVISION
- f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
+ # Needs a distro override so it can override the value set in the bbclass code (applied later than local.conf)
+ f.write('METADATA_REVISION:%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION')))
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
@@ -714,7 +715,7 @@ sdk_ext_postinst() {
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
- echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
+ echo "export PATH=\"$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH\"" >> $env_setup_script
echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index ad8489902a..f2ebe94ca4 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -7,6 +7,7 @@
# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+# e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1.
#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
#
@@ -87,7 +88,7 @@
QB_MEM ?= "-m 256"
QB_SMP ?= ""
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
-QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
+QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
QB_OPT_APPEND ?= ""
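
With this change QB_DEFAULT_KERNEL follows the bundled-initramfs naming automatically. A sketch of the triggering configuration (image names are illustrative):

    INITRAMFS_IMAGE_BUNDLE = "1"
    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    # runqemu then defaults to e.g. bzImage-initramfs-qemux86-64.bin instead of bzImage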
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
index 7fa4a849ea..df6e9a7db9 100644
--- a/meta/classes/recipe_sanity.bbclass
+++ b/meta/classes/recipe_sanity.bbclass
@@ -10,7 +10,7 @@ def bad_runtime_vars(cfgdata, d):
for var in d.getVar("__recipe_sanity_badruntimevars").split():
val = d.getVar(var, False)
if val and val != cfgdata.get(var):
- __note("%s should be %s_${PN}" % (var, var), d)
+ __note("%s should be %s:${PN}" % (var, var), d)
__recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = ""
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 5f12d5aaeb..f7ededff26 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -27,6 +27,13 @@ BB_SCHEDULER ?= "completion"
BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
do_rm_work () {
+ # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH.
+ # This avoids a race condition accessing 'rm' when deleting WORKDIR folders at the end of this function.
+ RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
+ if [ -z "${RM_BIN}" ]; then
+ bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
+ fi
+
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
if [ "$p" = "${PN}" ]; then
@@ -73,7 +80,7 @@ do_rm_work () {
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f -- $i;
+ "${RM_BIN}" -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
@@ -90,7 +97,7 @@ do_rm_work () {
;;
esac
done
- rm -f -- $i
+ "${RM_BIN}" -f -- $i
esac
done
@@ -100,12 +107,14 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on NFS2/3 server.
if [ $dir = 'pseudo' ]; then
- rm -rf -- $dir 2> /dev/null || true
+ "${RM_BIN}" -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf -- $dir
+ "${RM_BIN}" -rf -- $dir
fi
done
}
+do_rm_work[vardepsexclude] += "SSTATETASKS"
+
do_rm_work_all () {
:
}
@@ -172,7 +181,7 @@ python inject_rm_work() {
# other recipes and thus will typically run much later than completion of
# work in the recipe itself.
# In practice, addtask() here merely updates the dependencies.
- bb.build.addtask('do_rm_work', 'do_build', ' '.join(deps), d)
+ bb.build.addtask('do_rm_work', 'do_rm_work_all do_build', ' '.join(deps), d)
# Always update do_build_without_rm_work dependencies.
bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
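
The new RM_BIN lookup only changes how 'rm' is resolved; which recipes get cleaned is still governed by RM_WORK_EXCLUDE. Hypothetical local.conf usage:

    INHERIT += "rm_work"
    RM_WORK_EXCLUDE += "linux-yocto busybox"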
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index fc179613fb..5c0b3ec37c 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -14,7 +14,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
-# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
+# Tweak files in /etc if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
# We also need to do the same for the kernel boot parameters,
@@ -103,20 +103,24 @@ read_only_rootfs_hook () {
# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
# and the keys under /var/run/ssh.
- if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
- if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
- echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
- else
- echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ # If overlayfs-etc is used this is not done as /etc is treated as writable
+ # If stateless-rootfs is enabled this is always done as we don't want to save keys then
+ if ${@ 'true' if not bb.utils.contains('IMAGE_FEATURES', 'overlayfs-etc', True, False, d) or bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else 'false'}; then
+ if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
+ if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ else
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ fi
fi
- fi
- # Also tweak the key location for dropbear in the same way.
- if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
- if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
- echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ # Also tweak the key location for dropbear in the same way.
+ if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
+ if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
+ echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ fi
fi
fi
@@ -305,7 +309,7 @@ rootfs_trim_schemas () {
}
rootfs_check_host_user_contaminated () {
- contaminated="${WORKDIR}/host-user-contaminated.txt"
+ contaminated="${S}/host-user-contaminated.txt"
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index eb0ca05804..293e405f62 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -351,6 +351,7 @@ def check_connectivity(d):
if len(msg) == 0:
msg = "%s.\n" % err
msg += " Please ensure your host's network is configured correctly.\n"
+ msg += " Please ensure CONNECTIVITY_CHECK_URIS is correct and specified URIs are available.\n"
msg += " If your ISP or network is blocking the above URL,\n"
msg += " try with another domain name, for example by setting:\n"
msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\""
@@ -497,6 +498,14 @@ def check_tar_version(sanity_data):
version = result.split()[3]
if bb.utils.vercmp_string_op(version, "1.28", "<"):
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
+
+ try:
+ result = subprocess.check_output(["tar", "--help"], stderr=subprocess.STDOUT).decode('utf-8')
+ if "--xattrs" not in result:
+ return "Your tar doesn't support --xattrs, please use GNU tar.\n"
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute tar --help, exit code %d\n%s\n" % (e.returncode, e.output)
+
return None
# We use git parameters and functionality only found in 1.7.8 or later
@@ -858,7 +867,7 @@ def check_sanity_everybuild(status, d):
mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
protocols = ['http', 'ftp', 'file', 'https', \
'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
- 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az' ]
+ 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az', 'ftps', 'crate']
for mirror_var in mirror_vars:
mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
@@ -990,13 +999,6 @@ def check_sanity(sanity_data):
if status.messages != "":
raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
-# Create a copy of the datastore and finalise it to ensure appends and
-# overrides are set - the datastore has yet to be finalised at ConfigParsed
-def copy_data(e):
- sanity_data = bb.data.createCopy(e.data)
- sanity_data.finalize()
- return sanity_data
-
addhandler config_reparse_eventhandler
config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
python config_reparse_eventhandler() {
@@ -1007,13 +1009,13 @@ addhandler check_sanity_eventhandler
check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
python check_sanity_eventhandler() {
if bb.event.getName(e) == "SanityCheck":
- sanity_data = copy_data(e)
+ sanity_data = bb.data.createCopy(e.data)
check_sanity(sanity_data)
if e.generateevents:
sanity_data.setVar("SANITY_USE_EVENTS", "1")
bb.event.fire(bb.event.SanityCheckPassed(), e.data)
elif bb.event.getName(e) == "NetworkTest":
- sanity_data = copy_data(e)
+ sanity_data = bb.data.createCopy(e.data)
if e.generateevents:
sanity_data.setVar("SANITY_USE_EVENTS", "1")
bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index 80f8382107..ffe43bb7c9 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -3,7 +3,9 @@ inherit python3native
DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
-
+# The value below is derived from $(getconf ARG_MAX)
+SCONS_MAXLINELENGTH ?= "MAXLINELENGTH=2097152"
+EXTRA_OESCONS:append = " ${SCONS_MAXLINELENGTH}"
do_configure() {
if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
@@ -25,4 +27,8 @@ scons_do_install() {
die "scons install execution failed."
}
+do_configure[vardepsexclude] = "SCONS_MAXLINELENGTH"
+do_compile[vardepsexclude] = "SCONS_MAXLINELENGTH"
+do_install[vardepsexclude] = "SCONS_MAXLINELENGTH"
+
EXPORT_FUNCTIONS do_compile do_install
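
Since MAXLINELENGTH is now passed via SCONS_MAXLINELENGTH (and excluded from the task hashes), a recipe whose SCons build needs a different command-line limit can override it; an illustrative value:

    SCONS_MAXLINELENGTH = "MAXLINELENGTH=131072"

Setting it to an empty string drops the argument entirely.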
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 3513269bca..dd6cf12920 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -1084,7 +1084,7 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
- directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
+ directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]
def isNativeCross(x):
return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index bf8ca58b0b..a78839bdc2 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -269,6 +269,10 @@ python extend_recipe_sysroot() {
pn = d.getVar("PN")
stagingdir = d.getVar("STAGING_DIR")
sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
+ # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
+ manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
+ if manifestprefix:
+ sharedmanifests = sharedmanifests + "/" + manifestprefix
recipesysroot = d.getVar("RECIPE_SYSROOT")
recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
@@ -644,7 +648,7 @@ python staging_taskhandler() {
bbtasks = e.tasklist
for task in bbtasks:
deps = d.getVarFlag(task, "depends")
- if task == "do_configure" or (deps and "populate_sysroot" in deps):
+ if task != 'do_prepare_recipe_sysroot' and (task == "do_configure" or (deps and "populate_sysroot" in deps)):
d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index 09ec52792d..c07332d5b6 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -146,6 +146,7 @@ python systemd_populate_packages() {
def systemd_check_services():
searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
searchpaths.append(d.getVar("systemd_system_unitdir"))
+ searchpaths.append(d.getVar("systemd_user_unitdir"))
systemd_packages = d.getVar('SYSTEMD_PACKAGES')
keys = 'Also'
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index 8ffaeab284..0241f29dfb 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -101,36 +101,12 @@ TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
testimage_dump_target () {
- top -bn1
- ps
- free
- df
- # The next command will export the default gateway IP
- export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
- ping -c3 $DEFAULT_GATEWAY
- dmesg
- netstat -an
- ip address
- # Next command will dump logs from /var/log/
- find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
}
testimage_dump_host () {
- top -bn1
- iostat -x -z -N -d -p ALL 20 2
- ps -ef
- free
- df
- memstat
- dmesg
- ip -s link
- netstat -an
}
testimage_dump_monitor () {
- query-status
- query-block
- dump-guest-memory {"paging":false,"protocol":"file:%s.img"}
}
python do_testimage() {
@@ -240,7 +216,7 @@ def testimage_main(d):
with open(tdname, "r") as f:
td = json.load(f)
except FileNotFoundError as err:
- bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
+ bb.fatal('File %s not found (%s).\nHave you built the image with IMAGE_CLASSES += "testimage" in the conf/local.conf?' % (tdname, err))
# Some variables need to be updated (mostly paths) with the
# ones of the current environment because some tests require them.
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index 1d7c703748..d735d434e6 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -31,7 +31,7 @@ toolchain_create_sdk_env_script () {
echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
echo '# Only disable this check if you absolutely know what you are doing!' >> $script
- echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script
echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
@@ -47,7 +47,7 @@ toolchain_create_sdk_env_script () {
for i in ${CANADIANEXTRAOS}; do
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
- echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index b9ad35821a..fe85521877 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -59,10 +59,6 @@ UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
-# Default name of u-boot initial env, but enable individual recipes to change
-# this value.
-UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
-
# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
# to find EXTLINUX conf file.
UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
index dcebe7ff31..ba7a213ea2 100644
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ b/meta/classes/uboot-extlinux-config.bbclass
@@ -33,11 +33,11 @@
# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
# UBOOT_EXTLINUX_TIMEOUT ??= "30"
#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
+# UBOOT_EXTLINUX_KERNEL_IMAGE:default ??= "../zImage"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION:default ??= "Linux Default"
#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
+# UBOOT_EXTLINUX_KERNEL_IMAGE:fallback ??= "../zImage-fallback"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION:fallback ??= "Linux Fallback"
#
# Results:
#
@@ -152,7 +152,7 @@ python do_create_extlinux_config() {
bb.fatal('Unable to open %s' % (cfile))
}
UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
-do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
+do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s:%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
index 31ffe1f472..6bb4ddc600 100644
--- a/meta/classes/uboot-sign.bbclass
+++ b/meta/classes/uboot-sign.bbclass
@@ -73,6 +73,9 @@ UBOOT_FIT_HASH_ALG ?= "sha256"
FIT_SIGN_ALG ?= "rsa2048"
UBOOT_FIT_SIGN_ALG ?= "rsa2048"
+# Kernel / U-Boot fitImage Padding Algo
+FIT_PAD_ALG ?= "pkcs-1.5"
+
# Generate keys for signing Kernel / U-Boot fitImage
FIT_GENERATE_KEYS ?= "0"
UBOOT_FIT_GENERATE_KEYS ?= "0"
@@ -289,7 +292,7 @@ do_uboot_generate_rsa_keys() {
"${UBOOT_FIT_SIGN_NUMBITS}"
echo "Generating certificate for signing U-Boot fitImage"
- openssl req ${FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
+ openssl req ${UBOOT_FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
-key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
-out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt
fi
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 6a9e862bcd..7f0591d49a 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -167,5 +167,7 @@ python uninative_changeinterp () {
if not elf.isDynamic():
continue
+ os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
+ os.chmod(f, s[stat.ST_MODE])
}
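
The uninative change above makes each binary user-writable before running patchelf-uninative and restores the original mode afterwards, so read-only files can be patched without permanently changing their permissions. A minimal standalone sketch of that pattern (the helper name and arguments are illustrative, not part of the class):

    import os
    import stat
    import subprocess

    def set_uninative_interpreter(path, loader):
        # Temporarily grant user write permission, patch the ELF interpreter,
        # then restore the original mode so permissions stay as packaged.
        mode = os.stat(path).st_mode
        os.chmod(path, mode | stat.S_IWUSR)
        try:
            subprocess.check_output(
                ("patchelf-uninative", "--set-interpreter", loader, path),
                stderr=subprocess.STDOUT)
        finally:
            os.chmod(path, mode)
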
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index fc1ffd828c..2804299fc4 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -1,5 +1,5 @@
# This class is used to help the alternatives system which is useful when
-# multiple sources provide same command. You can use update-alternatives
+# multiple sources provide the same command. You can use update-alternatives
# command directly in your recipe, but in most cases this class simplifies
# that job.
#
@@ -29,7 +29,7 @@
# A non-default link to create for a target
# ALTERNATIVE_TARGET[name] = "target"
#
-# This is the name of the binary as it's been install by do_install
+# This is the name of the binary as it's been installed by do_install
# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
#
# A package specific link for a target
@@ -62,7 +62,7 @@ ALTERNATIVE_PRIORITY = "10"
# We need special processing for vardeps because it can not work on
# modified flag values. So we aggregate the flags into a new variable
-# and include that vairable in the set.
+# and include that variable in the set.
UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
@@ -80,10 +80,10 @@ def gen_updatealternativesvardeps(d):
for p in pkgs:
for v in vars:
- for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
+ for flag in sorted((d.getVarFlags("%s:%s" % (v,p)) or {}).keys()):
if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
continue
- d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
+ d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s:%s' % (v,p), flag, False)))
def ua_extend_depends(d):
if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3acf59cd46..e5527f0529 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -41,7 +41,7 @@ def update_useradd_static_config(d):
def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
- msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
if files:
msg += " Add %s to one of these files: %s" % (id, files)
else:
diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
index 2a3cf6f8aa..290dfda6c8 100644
--- a/meta/conf/bitbake.conf
+++ b/meta/conf/bitbake.conf
@@ -523,7 +523,7 @@ HOSTTOOLS += " \
python3 pzstd ranlib readelf readlink realpath rm rmdir rpcgen sed seq sh \
sha1sum sha224sum sha256sum sha384sum sha512sum \
sleep sort split stat strings strip tail tar tee test touch tr true uname \
- uniq wc wget which xargs zstd \
+ uniq unzstd wc wget which xargs zstd \
"
# Tools needed to run testimage runtime image testing
@@ -671,7 +671,7 @@ export PYTHONHASHSEED = "0"
export PERL_HASH_SEED = "0"
export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
# A SOURCE_DATE_EPOCH of '0' might be misinterpreted as no SDE
-export SOURCE_DATE_EPOCH_FALLBACK ??= "1302044400"
+SOURCE_DATE_EPOCH_FALLBACK ??= "1302044400"
REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
##################################################################
@@ -924,7 +924,7 @@ SHELL[unexport] = "1"
TRANSLATED_TARGET_ARCH ??= "${@d.getVar('TARGET_ARCH').replace("_", "-")}"
# Set a default umask to use for tasks for determinism
-BB_DEFAULT_UMASK = "022"
+BB_DEFAULT_UMASK ??= "022"
# Complete output from bitbake
BB_CONSOLELOG ?= "${LOG_DIR}/cooker/${MACHINE}/${DATETIME}.log"
@@ -948,7 +948,7 @@ BB_HASHCONFIG_IGNORE_VARS ?= "${BB_HASHEXCLUDE_COMMON} DATE TIME SSH_AGENT_PID \
PARALLEL_MAKE BB_NUMBER_THREADS BB_ORIGENV BB_INVALIDCONF BBINCLUDED \
GIT_PROXY_COMMAND ALL_PROXY all_proxy NO_PROXY no_proxy FTP_PROXY ftp_proxy \
HTTP_PROXY http_proxy HTTPS_PROXY https_proxy SOCKS5_USER SOCKS5_PASSWD \
- BB_SETSCENE_ENFORCE BB_CMDLINE BB_SERVER_TIMEOUT"
+ BB_SETSCENE_ENFORCE BB_CMDLINE BB_SERVER_TIMEOUT BB_NICE_LEVEL"
BB_SIGNATURE_EXCLUDE_FLAGS ?= "doc deps depends \
lockfiles vardepsexclude vardeps vardepvalue vardepvalueexclude \
file-checksums python task nostamp \
diff --git a/meta/conf/distro/include/cve-extra-exclusions.inc b/meta/conf/distro/include/cve-extra-exclusions.inc
index 993ee2811a..cb2d920441 100644
--- a/meta/conf/distro/include/cve-extra-exclusions.inc
+++ b/meta/conf/distro/include/cve-extra-exclusions.inc
@@ -15,6 +15,11 @@
# the aim of sharing that work and ensuring we don't duplicate it.
#
+# cargo https://nvd.nist.gov/vuln/detail/CVE-2022-46176
+# cargo security advisory https://blog.rust-lang.org/2023/01/10/cve-2022-46176.html
+# This CVE is a security issue in cargo's use of ssh. Kirkstone uses rust 1.59.0 and rust on-target is not supported, so target images are not vulnerable to this CVE.
+# BitBake fetches sources with 'wget' (over https) rather than ssh, so cargo-native is not vulnerable either and the CVE is added to the ignore list.
+CVE_CHECK_IGNORE += "CVE-2022-46176"
# strace https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2000-0006
# CVE is more than 20 years old with no resolution evident
@@ -90,24 +95,24 @@ CVE_CHECK_IGNORE += "CVE-2022-0185 CVE-2022-0264 CVE-2022-0286 CVE-2022-0330 CVE
CVE-2022-28356 CVE-2022-28388 CVE-2022-28389 CVE-2022-28390 CVE-2022-28796 CVE-2022-28893 CVE-2022-29156 \
CVE-2022-29582 CVE-2022-29968"
-#### CPE update pending ####
-
-# groff:groff-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2000-0803
-# Appears it was fixed in https://git.savannah.gnu.org/cgit/groff.git/commit/?id=07f95f1674217275ed4612f1dcaa95a88435c6a7
-# so from 1.17 onwards. Reported to the database for update by RP 2021/5/9. Update accepted 2021/5/10.
-#CVE_CHECK_IGNORE += "CVE-2000-0803"
-
-
-
-#### Upstream still working on ####
# qemu:qemu-native:qemu-system-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2021-20255
# There was a proposed patch https://lists.gnu.org/archive/html/qemu-devel/2021-02/msg06098.html
-# however qemu maintainers are sure the patch is incorrect and should not be applied.
-
-# wget https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2021-31879
-# https://mail.gnu.org/archive/html/bug-wget/2021-02/msg00002.html
-# No response upstream as of 2021/5/12
+# qemu maintainers say the patch is incorrect and should not be applied
+# Ignore from OE's perspective as the issue is of low impact; at worst it sits in an infinite loop rather than being exploitable
+CVE_CHECK_IGNORE += "CVE-2021-20255"
+
+# qemu:qemu-native:qemu-system-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-12067
+# There was a proposed patch, but it was rejected by upstream qemu. It is unclear whether the issue can
+# still be reproduced or where exactly any bug is.
+# Ignore from OE's perspective as we'll pick up any fix when upstream accepts one.
+CVE_CHECK_IGNORE += "CVE-2019-12067"
+
+# nasm:nasm-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2020-18974
+# It is a fuzzing-related buffer overflow of low impact, since most devices
+# wouldn't expose an assembler. Upstream is inactive and there is little to be
+# done about the bug, so ignore it from an OE perspective.
+CVE_CHECK_IGNORE += "CVE-2020-18974"
diff --git a/meta/conf/distro/include/maintainers.inc b/meta/conf/distro/include/maintainers.inc
index 0a1897fc92..bfc14951fe 100644
--- a/meta/conf/distro/include/maintainers.inc
+++ b/meta/conf/distro/include/maintainers.inc
@@ -42,7 +42,7 @@ RECIPE_MAINTAINER:pn-alsa-utils-scripts = "Michael Opdenacker <michael.opdenacke
RECIPE_MAINTAINER:pn-apmd = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-apr = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER:pn-apr-util = "Hongxu Jia <hongxu.jia@windriver.com>"
-RECIPE_MAINTAINER:pn-apt = "Aníbal Limón <limon.anibal@gmail.com>"
+RECIPE_MAINTAINER:pn-apt = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-argp-standalone = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-asciidoc = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER:pn-aspell = "Anuj Mittal <anuj.mittal@intel.com>"
@@ -62,7 +62,7 @@ RECIPE_MAINTAINER:pn-base-passwd = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-bash = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER:pn-bash-completion = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER:pn-bc = "Anuj Mittal <anuj.mittal@intel.com>"
-RECIPE_MAINTAINER:pn-bind = "Armin Kuster <akuster808@gmail.com>"
+RECIPE_MAINTAINER:pn-bind = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-binutils = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-binutils-cross-${TARGET_ARCH} = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "Khem Raj <raj.khem@gmail.com>"
@@ -95,8 +95,8 @@ RECIPE_MAINTAINER:pn-cantarell-fonts = "Alexander Kanavin <alex.kanavin@gmail.co
RECIPE_MAINTAINER:pn-ccache = "Robert Yang <liezhi.yang@windriver.com>"
RECIPE_MAINTAINER:pn-cdrtools-native = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER:pn-chrpath = "Yi Zhao <yi.zhao@windriver.com>"
-RECIPE_MAINTAINER:pn-cmake = "Pascal Bach <pascal.bach@siemens.com>"
-RECIPE_MAINTAINER:pn-cmake-native = "Pascal Bach <pascal.bach@siemens.com>"
+RECIPE_MAINTAINER:pn-cmake = "Unassigned <unassigned@yoctoproject.org>"
+RECIPE_MAINTAINER:pn-cmake-native = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-connman = "Changhyeok Bae <changhyeok.bae@gmail.com>"
RECIPE_MAINTAINER:pn-connman-conf = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER:pn-connman-gnome = "Ross Burton <ross.burton@arm.com>"
@@ -152,7 +152,7 @@ RECIPE_MAINTAINER:pn-docbook-xml-dtd4 = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER:pn-docbook-xsl-stylesheets = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER:pn-dos2unix = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-dosfstools = "Yi Zhao <yi.zhao@windriver.com>"
-RECIPE_MAINTAINER:pn-dpkg = "Aníbal Limón <limon.anibal@gmail.com>"
+RECIPE_MAINTAINER:pn-dpkg = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-dropbear = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER:pn-dtc = "Wang Mingyu <wangmy@fujitsu.com>"
RECIPE_MAINTAINER:pn-dwarfsrcfiles = "Alexander Kanavin <alex.kanavin@gmail.com>"
@@ -165,7 +165,7 @@ RECIPE_MAINTAINER:pn-ell = "Zang Ruochen <zangrc.fnst@fujitsu.com>"
RECIPE_MAINTAINER:pn-enchant2 = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-encodings = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-epiphany = "Alexander Kanavin <alex.kanavin@gmail.com>"
-RECIPE_MAINTAINER:pn-erofs-utils = "Richard Weinberger <richard@nod.at>"
+RECIPE_MAINTAINER:pn-erofs-utils = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-ethtool = "Changhyeok Bae <changhyeok.bae@gmail.com>"
RECIPE_MAINTAINER:pn-eudev = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-expat = "Yi Zhao <yi.zhao@windriver.com>"
@@ -189,7 +189,7 @@ RECIPE_MAINTAINER:pn-gcc-cross-canadian-${TRANSLATED_TARGET_ARCH} = "Khem Raj <r
RECIPE_MAINTAINER:pn-gcc-crosssdk-${SDK_SYS} = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-gcc-runtime = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-gcc-sanitizers = "Khem Raj <raj.khem@gmail.com>"
-RECIPE_MAINTAINER:pn-gcc-source-11.3.0 = "Khem Raj <raj.khem@gmail.com>"
+RECIPE_MAINTAINER:pn-gcc-source-11.4.0 = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-gconf = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER:pn-gcr = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER:pn-gdb = "Khem Raj <raj.khem@gmail.com>"
@@ -280,8 +280,8 @@ RECIPE_MAINTAINER:pn-intltool = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER:pn-iproute2 = "Changhyeok Bae <changhyeok.bae@gmail.com>"
RECIPE_MAINTAINER:pn-iptables = "Changhyeok Bae <changhyeok.bae@gmail.com>"
RECIPE_MAINTAINER:pn-iputils = "Changhyeok Bae <changhyeok.bae@gmail.com>"
-RECIPE_MAINTAINER:pn-iso-codes = "Wang Mingyu <wangmy@cn.ujitsu.com>"
-RECIPE_MAINTAINER:pn-itstool = "Andreas Müller <schnitzeltony@gmail.com>"
+RECIPE_MAINTAINER:pn-iso-codes = "Wang Mingyu <wangmy@cn.fujitsu.com>"
+RECIPE_MAINTAINER:pn-itstool = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-iw = "Changhyeok Bae <changhyeok.bae@gmail.com>"
RECIPE_MAINTAINER:pn-libjpeg-turbo = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-json-c = "Yi Zhao <yi.zhao@windriver.com>"
@@ -294,7 +294,7 @@ RECIPE_MAINTAINER:pn-kernel-devsrc = "Bruce Ashfield <bruce.ashfield@gmail.com>"
RECIPE_MAINTAINER:pn-kexec-tools = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-keymaps = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER:pn-kmod = "Chen Qi <Qi.Chen@windriver.com>"
-RECIPE_MAINTAINER:pn-kmscube = "Carlos Rafael Giani <dv@pseudoterminal.org>"
+RECIPE_MAINTAINER:pn-kmscube = "Carlos Rafael Giani <crg7475@mailbox.org>"
RECIPE_MAINTAINER:pn-l3afpad = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-lame = "Michael Opdenacker <michael.opdenacker@bootlin.com>"
RECIPE_MAINTAINER:pn-ldconfig-native = "Khem Raj <raj.khem@gmail.com>"
@@ -402,7 +402,7 @@ RECIPE_MAINTAINER:pn-liburcu = "Wang Mingyu <wangmy@fujitsu.com>"
RECIPE_MAINTAINER:pn-liburi-perl = "Tim Orling <tim.orling@konsulko.com>"
RECIPE_MAINTAINER:pn-libusb1 = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-libubootenv = "Stefano Babic <sbabic@denx.de>"
-RECIPE_MAINTAINER:pn-libuv = "Armin Kuster <akuster@mvista.com>"
+RECIPE_MAINTAINER:pn-libuv = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-libva = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-libva-initial = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-libva-utils = "Anuj Mittal <anuj.mittal@intel.com>"
@@ -541,17 +541,17 @@ RECIPE_MAINTAINER:pn-npth = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER:pn-nss-myhostname = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-numactl = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER:pn-ofono = "Ross Burton <ross.burton@arm.com>"
-RECIPE_MAINTAINER:pn-opensbi = "Alistair Francis <alistair.francis@wdc.com>"
+RECIPE_MAINTAINER:pn-opensbi = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-openssh = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-openssl = "Alexander Kanavin <alex.kanavin@gmail.com>"
-RECIPE_MAINTAINER:pn-opkg = "Alejandro del Castillo <alejandro.delcastillo@ni.com>"
-RECIPE_MAINTAINER:pn-opkg-arch-config = "Alejandro del Castillo <alejandro.delcastillo@ni.com>"
-RECIPE_MAINTAINER:pn-opkg-keyrings = "Alejandro del Castillo <alejandro.delcastillo@ni.com>"
-RECIPE_MAINTAINER:pn-opkg-utils = "Alejandro del Castillo <alejandro.delcastillo@ni.com>"
+RECIPE_MAINTAINER:pn-opkg = "Alex Stewart <alex.stewart@ni.com>"
+RECIPE_MAINTAINER:pn-opkg-arch-config = "Alex Stewart <alex.stewart@ni.com>"
+RECIPE_MAINTAINER:pn-opkg-keyrings = "Alex Stewart <alex.stewart@ni.com>"
+RECIPE_MAINTAINER:pn-opkg-utils = "Alex Stewart <alex.stewart@ni.com>"
RECIPE_MAINTAINER:pn-orc = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-os-release = "Ross Burton <ross.burton@arm.com>"
-RECIPE_MAINTAINER:pn-ovmf = "Ricardo Neri <ricardo.neri-calderon@linux.intel.com>"
-RECIPE_MAINTAINER:pn-ovmf-shell-image = "Ricardo Neri <ricardo.neri-calderon@linux.intel.com>"
+RECIPE_MAINTAINER:pn-ovmf = "Unassigned <unassigned@yoctoproject.org>"
+RECIPE_MAINTAINER:pn-ovmf-shell-image = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-p11-kit = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER:pn-package-index = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER:pn-pango = "Ross Burton <ross.burton@arm.com>"
@@ -700,7 +700,7 @@ RECIPE_MAINTAINER:pn-quilt-native = "Robert Yang <liezhi.yang@windriver.com>"
RECIPE_MAINTAINER:pn-quota = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-re2c = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER:pn-readline = "Hongxu Jia <hongxu.jia@windriver.com>"
-RECIPE_MAINTAINER:pn-repo = "Jasper Orschulko <Jasper.Orschulko@iris-sensing.com>"
+RECIPE_MAINTAINER:pn-repo = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-resolvconf = "Chen Qi <Qi.Chen@windriver.com>"
RECIPE_MAINTAINER:pn-rgb = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-rpcbind = "Hongxu Jia <hongxu.jia@windriver.com>"
@@ -816,7 +816,7 @@ RECIPE_MAINTAINER:pn-weston-init = "Denys Dmytriyenko <denis@denix.org>"
RECIPE_MAINTAINER:pn-wget = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER:pn-which = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER:pn-wic-tools = "Anuj Mittal <anuj.mittal@intel.com>"
-RECIPE_MAINTAINER:pn-wireless-regdb = "Adrian Bunk <bunk@kernel.org>"
+RECIPE_MAINTAINER:pn-wireless-regdb = "Unassigned <unassigned@yoctoproject.org>"
RECIPE_MAINTAINER:pn-wpa-supplicant = "Changhyeok Bae <changhyeok.bae@gmail.com>"
RECIPE_MAINTAINER:pn-wpebackend-fdo = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER:pn-x11perf = "Unassigned <unassigned@yoctoproject.org>"
diff --git a/meta/conf/distro/include/ptest-packagelists.inc b/meta/conf/distro/include/ptest-packagelists.inc
index b51cce4d9e..5c6a30635f 100644
--- a/meta/conf/distro/include/ptest-packagelists.inc
+++ b/meta/conf/distro/include/ptest-packagelists.inc
@@ -22,12 +22,14 @@ PTESTS_FAST = "\
gettext-ptest \
glib-networking-ptest \
gzip-ptest \
+ json-c-ptest \
json-glib-ptest \
libconvert-asn1-perl-ptest \
liberror-perl-ptest \
libnl-ptest \
libmodule-build-perl-ptest \
libpcre-ptest \
+ libpng-ptest \
libssh2-ptest \
libtimedate-perl-ptest \
libtest-needs-perl-ptest \
@@ -99,7 +101,7 @@ PTESTS_SLOW = "\
"
PTESTS_SLOW:remove:riscv64 = "valgrind-ptest"
-PTESTS_PROBLEMS:append:riscv64 = "valgrind-ptest"
+PTESTS_PROBLEMS:append:riscv64 = " valgrind-ptest"
# ruby-ptest \ # Timeout
# lz4-ptest \ # Needs a rewrite
diff --git a/meta/conf/distro/include/yocto-uninative.inc b/meta/conf/distro/include/yocto-uninative.inc
index 411fe45a24..4ac66fd506 100644
--- a/meta/conf/distro/include/yocto-uninative.inc
+++ b/meta/conf/distro/include/yocto-uninative.inc
@@ -6,10 +6,10 @@
# to the distro running on the build machine.
#
-UNINATIVE_MAXGLIBCVERSION = "2.35"
-UNINATIVE_VERSION = "3.6"
+UNINATIVE_MAXGLIBCVERSION = "2.39"
+UNINATIVE_VERSION = "4.4"
UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/${UNINATIVE_VERSION}/"
-UNINATIVE_CHECKSUM[aarch64] ?= "d64831cf2792c8e470c2e42230660e1a8e5de56a579cdd59978791f663c2f3ed"
-UNINATIVE_CHECKSUM[i686] ?= "2f0ee9b66b1bb2c85e2b592fb3c9c7f5d77399fa638d74961330cdb8de34ca3b"
-UNINATIVE_CHECKSUM[x86_64] ?= "9bfc4c970495b3716b2f9e52c4df9f968c02463a9a95000f6657fbc3fde1f098"
+UNINATIVE_CHECKSUM[aarch64] ?= "b61876130f494f75092f21086b4a64ea5fb064045769bf1d32e9cb6af17ea8ec"
+UNINATIVE_CHECKSUM[i686] ?= "9f28627828f0082cc0344eede4d9a861a9a064bfa8f36e072e46212f0fe45fcc"
+UNINATIVE_CHECKSUM[x86_64] ?= "d81c54284be2bb886931fc87281d58177a2cd381cf99d1981f8923039a72a302"
diff --git a/meta/conf/documentation.conf b/meta/conf/documentation.conf
index ab2addb321..e55bfa288d 100644
--- a/meta/conf/documentation.conf
+++ b/meta/conf/documentation.conf
@@ -253,6 +253,7 @@ KERNEL_MODULE_PROBECONF[doc] = "Lists kernel modules for which the build system
KERNEL_PACKAGE_NAME[doc] = "Name prefix for kernel packages. Defaults to 'kernel'."
KERNEL_PATH[doc] = "The location of the kernel sources. This variable is set to the value of the STAGING_KERNEL_DIR within the module class (module.bbclass)."
KERNEL_SRC[doc] = "The location of the kernel sources. This variable is set to the value of the STAGING_KERNEL_DIR within the module class (module.bbclass)."
+KERNEL_LOCALVERSION[doc] = "Appends a string to the name of the local version of the kernel image."
KFEATURE_DESCRIPTION[doc] = "Provides a short description of a configuration fragment. You use this variable in the .scc file that describes a configuration fragment file."
KMACHINE[doc] = "The machine as known by the kernel."
KTYPE[doc] = "Defines the kernel type to be used in assembling the configuration."
diff --git a/meta/conf/layer.conf b/meta/conf/layer.conf
index ea57123601..1f329c3efe 100644
--- a/meta/conf/layer.conf
+++ b/meta/conf/layer.conf
@@ -69,6 +69,7 @@ SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS += " \
initramfs-module-install->grub \
initramfs-module-install->parted \
initramfs-module-install->util-linux \
+ initramfs-module-setup-live->udev-extraconf \
grub-efi->grub-bootconf \
liberation-fonts->fontconfig \
cantarell-fonts->fontconfig \
diff --git a/meta/conf/machine/include/arm/arch-arm64.inc b/meta/conf/machine/include/arm/arch-arm64.inc
index 0e2efb5a40..832d0000ac 100644
--- a/meta/conf/machine/include/arm/arch-arm64.inc
+++ b/meta/conf/machine/include/arm/arch-arm64.inc
@@ -37,3 +37,8 @@ TUNE_ARCH = "${@bb.utils.contains('TUNE_FEATURES', 'aarch64', '${TUNE_ARCH_64}',
TUNE_PKGARCH = "${@bb.utils.contains('TUNE_FEATURES', 'aarch64', '${TUNE_PKGARCH_64}', '${TUNE_PKGARCH_32}', d)}"
ABIEXTENSION = "${@bb.utils.contains('TUNE_FEATURES', 'aarch64', '${ABIEXTENSION_64}', '${ABIEXTENSION_32}', d)}"
TARGET_FPU = "${@bb.utils.contains('TUNE_FEATURES', 'aarch64', '${TARGET_FPU_64}', '${TARGET_FPU_32}', d)}"
+
+# Emit branch protection (PAC/BTI) instructions. On hardware that doesn't
+# support these they're meaningless NOP instructions, so there's very little
+# reason not to enable them.
+TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'aarch64', ' -mbranch-protection=standard', '', d)}"
diff --git a/meta/conf/machine/include/arm/arch-armv9a.inc b/meta/conf/machine/include/arm/arch-armv9a.inc
new file mode 100644
index 0000000000..c38d6cfdf6
--- /dev/null
+++ b/meta/conf/machine/include/arm/arch-armv9a.inc
@@ -0,0 +1,28 @@
+DEFAULTTUNE ?= "armv9a-crc"
+
+TUNEVALID[armv9a] = "Enable instructions for ARMv9-a"
+TUNE_CCARGS_MARCH .= "${@bb.utils.contains('TUNE_FEATURES', 'armv9a', ' -march=armv9-a', '', d)}"
+MACHINEOVERRIDES =. "${@bb.utils.contains('TUNE_FEATURES', 'armv9a', 'armv9a:', '', d)}"
+
+require conf/machine/include/arm/arch-arm64.inc
+require conf/machine/include/arm/feature-arm-crc.inc
+require conf/machine/include/arm/feature-arm-crypto.inc
+
+# Little Endian base configs
+AVAILTUNES += "armv9a armv9a-crc armv9a-crc-crypto armv9a-crypto"
+ARMPKGARCH:tune-armv9a ?= "armv9a"
+ARMPKGARCH:tune-armv9a-crc ?= "armv9a"
+ARMPKGARCH:tune-armv9a-crypto ?= "armv9a"
+ARMPKGARCH:tune-armv9a-crc-crypto ?= "armv9a"
+TUNE_FEATURES:tune-armv9a = "aarch64 armv9a"
+TUNE_FEATURES:tune-armv9a-crc = "${TUNE_FEATURES:tune-armv9a} crc"
+TUNE_FEATURES:tune-armv9a-crypto = "${TUNE_FEATURES:tune-armv9a} crypto"
+TUNE_FEATURES:tune-armv9a-crc-crypto = "${TUNE_FEATURES:tune-armv9a-crc} crypto"
+PACKAGE_EXTRA_ARCHS:tune-armv9a = "aarch64 armv9a"
+PACKAGE_EXTRA_ARCHS:tune-armv9a-crc = "${PACKAGE_EXTRA_ARCHS:tune-armv9a} armv9a-crc"
+PACKAGE_EXTRA_ARCHS:tune-armv9a-crypto = "${PACKAGE_EXTRA_ARCHS:tune-armv9a} armv9a-crypto"
+PACKAGE_EXTRA_ARCHS:tune-armv9a-crc-crypto = "${PACKAGE_EXTRA_ARCHS:tune-armv9a-crc} armv9a-crypto armv9a-crc-crypto"
+BASE_LIB:tune-armv9a = "lib64"
+BASE_LIB:tune-armv9a-crc = "lib64"
+BASE_LIB:tune-armv9a-crypto = "lib64"
+BASE_LIB:tune-armv9a-crc-crypto = "lib64"
diff --git a/meta/conf/machine/include/arm/armv9a/tune-neoversen2.inc b/meta/conf/machine/include/arm/armv9a/tune-neoversen2.inc
index 36355f7bed..d26ab25e48 100644
--- a/meta/conf/machine/include/arm/armv9a/tune-neoversen2.inc
+++ b/meta/conf/machine/include/arm/armv9a/tune-neoversen2.inc
@@ -6,17 +6,15 @@ DEFAULTTUNE ?= "neoversen2"
TUNEVALID[neoversen2] = "Enable Neoverse-N2 specific processor optimizations"
TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'neoversen2', ' -mcpu=neoverse-n2', '', d)}"
-# Even though the Neoverse N2 core implemnts the Arm v9.0-A architecture,
-# but the support of it in GCC is based on the Arm v8.5-A architecture.
-require conf/machine/include/arm/arch-armv8-5a.inc
+require conf/machine/include/arm/arch-armv9a.inc
# Little Endian base configs
AVAILTUNES += "neoversen2 neoversen2-crypto"
ARMPKGARCH:tune-neoversen2 = "neoversen2"
ARMPKGARCH:tune-neoversen2-crypto = "neoversen2-crypto"
-TUNE_FEATURES:tune-neoversen2 = "${TUNE_FEATURES:tune-armv8-5a} neoversen2"
+TUNE_FEATURES:tune-neoversen2 = "${TUNE_FEATURES:tune-armv9a} neoversen2"
TUNE_FEATURES:tune-neoversen2-crypto = "${TUNE_FEATURES:tune-neoversen2} crypto"
-PACKAGE_EXTRA_ARCHS:tune-neoversen2 = "${PACKAGE_EXTRA_ARCHS:tune-armv8-5a} neoversen2"
-PACKAGE_EXTRA_ARCHS:tune-neoversen2-crypto = "${PACKAGE_EXTRA_ARCHS:tune-armv8-5a-crypto} neoversen2 neoversen2-crypto"
+PACKAGE_EXTRA_ARCHS:tune-neoversen2 = "${PACKAGE_EXTRA_ARCHS:tune-armv9a} neoversen2"
+PACKAGE_EXTRA_ARCHS:tune-neoversen2-crypto = "${PACKAGE_EXTRA_ARCHS:tune-armv9a-crypto} neoversen2 neoversen2-crypto"
BASE_LIB:tune-neoversen2 = "lib64"
BASE_LIB:tune-neoversen2-crypto = "lib64"
diff --git a/meta/conf/machine/include/microblaze/feature-microblaze-versions.inc b/meta/conf/machine/include/microblaze/feature-microblaze-versions.inc
index 5c37f49abb..658e87b8cd 100644
--- a/meta/conf/machine/include/microblaze/feature-microblaze-versions.inc
+++ b/meta/conf/machine/include/microblaze/feature-microblaze-versions.inc
@@ -16,7 +16,7 @@ def microblaze_current_version(d, gcc = False):
# find the current version, and convert it to major/minor integers
version = None
for t in (d.getVar("TUNE_FEATURES") or "").split():
- m = re.search("^v(\d+)\.(\d+)", t)
+ m = re.search(r"^v(\d+)\.(\d+)", t)
if m:
version = int(m.group(1)), int(m.group(2))
break
diff --git a/meta/files/common-licenses/LGPL-3.0-with-zeromq-exception b/meta/files/common-licenses/LGPL-3.0-with-zeromq-exception
new file mode 100644
index 0000000000..02e943c4ac
--- /dev/null
+++ b/meta/files/common-licenses/LGPL-3.0-with-zeromq-exception
@@ -0,0 +1,181 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
+
+--------------------------------------------------------------------------------
+
+ SPECIAL EXCEPTION GRANTED BY COPYRIGHT HOLDERS
+
+As a special exception, copyright holders give you permission to link this
+library with independent modules to produce an executable, regardless of
+the license terms of these independent modules, and to copy and distribute
+the resulting executable under terms of your choice, provided that you also
+meet, for each linked independent module, the terms and conditions of
+the license of that module. An independent module is a module which is not
+derived from or based on this library. If you modify this library, you must
+extend this exception to your version of the library.
+
+Note: this exception relieves you of any obligations under sections 4 and 5
+of this license, and section 6 of the GNU General Public License.
diff --git a/meta/files/overlayfs-etc-preinit.sh.in b/meta/files/overlayfs-etc-preinit.sh.in
index 43c9b04eb9..8db076f4ba 100644
--- a/meta/files/overlayfs-etc-preinit.sh.in
+++ b/meta/files/overlayfs-etc-preinit.sh.in
@@ -15,19 +15,32 @@ mount -t sysfs sysfs /sys
[ -z "$CONSOLE" ] && CONSOLE="/dev/console"
+BASE_OVERLAY_ETC_DIR={OVERLAYFS_ETC_MOUNT_POINT}/overlay-etc
+UPPER_DIR=$BASE_OVERLAY_ETC_DIR/upper
+WORK_DIR=$BASE_OVERLAY_ETC_DIR/work
+LOWER_DIR=$BASE_OVERLAY_ETC_DIR/lower
+
mkdir -p {OVERLAYFS_ETC_MOUNT_POINT}
if mount -n -t {OVERLAYFS_ETC_FSTYPE} \
-o {OVERLAYFS_ETC_MOUNT_OPTIONS} \
{OVERLAYFS_ETC_DEVICE} {OVERLAYFS_ETC_MOUNT_POINT}
then
- mkdir -p {OVERLAYFS_ETC_MOUNT_POINT}/overlay-etc/upper
- mkdir -p {OVERLAYFS_ETC_MOUNT_POINT}/overlay-etc/work
+ mkdir -p $UPPER_DIR
+ mkdir -p $WORK_DIR
+
+ if {OVERLAYFS_ETC_EXPOSE_LOWER}; then
+ mkdir -p $LOWER_DIR
+
+ # provide read-only access to original /etc content
+ mount -o bind,ro /etc $LOWER_DIR
+ fi
+
mount -n -t overlay \
- -o upperdir={OVERLAYFS_ETC_MOUNT_POINT}/overlay-etc/upper \
+ -o upperdir=$UPPER_DIR \
-o lowerdir=/etc \
- -o workdir={OVERLAYFS_ETC_MOUNT_POINT}/overlay-etc/work \
+ -o workdir=$WORK_DIR \
-o index=off,xino=off,redirect_dir=off,metacopy=off \
- {OVERLAYFS_ETC_MOUNT_POINT}/overlay-etc/upper /etc || \
+ $UPPER_DIR /etc || \
echo "PREINIT: Mounting etc-overlay failed!"
else
echo "PREINIT: Mounting </data> failed!"
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
index aa06497727..ca2b393116 100644
--- a/meta/lib/oe/cve_check.py
+++ b/meta/lib/oe/cve_check.py
@@ -73,33 +73,33 @@ def get_patched_cves(d):
import re
import oe.patch
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+ cve_match = re.compile(r"CVE:( CVE-\d{4}-\d+)+")
# Matches the last "CVE-YYYY-ID" in the file name, also if written
# in lowercase. Possible to have multiple CVE IDs in a single
# file name, but only the last one will be detected from the file name.
# However, patch files contents addressing multiple CVE IDs are supported
# (cve_match regular expression)
-
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
+ cve_file_name_match = re.compile(r".*(CVE-\d{4}-\d+)", re.IGNORECASE)
patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in oe.patch.src_patches(d):
+ patches = oe.patch.src_patches(d)
+ bb.debug(2, "Scanning %d patches for CVEs" % len(patches))
+ for url in patches:
patch_file = bb.fetch.decodeurl(url)[2]
- # Remote compressed patches may not be unpacked, so silently ignore them
- if not os.path.isfile(patch_file):
- bb.warn("%s does not exist, cannot extract CVE list" % patch_file)
- continue
-
# Check patch file name for CVE ID
fname_match = cve_file_name_match.search(patch_file)
if fname_match:
cve = fname_match.group(1).upper()
patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+ bb.debug(2, "Found %s from patch file name %s" % (cve, patch_file))
+
+ # Remote patches won't be present and compressed patches won't be
+ # unpacked, so say we're not scanning them
+ if not os.path.isfile(patch_file):
+ bb.note("%s is remote or compressed, not scanning content" % patch_file)
+ continue
with open(patch_file, "r", encoding="utf-8") as f:
try:
@@ -143,7 +143,7 @@ def get_cpe_ids(cve_product, version):
else:
vendor = "*"
- cpe_id = f'cpe:2.3:a:{vendor}:{product}:{version}:*:*:*:*:*:*:*'
+ cpe_id = 'cpe:2.3:a:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version)
cpe_ids.append(cpe_id)
return cpe_ids
@@ -159,7 +159,7 @@ def cve_check_merge_jsons(output, data):
for product in output["package"]:
if product["name"] == data["package"][0]["name"]:
- bb.error("Error adding the same package twice")
+ bb.error("Error adding the same package %s twice" % product["name"])
return
output["package"].append(data["package"][0])
@@ -173,3 +173,42 @@ def update_symlinks(target_path, link_path):
if os.path.exists(os.path.realpath(link_path)):
os.remove(link_path)
os.symlink(os.path.basename(target_path), link_path)
+
+
+def convert_cve_version(version):
+ """
+ This function converts from CVE format to Yocto version format.
+ eg 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1
+
+ Unless it is redefined using CVE_VERSION in the recipe,
+ cve_check uses the version in the name of the recipe (${PV})
+ to check vulnerabilities against a CVE in the database downloaded from NVD.
+
+ When the version has an update, i.e.
+ "p1" in OpenSSH 8.3p1,
+ "-rc1" in linux kernel 6.2-rc1,
+ the database stores the version as version_update (8.3_p1, 6.2_rc1).
+ Therefore, we must transform this version before comparing to the
+ recipe version.
+
+ In this case, the parameter of the function is 8.3_p1.
+ If the version uses the Release Candidate format, "rc",
+ this function replaces the '_' by '-'.
+ If the version uses the Update format, "p",
+ this function removes the '_' completely.
+ """
+ import re
+
+ matches = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
+
+ if not matches:
+ return version
+
+ version = matches.group(1)
+ update = matches.group(2)
+
+ if matches.group(3) == "rc":
+ return version + '-' + update
+
+ return version + update
+
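The convert_cve_version() helper added above normalizes NVD "version_update" strings before they are compared with recipe versions. A short illustration of the expected conversions, using the examples from its docstring (assumes meta/lib is on the Python path so the module is importable):

    from oe.cve_check import convert_cve_version

    assert convert_cve_version("8.3_p1") == "8.3p1"     # OpenSSH-style update suffix
    assert convert_cve_version("6.2_rc1") == "6.2-rc1"  # kernel release candidate
    assert convert_cve_version("1.2.11") == "1.2.11"    # no suffix, returned unchanged
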
diff --git a/meta/lib/oe/go.py b/meta/lib/oe/go.py
new file mode 100644
index 0000000000..9996057f12
--- /dev/null
+++ b/meta/lib/oe/go.py
@@ -0,0 +1,32 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import re
+
+def map_arch(a):
+ if re.match('i.86', a):
+ return '386'
+ elif a == 'x86_64':
+ return 'amd64'
+ elif re.match('arm.*', a):
+ return 'arm'
+ elif re.match('aarch64.*', a):
+ return 'arm64'
+ elif re.match('mips64el.*', a):
+ return 'mips64le'
+ elif re.match('mips64.*', a):
+ return 'mips64'
+ elif a == 'mips':
+ return 'mips'
+ elif a == 'mipsel':
+ return 'mipsle'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
+ elif re.match('p(pc|owerpc)(64)', a):
+ return 'ppc64'
+ elif a == 'riscv64':
+ return 'riscv64'
+ return ''
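
map_arch() in the new oe/go.py maps OE architecture names to Go's GOARCH values. A small usage sketch based on the mappings above (assumes meta/lib is on the Python path, as it is inside a BitBake task):

    from oe.go import map_arch

    assert map_arch("x86_64") == "amd64"
    assert map_arch("i686") == "386"        # any i.86 variant maps to 386
    assert map_arch("aarch64") == "arm64"
    assert map_arch("mipsel") == "mipsle"
    assert map_arch("somethingelse") == ""  # unknown architectures return an empty string
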
diff --git a/meta/lib/oe/npm_registry.py b/meta/lib/oe/npm_registry.py
new file mode 100644
index 0000000000..3f70e4f5c8
--- /dev/null
+++ b/meta/lib/oe/npm_registry.py
@@ -0,0 +1,169 @@
+import bb
+import json
+import os
+import subprocess
+
+_ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ 'abcdefghijklmnopqrstuvwxyz'
+ '0123456789'
+ '_.-~()')
+
+MISSING_OK = object()
+
+REGISTRY = "https://registry.npmjs.org"
+
+# we cannot use urllib.parse here because npm expects lowercase
+# hex-chars but urllib generates uppercase ones
+def uri_quote(s, safe = '/'):
+ res = ""
+ safe_set = set(safe)
+ for c in s:
+ if c in _ALWAYS_SAFE or c in safe_set:
+ res += c
+ else:
+ res += '%%%02x' % ord(c)
+ return res
+
+class PackageJson:
+ def __init__(self, spec):
+ self.__spec = spec
+
+ @property
+ def name(self):
+ return self.__spec['name']
+
+ @property
+ def version(self):
+ return self.__spec['version']
+
+ @property
+ def empty_manifest(self):
+ return {
+ 'name': self.name,
+ 'description': self.__spec.get('description', ''),
+ 'versions': {},
+ }
+
+ def base_filename(self):
+ return uri_quote(self.name, safe = '@')
+
+ def as_manifest_entry(self, tarball_uri):
+ res = {}
+
+ ## NOTE: 'npm install' requires more than basic meta information;
+ ## e.g. it takes 'bin' from this manifest entry but not the actual
+ ## 'package.json'
+ for (idx,dflt) in [('name', None),
+ ('description', ""),
+ ('version', None),
+ ('bin', MISSING_OK),
+ ('man', MISSING_OK),
+ ('scripts', MISSING_OK),
+ ('directories', MISSING_OK),
+ ('dependencies', MISSING_OK),
+ ('devDependencies', MISSING_OK),
+ ('optionalDependencies', MISSING_OK),
+ ('license', "unknown")]:
+ if idx in self.__spec:
+ res[idx] = self.__spec[idx]
+ elif dflt == MISSING_OK:
+ pass
+ elif dflt != None:
+ res[idx] = dflt
+ else:
+ raise Exception("%s-%s: missing key %s" % (self.name,
+ self.version,
+ idx))
+
+ res['dist'] = {
+ 'tarball': tarball_uri,
+ }
+
+ return res
+
+class ManifestImpl:
+ def __init__(self, base_fname, spec):
+ self.__base = base_fname
+ self.__spec = spec
+
+ def load(self):
+ try:
+ with open(self.filename, "r") as f:
+ res = json.load(f)
+ except IOError:
+ res = self.__spec.empty_manifest
+
+ return res
+
+ def save(self, meta):
+ with open(self.filename, "w") as f:
+ json.dump(meta, f, indent = 2)
+
+ @property
+ def filename(self):
+ return self.__base + ".meta"
+
+class Manifest:
+ def __init__(self, base_fname, spec):
+ self.__base = base_fname
+ self.__spec = spec
+ self.__lockf = None
+ self.__impl = None
+
+ def __enter__(self):
+ self.__lockf = bb.utils.lockfile(self.__base + ".lock")
+ self.__impl = ManifestImpl(self.__base, self.__spec)
+ return self.__impl
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ bb.utils.unlockfile(self.__lockf)
+
+class NpmCache:
+ def __init__(self, cache):
+ self.__cache = cache
+
+ @property
+ def path(self):
+ return self.__cache
+
+ def run(self, type, key, fname):
+ subprocess.run(['oe-npm-cache', self.__cache, type, key, fname],
+ check = True)
+
+class NpmRegistry:
+ def __init__(self, path, cache):
+ self.__path = path
+ self.__cache = NpmCache(cache + '/_cacache')
+ bb.utils.mkdirhier(self.__path)
+ bb.utils.mkdirhier(self.__cache.path)
+
+ @staticmethod
+ ## This function is critical and must match nodejs expectations
+ def _meta_uri(spec):
+ return REGISTRY + '/' + uri_quote(spec.name, safe = '@')
+
+ @staticmethod
+ ## Exact return value does not matter; just make it look like a
+ ## usual registry url
+ def _tarball_uri(spec):
+ return '%s/%s/-/%s-%s.tgz' % (REGISTRY,
+ uri_quote(spec.name, safe = '@'),
+ uri_quote(spec.name, safe = '@/'),
+ spec.version)
+
+ def add_pkg(self, tarball, pkg_json):
+ pkg_json = PackageJson(pkg_json)
+ base = os.path.join(self.__path, pkg_json.base_filename())
+
+ with Manifest(base, pkg_json) as manifest:
+ meta = manifest.load()
+ tarball_uri = self._tarball_uri(pkg_json)
+
+ meta['versions'][pkg_json.version] = pkg_json.as_manifest_entry(tarball_uri)
+
+ manifest.save(meta)
+
+ ## Cache entries are a little bit dependent on the nodejs
+ ## version; version specific cache implementation must
+ ## mitigate differences
+ self.__cache.run('meta', self._meta_uri(pkg_json), manifest.filename);
+ self.__cache.run('tgz', tarball_uri, tarball);
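
uri_quote() in the new npm registry helper deliberately emits lowercase hex escapes and keeps '@' unescaped when asked, because that is how npm builds registry URLs for scoped packages; urllib.parse.quote() would produce uppercase escapes. A hedged comparison (the package name is only an example; assumes meta/lib and bitbake's lib are on the Python path):

    from urllib.parse import quote
    from oe.npm_registry import uri_quote

    print(uri_quote("@types/node", safe="@"))  # -> @types%2fnode (lowercase hex, npm style)
    print(quote("@types/node", safe="@"))      # -> @types%2Fnode (uppercase hex from urllib)
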
diff --git a/meta/lib/oe/overlayfs.py b/meta/lib/oe/overlayfs.py
index b5d5e88e80..590c0de58a 100644
--- a/meta/lib/oe/overlayfs.py
+++ b/meta/lib/oe/overlayfs.py
@@ -38,7 +38,11 @@ def unitFileList(d):
bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)
for mountPoint in overlayMountPoints:
- for path in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ mountPointList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
+ if not mountPointList:
+ bb.debug(1, "No mount points defined for %s flag, don't add to file list", mountPoint)
+ continue
+ for path in mountPointList.split():
fileList.append(mountUnitName(path))
fileList.append(helperUnitName(path))
diff --git a/meta/lib/oe/package_manager/__init__.py b/meta/lib/oe/package_manager/__init__.py
index 80bc1a6bc6..6615258470 100644
--- a/meta/lib/oe/package_manager/__init__.py
+++ b/meta/lib/oe/package_manager/__init__.py
@@ -467,7 +467,10 @@ def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencie
# Detect bitbake -b usage
nodeps = d.getVar("BB_LIMITEDDEPS") or False
if nodeps or not filterbydependencies:
- oe.path.symlink(deploydir, subrepo_dir, True)
+ for arch in d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").split() + d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").replace("-", "_").split():
+ target = os.path.join(deploydir + "/" + arch)
+ if os.path.exists(target):
+ oe.path.symlink(target, subrepo_dir + "/" + arch, True)
return
start = None
diff --git a/meta/lib/oe/package_manager/deb/__init__.py b/meta/lib/oe/package_manager/deb/__init__.py
index 86ddb130ad..910f217b62 100644
--- a/meta/lib/oe/package_manager/deb/__init__.py
+++ b/meta/lib/oe/package_manager/deb/__init__.py
@@ -80,15 +80,15 @@ class DpkgIndexer(Indexer):
return
oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
if signer:
for f in index_sign_files:
signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
output_suffix="gpg",
use_sha256=True)
diff --git a/meta/lib/oe/package_manager/ipk/__init__.py b/meta/lib/oe/package_manager/ipk/__init__.py
index 4cd3963111..fd61340087 100644
--- a/meta/lib/oe/package_manager/ipk/__init__.py
+++ b/meta/lib/oe/package_manager/ipk/__init__.py
@@ -102,12 +102,14 @@ class OpkgDpkgPM(PackageManager):
This method extracts the common parts for Opkg and Dpkg
"""
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
+ proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
+ if proc.returncode:
bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- return opkg_query(output)
+ "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
+ elif proc.stderr:
+ bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))
+
+ return opkg_query(proc.stdout)
def extract(self, pkg, pkg_info):
"""
@@ -243,7 +245,7 @@ class OpkgPM(OpkgDpkgPM):
"""
if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
for arch in self.pkg_archs.split():
- cfg_file_name = os.path.join(self.target_rootfs,
+ cfg_file_name = oe.path.join(self.target_rootfs,
self.d.getVar("sysconfdir"),
"opkg",
"local-%s-feed.conf" % arch)
@@ -443,15 +445,16 @@ class OpkgPM(OpkgDpkgPM):
cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
opkg_args,
' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
+ proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
+ if proc.returncode:
bb.fatal("Unable to dummy install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
+ elif proc.stderr:
+ bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))
bb.utils.remove(temp_rootfs, True)
- return output
+ return proc.stdout
def backup_packaging_data(self):
# Save the opkglib for increment ipk image generation
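
The opkg changes above replace subprocess.check_output() with subprocess.run() so a failing command is still fatal, while stderr output from a successful run is reported separately instead of being mixed into the parsed stdout. A standalone sketch of the pattern (the command and helper name are hypothetical):

    import subprocess

    def run_and_capture(cmd):
        # Fatal on non-zero exit, but surface stderr chatter separately,
        # mirroring the new OpkgDpkgPM/OpkgPM behaviour.
        proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
        if proc.returncode:
            raise RuntimeError("Command '%s' returned %d:\n%s"
                               % (cmd, proc.returncode, proc.stderr))
        if proc.stderr:
            print("Command '%s' returned stderr: %s" % (cmd, proc.stderr))
        return proc.stdout

    # e.g. output = run_and_capture("opkg --version")
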
diff --git a/meta/lib/oe/package_manager/ipk/manifest.py b/meta/lib/oe/package_manager/ipk/manifest.py
index ae451c5c70..22669f97c0 100644
--- a/meta/lib/oe/package_manager/ipk/manifest.py
+++ b/meta/lib/oe/package_manager/ipk/manifest.py
@@ -62,7 +62,7 @@ class PkgManifest(Manifest):
if len(pkgs_to_install) == 0:
return
- output = pm.dummy_install(pkgs_to_install).decode('utf-8')
+ output = pm.dummy_install(pkgs_to_install)
with open(self.full_manifest, 'w+') as manifest:
pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
diff --git a/meta/lib/oe/package_manager/rpm/__init__.py b/meta/lib/oe/package_manager/rpm/__init__.py
index b392581069..97ef387f3b 100644
--- a/meta/lib/oe/package_manager/rpm/__init__.py
+++ b/meta/lib/oe/package_manager/rpm/__init__.py
@@ -96,11 +96,15 @@ class RpmPM(PackageManager):
archs = ["sdk_provides_dummy_target"] + archs
confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
bb.utils.mkdirhier(confdir)
- open(confdir + "arch", 'w').write(":".join(archs))
+ with open(confdir + "arch", 'w') as f:
+ f.write(":".join(archs))
+
distro_codename = self.d.getVar('DISTRO_CODENAME')
- open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+ with open(confdir + "releasever", 'w') as f:
+ f.write(distro_codename if distro_codename is not None else '')
- open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
+ with open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w') as f:
+ f.write("")
def _configure_rpm(self):
@@ -110,14 +114,17 @@ class RpmPM(PackageManager):
platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
bb.utils.mkdirhier(platformconfdir)
- open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ with open(platformconfdir + "platform", 'w') as f:
+ f.write("%s-pc-linux" % self.primary_arch)
with open(rpmrcconfdir + "rpmrc", 'w') as f:
f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
- open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ with open(platformconfdir + "macros", 'w') as f:
+ f.write("%_transaction_color 7\n")
if self.d.getVar('RPM_PREFER_ELF_ARCH'):
- open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+ with open(platformconfdir + "macros", 'a') as f:
+ f.write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
@@ -164,13 +171,13 @@ class RpmPM(PackageManager):
repo_uri = uri + "/" + arch
repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
else:
repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
repo_uri = uri
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
def _prepare_pkg_transaction(self):
os.environ['D'] = self.target_rootfs
@@ -329,7 +336,8 @@ class RpmPM(PackageManager):
return e.output.decode("utf-8")
def dump_install_solution(self, pkgs):
- open(self.solution_manifest, 'w').write(" ".join(pkgs))
+ with open(self.solution_manifest, 'w') as f:
+ f.write(" ".join(pkgs))
return pkgs
def load_old_install_solution(self):
@@ -363,7 +371,8 @@ class RpmPM(PackageManager):
bb.utils.mkdirhier(target_path)
num = self._script_num_prefix(target_path)
saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
- open(saved_script_name, 'w').write(output)
+ with open(saved_script_name, 'w') as f:
+ f.write(output)
os.chmod(saved_script_name, 0o755)
def _handle_intercept_failure(self, registered_pkgs):
diff --git a/meta/lib/oe/package_manager/rpm/rootfs.py b/meta/lib/oe/package_manager/rpm/rootfs.py
index 00d07cd9cc..a120092b83 100644
--- a/meta/lib/oe/package_manager/rpm/rootfs.py
+++ b/meta/lib/oe/package_manager/rpm/rootfs.py
@@ -108,7 +108,7 @@ class PkgRootfs(Rootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
+ self._setup_dbg_rootfs(['/etc/rpm', '/etc/rpmrc', '/etc/dnf', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
execute_pre_post_process(self.d, rpm_post_process_cmds)
diff --git a/meta/lib/oe/package_manager/rpm/sdk.py b/meta/lib/oe/package_manager/rpm/sdk.py
index c5f232431f..04dccf49d7 100644
--- a/meta/lib/oe/package_manager/rpm/sdk.py
+++ b/meta/lib/oe/package_manager/rpm/sdk.py
@@ -110,5 +110,6 @@ class PkgSdk(Sdk):
for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
self.movefile(f, native_sysconf_dir)
for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
- self.movefile(f, native_sysconf_dir)
+ self.mkdirhier(native_sysconf_dir + "/dnf")
+ self.movefile(f, native_sysconf_dir + "/dnf")
self.remove(os.path.join(self.sdk_output, "etc"), True)
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index 95b915a6ab..4ec9caed45 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -299,10 +299,10 @@ class GitApplyTree(PatchTree):
PatchTree.__init__(self, dir, d)
self.commituser = d.getVar('PATCH_GIT_USER_NAME')
self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
- if not self._isInitialized():
+ if not self._isInitialized(d):
self._initRepo()
- def _isInitialized(self):
+ def _isInitialized(self, d):
cmd = "git rev-parse --show-toplevel"
try:
output = runcmd(cmd.split(), self.dir).strip()
@@ -310,8 +310,8 @@ class GitApplyTree(PatchTree):
## runcmd returned non-zero which most likely means 128
## Not a git directory
return False
- ## Make sure repo is in builddir to not break top-level git repos
- return os.path.samefile(output, self.dir)
+ ## Make sure repo is in builddir to not break top-level git repos, or under workdir
+ return os.path.samefile(output, self.dir) or oe.path.is_path_parent(d.getVar('WORKDIR'), output)
def _initRepo(self):
runcmd("git init".split(), self.dir)
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index 872ff97b89..b04992c66d 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -666,7 +666,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None, params=None):
"""
Writes a bbappend file for a recipe
Parameters:
@@ -696,6 +696,9 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
redirect_output:
If specified, redirects writing the output file to the
specified directory (for dry-run purposes)
+ params:
+ Parameters to use when adding entries to SRC_URI. If specified,
+ should be a list of dicts with the same length as srcfiles.
"""
if not removevalues:
@@ -762,12 +765,14 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
copyfiles = {}
if srcfiles:
instfunclines = []
- for newfile, origsrcfile in srcfiles.items():
+ for i, (newfile, origsrcfile) in enumerate(srcfiles.items()):
srcfile = origsrcfile
srcurientry = None
if not srcfile:
srcfile = os.path.basename(newfile)
srcurientry = 'file://%s' % srcfile
+ if params and params[i]:
+ srcurientry = '%s;%s' % (srcurientry, ';'.join('%s=%s' % (k,v) for k,v in params[i].items()))
# Double-check it's not there already
# FIXME do we care if the entry is added by another bbappend that might go away?
if not srcurientry in rd.getVar('SRC_URI').split():
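The new params argument lets callers attach extra URI parameters to each generated file:// entry, joined with semicolons. A short sketch of how such an entry is assembled (the file name and parameter dict are made up for illustration):

def build_srcuri_entry(srcfile, params=None):
    """Build a file:// SRC_URI entry, optionally appending key=value parameters."""
    entry = 'file://%s' % srcfile
    if params:
        entry += ';' + ';'.join('%s=%s' % (k, v) for k, v in params.items())
    return entry

# e.g. the patchdir parameter devtool uses for patches applied in a subdirectory:
print(build_srcuri_entry('0001-Add-a-comment.patch', {'patchdir': '..'}))
# -> file://0001-Add-a-comment.patch;patchdir=..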
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
index 35b8be6d08..768fd4f19c 100644
--- a/meta/lib/oe/reproducible.py
+++ b/meta/lib/oe/reproducible.py
@@ -113,7 +113,8 @@ def get_source_date_epoch_from_git(d, sourcedir):
return None
bb.debug(1, "git repository: %s" % gitpath)
- p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE)
+ p = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
+ check=True, stdout=subprocess.PIPE)
return int(p.stdout.decode('utf-8'))
def get_source_date_epoch_from_youngest_file(d, sourcedir):
@@ -152,7 +153,6 @@ def fixed_source_date_epoch(d):
def get_source_date_epoch(d, sourcedir):
return (
get_source_date_epoch_from_git(d, sourcedir) or
- get_source_date_epoch_from_known_files(d, sourcedir) or
get_source_date_epoch_from_youngest_file(d, sourcedir) or
fixed_source_date_epoch(d) # Last resort
)
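The reproducible.py change queries git with log.showSignature=false so signed commits cannot leak signature text into the %ct output, and drops get_source_date_epoch_from_known_files() from the fallback chain. A standalone sketch of the timestamp query (the .git path in the usage note is an assumption):

import subprocess

def commit_timestamp(gitdir):
    """Return the committer timestamp of the most recent commit as an int."""
    p = subprocess.run(
        ["git", "-c", "log.showSignature=false", "--git-dir", gitdir,
         "log", "-1", "--pretty=%ct"],
        check=True, stdout=subprocess.PIPE)
    return int(p.stdout.decode("utf-8"))

# Usage, assuming a repository in the current directory:
# print(commit_timestamp(".git"))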
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index 9e6b411fb6..2824d4f037 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -104,7 +104,7 @@ class Rootfs(object, metaclass=ABCMeta):
def _cleanup(self):
pass
- def _setup_dbg_rootfs(self, dirs):
+ def _setup_dbg_rootfs(self, package_paths):
gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
if gen_debugfs != '1':
return
@@ -120,11 +120,12 @@ class Rootfs(object, metaclass=ABCMeta):
bb.utils.mkdirhier(self.image_rootfs)
bb.note(" Copying back package database...")
- for dir in dirs:
- if not os.path.isdir(self.image_rootfs + '-orig' + dir):
- continue
- bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
- shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
+ for path in package_paths:
+ bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(path))
+ if os.path.isdir(self.image_rootfs + '-orig' + path):
+ shutil.copytree(self.image_rootfs + '-orig' + path, self.image_rootfs + path, symlinks=True)
+ elif os.path.isfile(self.image_rootfs + '-orig' + path):
+ shutil.copyfile(self.image_rootfs + '-orig' + path, self.image_rootfs + path)
# Copy files located in /usr/lib/debug or /usr/src/debug
for dir in ["/usr/lib/debug", "/usr/src/debug"]:
@@ -160,6 +161,13 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install extra debug packages...")
self.pm.install(extra_debug_pkgs.split(), True)
+ bb.note(" Removing package database...")
+ for path in package_paths:
+ if os.path.isdir(self.image_rootfs + path):
+ shutil.rmtree(self.image_rootfs + path)
+ elif os.path.isfile(self.image_rootfs + path):
+ os.remove(self.image_rootfs + path)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
@@ -384,6 +392,10 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None)
def image_list_installed_packages(d, rootfs_dir=None):
+ # There's no rootfs for baremetal images
+ if bb.data.inherits_class('baremetal-image', d):
+ return ""
+
if not rootfs_dir:
rootfs_dir = d.getVar('IMAGE_ROOTFS')
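_setup_dbg_rootfs() now receives package database paths that may be either files or directories, so both the copy-back and the later removal dispatch on the path type. A reduced sketch of that pattern (the rootfs names are illustrative, not the real variables):

import os
import shutil

def copy_back(orig_root, new_root, package_paths):
    """Copy saved package-database paths into the freshly created rootfs."""
    for path in package_paths:
        os.makedirs(new_root + os.path.dirname(path), exist_ok=True)
        src = orig_root + path
        if os.path.isdir(src):
            shutil.copytree(src, new_root + path, symlinks=True)
        elif os.path.isfile(src):
            shutil.copyfile(src, new_root + path)

def remove_paths(root, package_paths):
    """Drop the package database again once extra debug packages are installed."""
    for path in package_paths:
        target = root + path
        if os.path.isdir(target):
            shutil.rmtree(target)
        elif os.path.isfile(target):
            os.remove(target)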
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
index 3372f13a9d..52bf51440e 100644
--- a/meta/lib/oe/sbom.py
+++ b/meta/lib/oe/sbom.py
@@ -32,7 +32,7 @@ def get_sdk_spdxid(sdk):
return "SPDXRef-SDK-%s" % sdk
-def write_doc(d, spdx_doc, subdir, spdx_deploy=None):
+def write_doc(d, spdx_doc, subdir, spdx_deploy=None, indent=None):
from pathlib import Path
if spdx_deploy is None:
@@ -41,7 +41,7 @@ def write_doc(d, spdx_doc, subdir, spdx_deploy=None):
dest = spdx_deploy / subdir / (spdx_doc.name + ".spdx.json")
dest.parent.mkdir(exist_ok=True, parents=True)
with dest.open("wb") as f:
- doc_sha1 = spdx_doc.to_json(f, sort_keys=True)
+ doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)
l = spdx_deploy / "by-namespace" / spdx_doc.documentNamespace.replace("/", "_")
l.parent.mkdir(exist_ok=True, parents=True)
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index 27347667e8..2383bd58b7 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -68,7 +68,7 @@ class Sdk(object, metaclass=ABCMeta):
#FIXME: using umbrella exc catching because bb.utils method raises it
except Exception as e:
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
- bb.error("unable to place %s in final SDK location" % sourcefile)
+ bb.fatal("unable to place %s in final SDK location" % sourcefile)
def mkdirhier(self, dirpath):
try:
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
index 14ca706895..6d56ed90df 100644
--- a/meta/lib/oe/spdx.py
+++ b/meta/lib/oe/spdx.py
@@ -218,7 +218,7 @@ class SPDXPackage(SPDXObject):
SPDXID = _String()
versionInfo = _String()
downloadLocation = _String(default="NOASSERTION")
- packageSupplier = _String(default="NOASSERTION")
+ supplier = _String(default="NOASSERTION")
homepage = _String()
licenseConcluded = _String(default="NOASSERTION")
licenseDeclared = _String(default="NOASSERTION")
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 7150bd0929..30f27b0f4f 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -24,10 +24,25 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
return "/allarch.bbclass" in inherits
def isImage(mc, fn):
return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])
+ def isSPDXTask(task):
+ return task in ("do_create_spdx", "do_create_runtime_spdx")
depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
mc, _ = bb.runqueue.split_mc(fn)
+ # We can skip the rm_work task signature to avoid running the task
+ # when we remove some tasks from the dependency chain,
+ # i.e. INHERIT:remove = "create-spdx" will trigger do_rm_work
+ if task == "do_rm_work":
+ return False
+
+ # Keep all dependencies between SPDX tasks in the signature. SPDX documents
+ # are linked together by hashes, which means if a dependent document changes,
+ # all downstream documents must be re-written (even if they are "safe"
+ # dependencies).
+ if isSPDXTask(task) and isSPDXTask(deptaskname):
+ return True
+
# (Almost) always include our own inter-task dependencies (unless it comes
# from a mcdepends). The exception is the special
# do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
@@ -452,11 +467,15 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
pkgarchs.append('allarch')
pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+ searched_manifests = []
+
for pkgarch in pkgarchs:
manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
if os.path.exists(manifest):
return manifest, d2
- bb.fatal("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ searched_manifests.append(manifest)
+ bb.fatal("The sstate manifest for task '%s:%s' (multilib variant '%s') could not be found.\nThe pkgarchs considered were: %s.\nBut none of these manifests exists:\n %s"
+ % (taskdata, taskname, variant, d2.expand(", ".join(pkgarchs)),"\n ".join(searched_manifests)))
return None, d2
def OEOuthashBasic(path, sigfile, task, d):
@@ -641,6 +660,10 @@ def OEOuthashBasic(path, sigfile, task, d):
if f == 'fixmepath':
continue
process(os.path.join(root, f))
+
+ for dir in dirs:
+ if os.path.islink(os.path.join(root, dir)):
+ process(os.path.join(root, dir))
finally:
os.chdir(prev_dir)
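The sstate_rundepfilter() additions always drop do_rm_work from task signatures and always keep dependencies between SPDX tasks, since SPDX documents reference one another by hash and must be rewritten together. A cut-down sketch of just that predicate (returning None here means "defer to the remaining rules"):

SPDX_TASKS = ("do_create_spdx", "do_create_runtime_spdx")

def keep_dependency(task, deptaskname):
    """Early decisions for whether a dependency stays in the task signature."""
    # rm_work never affects output, so leave it out of every signature
    if task == "do_rm_work":
        return False
    # SPDX documents are linked by hash, so SPDX->SPDX dependencies must stay
    if task in SPDX_TASKS and deptaskname in SPDX_TASKS:
        return True
    # Defer to the existing inter-task dependency rules
    return None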
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index de8dcebf94..b674335654 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -102,6 +102,10 @@ class Rxvt(XTerminal):
command = 'rxvt -T "{title}" -e {command}'
priority = 1
+class URxvt(XTerminal):
+ command = 'urxvt -T "{title}" -e {command}'
+ priority = 1
+
class Screen(Terminal):
command = 'screen -D -m -t "{title}" -S devshell {command}'
diff --git a/meta/lib/oeqa/core/target/ssh.py b/meta/lib/oeqa/core/target/ssh.py
index f956a7744f..6ee40083bd 100644
--- a/meta/lib/oeqa/core/target/ssh.py
+++ b/meta/lib/oeqa/core/target/ssh.py
@@ -34,6 +34,8 @@ class OESSHTarget(OETarget):
self.timeout = timeout
self.user = user
ssh_options = [
+ '-o', 'ServerAliveCountMax=2',
+ '-o', 'ServerAliveInterval=30',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'LogLevel=ERROR'
@@ -224,26 +226,35 @@ def SSHCall(command, logger, timeout=None, **opts):
def run():
nonlocal output
nonlocal process
+ output_raw = b''
starttime = time.time()
process = subprocess.Popen(command, **options)
if timeout:
endtime = starttime + timeout
eof = False
+ os.set_blocking(process.stdout.fileno(), False)
while time.time() < endtime and not eof:
- logger.debug('time: %s, endtime: %s' % (time.time(), endtime))
try:
+ logger.debug('Waiting for process output: time: %s, endtime: %s' % (time.time(), endtime))
if select.select([process.stdout], [], [], 5)[0] != []:
- reader = codecs.getreader('utf-8')(process.stdout, 'ignore')
- data = reader.read(1024, 4096)
+ # wait a bit for more data; this tries to avoid reading single characters
+ time.sleep(0.2)
+ data = process.stdout.read()
if not data:
- process.stdout.close()
eof = True
else:
- output += data
- logger.debug('Partial data from SSH call: %s' % data)
+ output_raw += data
+ # ignore errors to capture as much as possible
+ logger.debug('Partial data from SSH call:\n%s' % data.decode('utf-8', errors='ignore'))
endtime = time.time() + timeout
except InterruptedError:
+ logger.debug('InterruptedError')
continue
+ except BlockingIOError:
+ logger.debug('BlockingIOError')
+ continue
+
+ process.stdout.close()
# process hasn't returned yet
if not eof:
@@ -252,16 +263,32 @@ def SSHCall(command, logger, timeout=None, **opts):
try:
process.kill()
except OSError:
+ logger.debug('OSError when killing process')
pass
endtime = time.time() - starttime
lastline = ("\nProcess killed - no output for %d seconds. Total"
" running time: %d seconds." % (timeout, endtime))
- logger.debug('Received data from SSH call %s ' % lastline)
+ logger.debug('Received data from SSH call:\n%s ' % lastline)
output += lastline
+ process.wait()
else:
- output = process.communicate()[0].decode('utf-8', errors='ignore')
- logger.debug('Data from SSH call: %s' % output.rstrip())
+ output_raw = process.communicate()[0]
+
+ output = output_raw.decode('utf-8', errors='ignore')
+ logger.debug('Data from SSH call:\n%s' % output.rstrip())
+
+ # timeout or not, make sure the process exits and is not hanging
+ if process.returncode == None:
+ try:
+ process.wait(timeout=5)
+ except TimeoutExpired:
+ try:
+ process.kill()
+ except OSError:
+ logger.debug('OSError')
+ pass
+ process.wait()
options = {
"stdout": subprocess.PIPE,
@@ -288,6 +315,9 @@ def SSHCall(command, logger, timeout=None, **opts):
# whilst running and ensure we don't leave a process behind.
if process.poll() is None:
process.kill()
+ if process.returncode == None:
+ process.wait()
logger.debug('Something went wrong, killing SSH process')
raise
- return (process.wait(), output.rstrip())
+
+ return (process.returncode, output.rstrip())
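The reworked SSHCall() loop puts the child's stdout into non-blocking mode, accumulates raw bytes, resets the idle timer whenever data arrives, and only decodes once at the end, retrying on InterruptedError and BlockingIOError. A self-contained sketch of the same read loop for an arbitrary command (the command and timeout are placeholders):

import os
import select
import subprocess
import time

def read_with_timeout(cmd, timeout=10):
    """Collect a child's stdout as bytes, giving up after `timeout` idle seconds."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    os.set_blocking(proc.stdout.fileno(), False)
    output = b''
    endtime = time.time() + timeout
    eof = False
    while time.time() < endtime and not eof:
        try:
            if select.select([proc.stdout], [], [], 5)[0]:
                time.sleep(0.2)  # let more data accumulate before reading
                data = proc.stdout.read()
                if not data:
                    eof = True
                else:
                    output += data
                    endtime = time.time() + timeout  # reset the idle timer
        except (InterruptedError, BlockingIOError):
            continue
    proc.stdout.close()
    if not eof:
        proc.kill()
    proc.wait()
    return proc.returncode, output.decode('utf-8', errors='ignore')

# Example: read_with_timeout(["echo", "hello"])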
diff --git a/meta/lib/oeqa/core/utils/concurrencytest.py b/meta/lib/oeqa/core/utils/concurrencytest.py
index 161a2f6e90..fe6ea29525 100644
--- a/meta/lib/oeqa/core/utils/concurrencytest.py
+++ b/meta/lib/oeqa/core/utils/concurrencytest.py
@@ -57,6 +57,7 @@ class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
self.outputbuf = output
self.finalresult = finalresult
self.finalresult.buffer = True
+ self.target = target
def _add_result_with_semaphore(self, method, test, *args, **kwargs):
self.semaphore.acquire()
@@ -65,13 +66,14 @@ class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
self.result.starttime[test.id()] = self._test_start.timestamp()
self.result.threadprogress[self.threadnum].append(test.id())
totalprogress = sum(len(x) for x in self.result.threadprogress.values())
- self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s)" % (
+ self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s failed) (%s)" % (
self.threadnum,
len(self.result.threadprogress[self.threadnum]),
self.totalinprocess,
totalprogress,
self.totaltests,
"{0:.2f}".format(time.time()-self._test_start.timestamp()),
+ self.target.failed_tests,
test.id())
finally:
self.semaphore.release()
diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py
index 9c84466dd0..b53c611062 100644
--- a/meta/lib/oeqa/oetest.py
+++ b/meta/lib/oeqa/oetest.py
@@ -256,7 +256,7 @@ class TestContext(object):
modules = []
for test in self.testslist:
- if re.search("\w+\.\w+\.test_\S+", test):
+ if re.search(r"\w+\.\w+\.test_\S+", test):
test = '.'.join(t.split('.')[:3])
module = pkgutil.get_loader(test)
modules.append(module)
diff --git a/meta/lib/oeqa/runtime/cases/dnf.py b/meta/lib/oeqa/runtime/cases/dnf.py
index f40c63026e..2cfb36425c 100644
--- a/meta/lib/oeqa/runtime/cases/dnf.py
+++ b/meta/lib/oeqa/runtime/cases/dnf.py
@@ -144,7 +144,7 @@ class DnfRepoTest(DnfTest):
self.assertEqual(0, status, output)
@OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
- @skipIfNotInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test run when enable usrmege')
+ @skipIfNotInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test run when enable usrmerge')
@OEHasPackage('busybox')
def test_dnf_installroot_usrmerge(self):
rootpath = '/home/root/chroot/test'
diff --git a/meta/lib/oeqa/runtime/cases/ltp.py b/meta/lib/oeqa/runtime/cases/ltp.py
index a66d5d13d7..879f2a673c 100644
--- a/meta/lib/oeqa/runtime/cases/ltp.py
+++ b/meta/lib/oeqa/runtime/cases/ltp.py
@@ -67,7 +67,7 @@ class LtpTest(LtpTestBase):
def runltp(self, ltp_group):
cmd = '/opt/ltp/runltp -f %s -p -q -r /opt/ltp -l /opt/ltp/results/%s -I 1 -d /opt/ltp' % (ltp_group, ltp_group)
starttime = time.time()
- (status, output) = self.target.run(cmd)
+ (status, output) = self.target.run(cmd, timeout=1200)
endtime = time.time()
with open(os.path.join(self.ltptest_log_dir, "%s-raw.log" % ltp_group), 'w') as f:
diff --git a/meta/lib/oeqa/runtime/cases/parselogs.py b/meta/lib/oeqa/runtime/cases/parselogs.py
index 1f9365f3a8..2d59bcf5f7 100644
--- a/meta/lib/oeqa/runtime/cases/parselogs.py
+++ b/meta/lib/oeqa/runtime/cases/parselogs.py
@@ -64,6 +64,7 @@ common_errors = [
"[pulseaudio] authkey.c: Failed to load authentication key",
"was skipped because of a failed condition check",
"was skipped because all trigger condition checks failed",
+ "xf86OpenConsole: Switching VT failed",
]
video_related = [
@@ -140,6 +141,7 @@ ignore_errors = {
'Failed to initialize \'/amba/timer@101e3000\': -22',
'jitterentropy: Initialization failed with host not compliant with requirements: 2',
'clcd-pl11x: probe of 10120000.display failed with error -2',
+ 'arm-charlcd 10008000.lcd: error -ENXIO: IRQ index 0 not found'
] + common_errors,
'qemuarm64' : [
'Fatal server error:',
diff --git a/meta/lib/oeqa/runtime/cases/rpm.py b/meta/lib/oeqa/runtime/cases/rpm.py
index a4339116bf..7226b8af6a 100644
--- a/meta/lib/oeqa/runtime/cases/rpm.py
+++ b/meta/lib/oeqa/runtime/cases/rpm.py
@@ -49,21 +49,20 @@ class RpmBasicTest(OERuntimeTestCase):
msg = 'status: %s. Cannot run rpm -qa: %s' % (status, output)
self.assertEqual(status, 0, msg=msg)
- def check_no_process_for_user(u):
- _, output = self.target.run(self.tc.target_cmds['ps'])
- if u + ' ' in output:
- return False
- else:
- return True
+ def wait_for_no_process_for_user(u, timeout = 120):
+ timeout_at = time.time() + timeout
+ while time.time() < timeout_at:
+ _, output = self.target.run(self.tc.target_cmds['ps'])
+ if u + ' ' not in output:
+ return
+ time.sleep(1)
+ user_pss = [ps for ps in output.split("\n") if u + ' ' in ps]
+ msg = "User %s has processes still running: %s" % (u, "\n".join(user_pss))
+ self.fail(msg=msg)
def unset_up_test_user(u):
# ensure no test1 process is running
- timeout = time.time() + 30
- while time.time() < timeout:
- if check_no_process_for_user(u):
- break
- else:
- time.sleep(1)
+ wait_for_no_process_for_user(u)
status, output = self.target.run('userdel -r %s' % u)
msg = 'Failed to erase user: %s' % output
self.assertTrue(status == 0, msg=msg)
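wait_for_no_process_for_user() above polls the target's process list until the user's processes disappear, and fails with the remaining entries once the deadline passes. The same poll-until-timeout idea as a generic helper (the predicate and interval are illustrative):

import time

def wait_until(predicate, timeout=120, interval=1):
    """Poll predicate() until it returns True or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g. wait up to 5 seconds for a flag file to appear:
# import os
# wait_until(lambda: os.path.exists("/tmp/flag"), timeout=5)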
diff --git a/meta/lib/oeqa/runtime/cases/rt.py b/meta/lib/oeqa/runtime/cases/rt.py
new file mode 100644
index 0000000000..849ac1914e
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/rt.py
@@ -0,0 +1,17 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+class RtTest(OERuntimeTestCase):
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_is_rt(self):
+ """
+ Check that the kernel has CONFIG_PREEMPT_RT enabled.
+ """
+ status, output = self.target.run("uname -a")
+ self.assertEqual(status, 0, msg=output)
+ # Split so we don't get a substring false-positive
+ self.assertIn("PREEMPT_RT", output.split())
diff --git a/meta/lib/oeqa/runtime/cases/rtc.py b/meta/lib/oeqa/runtime/cases/rtc.py
index c4e6681324..39f4d29f23 100644
--- a/meta/lib/oeqa/runtime/cases/rtc.py
+++ b/meta/lib/oeqa/runtime/cases/rtc.py
@@ -1,5 +1,6 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfFeature
from oeqa.runtime.decorator.package import OEHasPackage
import re
@@ -16,12 +17,14 @@ class RTCTest(OERuntimeTestCase):
self.logger.debug('Starting systemd-timesyncd daemon')
self.target.run('systemctl enable --now --runtime systemd-timesyncd')
+ @skipIfFeature('read-only-rootfs',
+ 'Test does not work with read-only-rootfs in IMAGE_FEATURES')
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['coreutils', 'busybox'])
def test_rtc(self):
(status, output) = self.target.run('hwclock -r')
self.assertEqual(status, 0, msg='Failed to get RTC time, output: %s' % output)
-
+
(status, current_datetime) = self.target.run('date +"%m%d%H%M%Y"')
self.assertEqual(status, 0, msg='Failed to get system current date & time, output: %s' % current_datetime)
@@ -32,7 +35,6 @@ class RTCTest(OERuntimeTestCase):
(status, output) = self.target.run('date %s' % current_datetime)
self.assertEqual(status, 0, msg='Failed to reset system date & time, output: %s' % output)
-
+
(status, output) = self.target.run('hwclock -w')
self.assertEqual(status, 0, msg='Failed to reset RTC time, output: %s' % output)
-
diff --git a/meta/lib/oeqa/runtime/cases/scp.py b/meta/lib/oeqa/runtime/cases/scp.py
index 3a5f292152..f2bbc947d6 100644
--- a/meta/lib/oeqa/runtime/cases/scp.py
+++ b/meta/lib/oeqa/runtime/cases/scp.py
@@ -23,7 +23,7 @@ class ScpTest(OERuntimeTestCase):
os.remove(cls.tmp_path)
@OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['openssh-scp', 'dropbear'])
+ @OEHasPackage(['openssh-scp'])
def test_scp_file(self):
dst = '/tmp/test_scp_file'
diff --git a/meta/lib/oeqa/runtime/context.py b/meta/lib/oeqa/runtime/context.py
index 8092dd0bae..0c5d1869ab 100644
--- a/meta/lib/oeqa/runtime/context.py
+++ b/meta/lib/oeqa/runtime/context.py
@@ -67,11 +67,11 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
% self.default_target_type)
runtime_group.add_argument('--target-ip', action='store',
default=self.default_target_ip,
- help="IP address of device under test, default: %s" \
+ help="IP address and optionally ssh port (default 22) of device under test, for example '192.168.0.7:22'. Default: %s" \
% self.default_target_ip)
runtime_group.add_argument('--server-ip', action='store',
default=self.default_target_ip,
- help="IP address of device under test, default: %s" \
+ help="IP address of the test host from test target machine, default: %s" \
% self.default_server_ip)
runtime_group.add_argument('--host-dumper-dir', action='store',
diff --git a/meta/lib/oeqa/sdk/buildtools-cases/sanity.py b/meta/lib/oeqa/sdk/buildtools-cases/sanity.py
index 64baaa8f84..68b19f4d47 100644
--- a/meta/lib/oeqa/sdk/buildtools-cases/sanity.py
+++ b/meta/lib/oeqa/sdk/buildtools-cases/sanity.py
@@ -19,4 +19,4 @@ class SanityTests(OESDKTestCase):
# Canonicalise the location of this command
tool_path = os.path.realpath(self._run("command -v %s" % command).strip())
# Assert that the tool was found inside the SDK root
- self.assertEquals(os.path.commonprefix((sdk_base, tool_path)), sdk_base)
+ self.assertEqual(os.path.commonprefix((sdk_base, tool_path)), sdk_base)
diff --git a/meta/lib/oeqa/sdk/cases/buildepoxy.py b/meta/lib/oeqa/sdk/cases/buildepoxy.py
index f69f720cd6..1c41b04169 100644
--- a/meta/lib/oeqa/sdk/cases/buildepoxy.py
+++ b/meta/lib/oeqa/sdk/cases/buildepoxy.py
@@ -32,7 +32,7 @@ class EpoxyTest(OESDKTestCase):
self.assertTrue(os.path.isdir(dirs["source"]))
os.makedirs(dirs["build"])
- log = self._run("meson -Degl=no -Dglx=no -Dx11=false {build} {source}".format(**dirs))
+ log = self._run("meson --warnlevel 1 -Degl=no -Dglx=no -Dx11=false {build} {source}".format(**dirs))
# Check that Meson thinks we're doing a cross build and not a native
self.assertIn("Build type: cross build", log)
self._run("ninja -C {build} -v".format(**dirs))
diff --git a/meta/lib/oeqa/sdk/cases/python.py b/meta/lib/oeqa/sdk/cases/python.py
index a334abce5f..d43354c32a 100644
--- a/meta/lib/oeqa/sdk/cases/python.py
+++ b/meta/lib/oeqa/sdk/cases/python.py
@@ -8,17 +8,6 @@ from oeqa.sdk.case import OESDKTestCase
from oeqa.utils.subprocesstweak import errors_have_output
errors_have_output()
-class Python2Test(OESDKTestCase):
- def setUp(self):
- if not (self.tc.hasHostPackage("nativesdk-python-core") or
- self.tc.hasHostPackage("python-core-native")):
- raise unittest.SkipTest("No python package in the SDK")
-
- def test_python2(self):
- cmd = "python -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\""
- output = self._run(cmd)
- self.assertEqual(output, "Hello, world\n")
-
class Python3Test(OESDKTestCase):
def setUp(self):
if not (self.tc.hasHostPackage("nativesdk-python3-core") or
diff --git a/meta/lib/oeqa/sdkext/cases/devtool.py b/meta/lib/oeqa/sdkext/cases/devtool.py
index a5c6a76e02..5ffb732556 100644
--- a/meta/lib/oeqa/sdkext/cases/devtool.py
+++ b/meta/lib/oeqa/sdkext/cases/devtool.py
@@ -112,7 +112,7 @@ class SdkUpdateTest(OESDKExtTestCase):
cmd = 'oe-publish-sdk %s %s' % (tcname_new, self.publish_dir)
subprocess.check_output(cmd, shell=True)
- self.http_service = HTTPService(self.publish_dir)
+ self.http_service = HTTPService(self.publish_dir, logger=self.logger)
self.http_service.start()
self.http_url = "http://127.0.0.1:%d" % self.http_service.port
diff --git a/meta/lib/oeqa/selftest/cases/bblayers.py b/meta/lib/oeqa/selftest/cases/bblayers.py
index 7d74833f61..0b9f16eeae 100644
--- a/meta/lib/oeqa/selftest/cases/bblayers.py
+++ b/meta/lib/oeqa/selftest/cases/bblayers.py
@@ -46,7 +46,7 @@ class BitbakeLayers(OESelftestTestCase):
bb_file = os.path.join(testoutdir, recipe_path, recipe_file)
self.assertTrue(os.path.isfile(bb_file), msg = "Cannot find xcursor-transparent-theme_0.1.1.bb in the test_bitbakelayers_flatten local dir.")
contents = ftools.read_file(bb_file)
- find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
+ find_in_contents = re.search(r"##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
self.assertTrue(find_in_contents, msg = "Flattening layers did not work. bitbake-layers flatten output: %s" % result.output)
def test_bitbakelayers_add_remove(self):
diff --git a/meta/lib/oeqa/selftest/cases/bbtests.py b/meta/lib/oeqa/selftest/cases/bbtests.py
index cfac7afcf4..d91c8ea82b 100644
--- a/meta/lib/oeqa/selftest/cases/bbtests.py
+++ b/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -188,6 +188,10 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.assertTrue(find, "No version returned for searched recipe. bitbake output: %s" % result.output)
def test_prefile(self):
+ # Test when the prefile does not exist
+ result = runCmd('bitbake -r conf/prefile.conf', ignore_status=True)
+ self.assertEqual(1, result.status, "bitbake didn't error and should have when a specified prefile didn't exist: %s" % result.output)
+ # Test when the prefile exists
preconf = os.path.join(self.builddir, 'conf/prefile.conf')
self.track_for_cleanup(preconf)
ftools.write_file(preconf ,"TEST_PREFILE=\"prefile\"")
@@ -198,6 +202,10 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.assertIn('localconf', result.output)
def test_postfile(self):
+ # Test when the postfile does not exist
+ result = runCmd('bitbake -R conf/postfile.conf', ignore_status=True)
+ self.assertEqual(1, result.status, "bitbake didn't error and should have when a specified postfile didn't exist: %s" % result.output)
+ # Test when the postfile exists
postconf = os.path.join(self.builddir, 'conf/postfile.conf')
self.track_for_cleanup(postconf)
ftools.write_file(postconf , "TEST_POSTFILE=\"postfile\"")
@@ -350,4 +358,4 @@ INHERIT:remove = \"report-error\"
self.write_config("DISTROOVERRIDES .= \":gitunpack-enable-recipe\"")
result = bitbake('gitunpackoffline-fail -c fetch', ignore_status=True)
- self.assertTrue("Recipe uses a floating tag/branch without a fixed SRCREV" in result.output, msg = "Recipe without PV set to SRCPV should have failed: %s" % result.output)
+ self.assertTrue(re.search("Recipe uses a floating tag/branch .* for repo .* without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev()", result.output), msg = "Recipe without PV set to SRCPV should have failed: %s" % result.output)
diff --git a/meta/lib/oeqa/selftest/cases/cve_check.py b/meta/lib/oeqa/selftest/cases/cve_check.py
index d0b2213703..22ffeffd29 100644
--- a/meta/lib/oeqa/selftest/cases/cve_check.py
+++ b/meta/lib/oeqa/selftest/cases/cve_check.py
@@ -48,6 +48,25 @@ class CVECheck(OESelftestTestCase):
self.assertTrue( result ,msg="Failed to compare version with suffix '1.0_patch2' < '1.0_patch3'")
+ def test_convert_cve_version(self):
+ from oe.cve_check import convert_cve_version
+
+ # Default format
+ self.assertEqual(convert_cve_version("8.3"), "8.3")
+ self.assertEqual(convert_cve_version(""), "")
+
+ # OpenSSL format version
+ self.assertEqual(convert_cve_version("1.1.1t"), "1.1.1t")
+
+ # OpenSSH format
+ self.assertEqual(convert_cve_version("8.3_p1"), "8.3p1")
+ self.assertEqual(convert_cve_version("8.3_p22"), "8.3p22")
+
+ # Linux kernel format
+ self.assertEqual(convert_cve_version("6.2_rc8"), "6.2-rc8")
+ self.assertEqual(convert_cve_version("6.2_rc31"), "6.2-rc31")
+
+
def test_recipe_report_json(self):
config = """
INHERIT += "cve-check"
diff --git a/meta/lib/oeqa/selftest/cases/devtool.py b/meta/lib/oeqa/selftest/cases/devtool.py
index 3eea2b1a0e..dc0fc35062 100644
--- a/meta/lib/oeqa/selftest/cases/devtool.py
+++ b/meta/lib/oeqa/selftest/cases/devtool.py
@@ -8,6 +8,7 @@ import shutil
import tempfile
import glob
import fnmatch
+import unittest
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, create_temp_layer
@@ -38,6 +39,13 @@ def setUpModule():
canonical_layerpath = os.path.realpath(canonical_layerpath) + '/'
edited_layers.append(layerpath)
oldmetapath = os.path.realpath(layerpath)
+
+ # when downloading poky from tar.gz some tests will be skipped (BUG 12389)
+ try:
+ runCmd('git rev-parse --is-inside-work-tree', cwd=canonical_layerpath)
+ except:
+ raise unittest.SkipTest("devtool tests require folder to be a git repo")
+
result = runCmd('git rev-parse --show-toplevel', cwd=canonical_layerpath)
oldreporoot = result.output.rstrip()
newmetapath = os.path.join(corecopydir, os.path.relpath(oldmetapath, oldreporoot))
@@ -218,6 +226,34 @@ class DevtoolTestCase(OESelftestTestCase):
filelist.append(' '.join(splitline))
return filelist
+ def _check_diff(self, diffoutput, addlines, removelines):
+ """Check output from 'git diff' matches expectation"""
+ remaining_addlines = addlines[:]
+ remaining_removelines = removelines[:]
+ for line in diffoutput.splitlines():
+ if line.startswith('+++') or line.startswith('---'):
+ continue
+ elif line.startswith('+'):
+ matched = False
+ for item in addlines:
+ if re.match(item, line[1:].strip()):
+ matched = True
+ remaining_addlines.remove(item)
+ break
+ self.assertTrue(matched, 'Unexpected diff add line: %s' % line)
+ elif line.startswith('-'):
+ matched = False
+ for item in removelines:
+ if re.match(item, line[1:].strip()):
+ matched = True
+ remaining_removelines.remove(item)
+ break
+ self.assertTrue(matched, 'Unexpected diff remove line: %s' % line)
+ if remaining_addlines:
+ self.fail('Expected added lines not found: %s' % remaining_addlines)
+ if remaining_removelines:
+ self.fail('Expected removed lines not found: %s' % remaining_removelines)
+
class DevtoolBase(DevtoolTestCase):
@@ -230,6 +266,7 @@ class DevtoolBase(DevtoolTestCase):
cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate
cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n'
% cls.original_sstate)
+ cls.sstate_conf += ('BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"\n')
@classmethod
def tearDownClass(cls):
@@ -311,6 +348,38 @@ class DevtoolAddTests(DevtoolBase):
bindir = bindir[1:]
self.assertTrue(os.path.isfile(os.path.join(installdir, bindir, 'pv')), 'pv binary not found in D')
+ def test_devtool_add_binary(self):
+ # Create a binary package containing a known test file
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ pn = 'tst-bin'
+ pv = '1.0'
+ test_file_dir = "var/lib/%s/" % pn
+ test_file_name = "test_file"
+ test_file_content = "TEST CONTENT"
+ test_file_package_root = os.path.join(tempdir, pn)
+ test_file_dir_full = os.path.join(test_file_package_root, test_file_dir)
+ bb.utils.mkdirhier(test_file_dir_full)
+ with open(os.path.join(test_file_dir_full, test_file_name), "w") as f:
+ f.write(test_file_content)
+ bin_package_path = os.path.join(tempdir, "%s.tar.gz" % pn)
+ runCmd("tar czf %s -C %s ." % (bin_package_path, test_file_package_root))
+
+ # Test devtool add -b on the binary package
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c cleansstate %s' % pn)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool add -b %s %s' % (pn, bin_package_path))
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
+
+ # Build the resulting recipe
+ result = runCmd('devtool build %s' % pn)
+ installdir = get_bb_var('D', pn)
+ self.assertTrue(installdir, 'Could not query installdir variable')
+
+ # Check that a known file from the binary package has indeed been installed
+ self.assertTrue(os.path.isfile(os.path.join(installdir, test_file_dir, test_file_name)), '%s not found in D' % test_file_name)
+
def test_devtool_add_git_local(self):
# We need dbus built so that DEPENDS recognition works
bitbake('dbus')
@@ -444,7 +513,7 @@ class DevtoolAddTests(DevtoolBase):
checkvars['S'] = '${WORKDIR}/MarkupSafe-%s' % testver
checkvars['SRC_URI'] = url
self._test_recipe_contents(recipefile, checkvars, [])
-
+
def test_devtool_add_fetch_git(self):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -544,6 +613,19 @@ class DevtoolAddTests(DevtoolBase):
# Test devtool build
result = runCmd('devtool build %s' % pn)
+ def test_devtool_add_python_egg_requires(self):
+ # Fetch source
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ testver = '0.14.0'
+ url = 'https://files.pythonhosted.org/packages/e9/9e/25d59f5043cf763833b2581c8027fa92342c4cf8ee523b498ecdf460c16d/uvicorn-%s.tar.gz' % testver
+ testrecipe = 'python3-uvicorn'
+ srcdir = os.path.join(tempdir, testrecipe)
+ # Test devtool add
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool add %s %s -f %s' % (testrecipe, srcdir, url))
+
class DevtoolModifyTests(DevtoolBase):
def test_devtool_modify(self):
@@ -705,6 +787,7 @@ class DevtoolModifyTests(DevtoolBase):
self.assertTrue(bbclassextended, 'None of these recipes are BBCLASSEXTENDed to native - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
self.assertTrue(inheritnative, 'None of these recipes do "inherit native" - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
+
def test_devtool_modify_localfiles_only(self):
# Check preconditions
testrecipe = 'base-files'
@@ -836,7 +919,7 @@ class DevtoolModifyTests(DevtoolBase):
runCmd('git -C %s checkout %s' % (tempdir, branch))
with open(source, "rt") as f:
content = f.read()
- self.assertEquals(content, expected)
+ self.assertEqual(content, expected)
check('devtool', 'This is a test for something\n')
check('devtool-no-overrides', 'This is a test for something\n')
check('devtool-override-qemuarm', 'This is a test for qemuarm\n')
@@ -917,23 +1000,7 @@ class DevtoolUpdateTests(DevtoolBase):
srcurilines[0] = 'SRC_URI = "' + srcurilines[0]
srcurilines.append('"')
removelines = ['SRCREV = ".*"'] + srcurilines
- for line in result.output.splitlines():
- if line.startswith('+++') or line.startswith('---'):
- continue
- elif line.startswith('+'):
- matched = False
- for item in addlines:
- if re.match(item, line[1:].strip()):
- matched = True
- break
- self.assertTrue(matched, 'Unexpected diff add line: %s' % line)
- elif line.startswith('-'):
- matched = False
- for item in removelines:
- if re.match(item, line[1:].strip()):
- matched = True
- break
- self.assertTrue(matched, 'Unexpected diff remove line: %s' % line)
+ self._check_diff(result.output, addlines, removelines)
# Now try with auto mode
runCmd('cd %s; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, os.path.basename(recipefile)))
result = runCmd('devtool update-recipe %s' % testrecipe)
@@ -1303,6 +1370,73 @@ class DevtoolUpdateTests(DevtoolBase):
expected_status = []
self._check_repo_status(os.path.dirname(recipefile), expected_status)
+ def test_devtool_finish_modify_git_subdir(self):
+ # Check preconditions
+ testrecipe = 'dos2unix'
+ bb_vars = get_bb_vars(['SRC_URI', 'S', 'WORKDIR', 'FILE'], testrecipe)
+ self.assertIn('git://', bb_vars['SRC_URI'], 'This test expects the %s recipe to be a git recipe' % testrecipe)
+ workdir_git = '%s/git/' % bb_vars['WORKDIR']
+ if not bb_vars['S'].startswith(workdir_git):
+ self.fail('This test expects the %s recipe to be building from a subdirectory of the git repo' % testrecipe)
+ subdir = bb_vars['S'].split(workdir_git, 1)[1]
+ # Clean up anything in the workdir/sysroot/sstate cache
+ bitbake('%s -c cleansstate' % testrecipe)
+ # Try modifying a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ testsrcfile = os.path.join(tempdir, subdir, 'dos2unix.c')
+ self.assertExists(testsrcfile, 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. devtool output: %s' % result.output)
+ self.assertNotExists(os.path.join(tempdir, subdir, '.git'), 'Subdirectory has been initialised as a git repo')
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Modify file
+ runCmd("sed -i '1s:^:/* Add a comment */\\n:' %s" % testsrcfile)
+ result = runCmd('git commit -a -m "Add a comment"', cwd=tempdir)
+ # Now try updating original recipe
+ recipefile = bb_vars['FILE']
+ recipedir = os.path.dirname(recipefile)
+ self.add_command_to_tearDown('cd %s; rm -f %s/*.patch; git checkout .' % (recipedir, testrecipe))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
+ ('??', '.*/%s/%s/$' % (testrecipe, testrecipe))]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+ result = runCmd('git diff %s' % os.path.basename(recipefile), cwd=os.path.dirname(recipefile))
+ removelines = ['SRC_URI = "git://.*"']
+ addlines = [
+ 'SRC_URI = "git://.* \\\\',
+ 'file://0001-Add-a-comment.patch;patchdir=.. \\\\',
+ '"'
+ ]
+ self._check_diff(result.output, addlines, removelines)
+ # Put things back so we can run devtool finish on a different layer
+ runCmd('cd %s; rm -f %s/*.patch; git checkout .' % (recipedir, testrecipe))
+ # Run devtool finish
+ res = re.search('recipes-.*', recipedir)
+ self.assertTrue(res, 'Unable to find recipe subdirectory')
+ recipesubdir = res[0]
+ self.add_command_to_tearDown('rm -rf %s' % os.path.join(self.testlayer_path, recipesubdir))
+ result = runCmd('devtool finish %s meta-selftest' % testrecipe)
+ # Check bbappend file contents
+ appendfn = os.path.join(self.testlayer_path, recipesubdir, '%s_%%.bbappend' % testrecipe)
+ with open(appendfn, 'r') as f:
+ appendlines = f.readlines()
+ expected_appendlines = [
+ 'FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://0001-Add-a-comment.patch;patchdir=.."\n',
+ '\n'
+ ]
+ self.assertEqual(appendlines, expected_appendlines)
+ self.assertExists(os.path.join(os.path.dirname(appendfn), testrecipe, '0001-Add-a-comment.patch'))
+ # Try building
+ bitbake('%s -c patch' % testrecipe)
+
+
class DevtoolExtractTests(DevtoolBase):
def test_devtool_extract(self):
diff --git a/meta/lib/oeqa/selftest/cases/externalsrc.py b/meta/lib/oeqa/selftest/cases/externalsrc.py
new file mode 100644
index 0000000000..1d800dc82c
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/externalsrc.py
@@ -0,0 +1,44 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import shutil
+import tempfile
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import get_bb_var, runCmd
+
+class ExternalSrc(OESelftestTestCase):
+ # test that srctree_hash_files does not crash
+ # we should actually be checking do_compile[file-checksums] but oeqa currently does not support it
+ # so we check only that a recipe with externalsrc can be parsed
+ def test_externalsrc_srctree_hash_files(self):
+ test_recipe = "git-submodule-test"
+ git_url = "git://git.yoctoproject.org/git-submodule-test"
+ externalsrc_dir = tempfile.TemporaryDirectory(prefix="externalsrc").name
+
+ self.write_config(
+ """
+INHERIT += "externalsrc"
+EXTERNALSRC:pn-%s = "%s"
+""" % (test_recipe, externalsrc_dir)
+ )
+
+ # test with git without submodules
+ runCmd('git clone %s %s' % (git_url, externalsrc_dir))
+ os.unlink(externalsrc_dir + "/.gitmodules")
+ open(".gitmodules", 'w').close() # local file .gitmodules in cwd should not affect externalsrc parsing
+ self.assertEqual(get_bb_var("S", test_recipe), externalsrc_dir, msg = "S does not equal EXTERNALSRC")
+ os.unlink(".gitmodules")
+
+ # test with git with submodules
+ runCmd('git checkout .gitmodules', cwd=externalsrc_dir)
+ runCmd('git submodule update --init --recursive', cwd=externalsrc_dir)
+ self.assertEqual(get_bb_var("S", test_recipe), externalsrc_dir, msg = "S does not equal EXTERNALSRC")
+
+ # test without git
+ shutil.rmtree(os.path.join(externalsrc_dir, ".git"))
+ self.assertEqual(get_bb_var("S", test_recipe), externalsrc_dir, msg = "S does not equal EXTERNALSRC")
diff --git a/meta/lib/oeqa/selftest/cases/fitimage.py b/meta/lib/oeqa/selftest/cases/fitimage.py
index e6bfd1257e..4d820faf92 100644
--- a/meta/lib/oeqa/selftest/cases/fitimage.py
+++ b/meta/lib/oeqa/selftest/cases/fitimage.py
@@ -202,7 +202,7 @@ UBOOT_MKIMAGE_SIGN_ARGS = "-c 'a smart comment'"
signed_sections = {}
for line in result.output.splitlines():
if line.startswith((' Configuration', ' Image')):
- in_signed = re.search('\((.*)\)', line).groups()[0]
+ in_signed = re.search(r'\((.*)\)', line).groups()[0]
elif re.match('^ *', line) in (' ', ''):
in_signed = None
elif in_signed:
@@ -521,7 +521,7 @@ UBOOT_FIT_HASH_ALG = "sha256"
signed_sections = {}
for line in result.output.splitlines():
if line.startswith((' Image')):
- in_signed = re.search('\((.*)\)', line).groups()[0]
+ in_signed = re.search(r'\((.*)\)', line).groups()[0]
elif re.match(' \w', line):
in_signed = None
elif in_signed:
@@ -675,7 +675,7 @@ FIT_SIGN_INDIVIDUAL = "1"
signed_sections = {}
for line in result.output.splitlines():
if line.startswith((' Image')):
- in_signed = re.search('\((.*)\)', line).groups()[0]
+ in_signed = re.search(r'\((.*)\)', line).groups()[0]
elif re.match(' \w', line):
in_signed = None
elif in_signed:
@@ -738,6 +738,7 @@ UBOOT_LOADADDRESS = "0x80000000"
UBOOT_DTB_LOADADDRESS = "0x82000000"
UBOOT_ARCH = "arm"
UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
+UBOOT_MKIMAGE_KERNEL_TYPE = "kernel"
UBOOT_EXTLINUX = "0"
FIT_GENERATE_KEYS = "1"
KERNEL_IMAGETYPE_REPLACEMENT = "zImage"
@@ -763,6 +764,7 @@ FIT_HASH_ALG = "sha256"
kernel_load = str(get_bb_var('UBOOT_LOADADDRESS'))
kernel_entry = str(get_bb_var('UBOOT_ENTRYPOINT'))
+ kernel_type = str(get_bb_var('UBOOT_MKIMAGE_KERNEL_TYPE'))
kernel_compression = str(get_bb_var('FIT_KERNEL_COMP_ALG'))
uboot_arch = str(get_bb_var('UBOOT_ARCH'))
fit_hash_alg = str(get_bb_var('FIT_HASH_ALG'))
@@ -775,7 +777,7 @@ FIT_HASH_ALG = "sha256"
'kernel-1 {',
'description = "Linux kernel";',
'data = /incbin/("linux.bin");',
- 'type = "kernel";',
+ 'type = "' + kernel_type + '";',
'arch = "' + uboot_arch + '";',
'os = "linux";',
'compression = "' + kernel_compression + '";',
diff --git a/meta/lib/oeqa/selftest/cases/glibc.py b/meta/lib/oeqa/selftest/cases/glibc.py
index 6fc98e9cb4..f42593a27f 100644
--- a/meta/lib/oeqa/selftest/cases/glibc.py
+++ b/meta/lib/oeqa/selftest/cases/glibc.py
@@ -24,7 +24,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
features.append('TOOLCHAIN_TEST_HOST_USER = "root"')
features.append('TOOLCHAIN_TEST_HOST_PORT = "22"')
# force single threaded test execution
- features.append('EGLIBCPARALLELISM_task-check:pn-glibc-testsuite = "PARALLELMFLAGS="-j1""')
+ features.append('EGLIBCPARALLELISM:task-check:pn-glibc-testsuite = "PARALLELMFLAGS="-j1""')
self.write_config("\n".join(features))
bitbake("glibc-testsuite -c check")
@@ -41,7 +41,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
with contextlib.ExitStack() as s:
# use the base work dir, as the nfs mount, since the recipe directory may not exist
tmpdir = get_bb_var("BASE_WORKDIR")
- nfsport, mountport = s.enter_context(unfs_server(tmpdir))
+ nfsport, mountport = s.enter_context(unfs_server(tmpdir, udp = False))
# build core-image-minimal with required packages
default_installed_packages = [
@@ -61,7 +61,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
bitbake("core-image-minimal")
# start runqemu
- qemu = s.enter_context(runqemu("core-image-minimal", runqemuparams = "nographic"))
+ qemu = s.enter_context(runqemu("core-image-minimal", runqemuparams = "nographic", qemuparams = "-m 1024"))
# validate that SSH is working
status, _ = qemu.run("uname")
@@ -70,7 +70,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
# setup nfs mount
if qemu.run("mkdir -p \"{0}\"".format(tmpdir))[0] != 0:
raise Exception("Failed to setup NFS mount directory on target")
- mountcmd = "mount -o noac,nfsvers=3,port={0},udp,mountport={1} \"{2}:{3}\" \"{3}\"".format(nfsport, mountport, qemu.server_ip, tmpdir)
+ mountcmd = "mount -o noac,nfsvers=3,port={0},mountport={1} \"{2}:{3}\" \"{3}\"".format(nfsport, mountport, qemu.server_ip, tmpdir)
status, output = qemu.run(mountcmd)
if status != 0:
raise Exception("Failed to setup NFS mount on target ({})".format(repr(output)))
diff --git a/meta/lib/oeqa/selftest/cases/gotoolchain.py b/meta/lib/oeqa/selftest/cases/gotoolchain.py
index c809d7c9b1..978898b86f 100644
--- a/meta/lib/oeqa/selftest/cases/gotoolchain.py
+++ b/meta/lib/oeqa/selftest/cases/gotoolchain.py
@@ -43,12 +43,6 @@ class oeGoToolchainSelfTest(OESelftestTestCase):
@classmethod
def tearDownClass(cls):
- # Go creates file which are readonly
- for dirpath, dirnames, filenames in os.walk(cls.tmpdir_SDKQA):
- for filename in filenames + dirnames:
- f = os.path.join(dirpath, filename)
- if not os.path.islink(f):
- os.chmod(f, 0o775)
shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
super(oeGoToolchainSelfTest, cls).tearDownClass()
@@ -56,6 +50,8 @@ class oeGoToolchainSelfTest(OESelftestTestCase):
cmd = "cd %s/src/%s/%s; " % (self.go_path, proj, name)
cmd = cmd + ". %s; " % self.env_SDK
cmd = cmd + "export GOPATH=%s; " % self.go_path
+ cmd = cmd + "export GOFLAGS=-modcacherw; "
+ cmd = cmd + "export CGO_ENABLED=1; "
cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
return runCmd(cmd).status
diff --git a/meta/lib/oeqa/selftest/cases/git.py b/meta/lib/oeqa/selftest/cases/intercept.py
index f12874dc7d..f12874dc7d 100644
--- a/meta/lib/oeqa/selftest/cases/git.py
+++ b/meta/lib/oeqa/selftest/cases/intercept.py
diff --git a/meta/lib/oeqa/selftest/cases/liboe.py b/meta/lib/oeqa/selftest/cases/liboe.py
index afe8f8809f..da88ff480e 100644
--- a/meta/lib/oeqa/selftest/cases/liboe.py
+++ b/meta/lib/oeqa/selftest/cases/liboe.py
@@ -97,6 +97,6 @@ class LibOE(OESelftestTestCase):
dstcnt = len(os.listdir(dst))
srccnt = len(os.listdir(src))
- self.assertEquals(dstcnt, len(testfiles), "Number of files in dst (%s) differs from number of files in src(%s)." % (dstcnt, srccnt))
+ self.assertEqual(dstcnt, len(testfiles), "Number of files in dst (%s) differs from number of files in src(%s)." % (dstcnt, srccnt))
oe.path.remove(testloc)
diff --git a/meta/lib/oeqa/selftest/cases/lic_checksum.py b/meta/lib/oeqa/selftest/cases/lic_checksum.py
index 8f1226e6a5..bc0a2b5d8e 100644
--- a/meta/lib/oeqa/selftest/cases/lic_checksum.py
+++ b/meta/lib/oeqa/selftest/cases/lic_checksum.py
@@ -26,6 +26,7 @@ LIC_FILES_CHKSUM = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
SRC_URI = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
""" % (urllib.parse.quote(lic_path), urllib.parse.quote(lic_path)))
result = bitbake(bitbake_cmd)
+ self.delete_recipeinc('emptytest')
# Verify that changing a license file that has an absolute path causes
@@ -51,5 +52,6 @@ SRC_URI = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
f.write("data")
result = bitbake(bitbake_cmd, ignore_status=True)
+ self.delete_recipeinc('emptytest')
if error_msg not in result.output:
raise AssertionError(result.output)
diff --git a/meta/lib/oeqa/selftest/cases/locales.py b/meta/lib/oeqa/selftest/cases/locales.py
new file mode 100644
index 0000000000..433991abf9
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/locales.py
@@ -0,0 +1,45 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.core.decorator import OETestTag
+from oeqa.utils.commands import bitbake, runqemu
+
+class LocalesTest(OESelftestTestCase):
+
+ @OETestTag("runqemu")
+ def test_locales_on(self):
+ """
+ Summary: Test the locales are generated
+ Expected: 1. Check the locale exist in the locale-archive
+ 2. Check the locale exist for the glibc
+ 3. Check the locale can be generated
+ Product: oe-core
+ Author: Louis Rannou <lrannou@baylibre.com>
+ AutomatedBy: Louis Rannou <lrannou@baylibre.com>
+ """
+
+ features = []
+ features.append('EXTRA_IMAGE_FEATURES = "empty-root-password allow-empty-password allow-root-login"')
+ features.append('IMAGE_INSTALL:append = " glibc-utils localedef"')
+ features.append('GLIBC_GENERATE_LOCALES = "en_US.UTF-8 fr_FR.UTF-8"')
+ features.append('IMAGE_LINGUAS:append = " en-us fr-fr"')
+ features.append('ENABLE_BINARY_LOCALE_GENERATION = "1"')
+ self.write_config("\n".join(features))
+
+ # Build a core-image-minimal
+ bitbake('core-image-minimal')
+
+ with runqemu("core-image-minimal", ssh=False, runqemuparams='nographic') as qemu:
+ cmd = "locale -a"
+ status, output = qemu.run_serial(cmd)
+ # output must include fr_FR or fr_FR.UTF-8
+ self.assertEqual(status, 1, msg='locale test command failed: output: %s' % output)
+ self.assertIn("fr_FR", output, msg='locale -a test failed: output: %s' % output)
+
+ cmd = "localedef --list-archive -v"
+ status, output = qemu.run_serial(cmd)
+ # output must include fr_FR.utf8
+ self.assertEqual(status, 1, msg='localedef test command failed: output: %s' % output)
+ self.assertIn("fr_FR.utf8", output, msg='localedef test failed: output: %s' % output)
diff --git a/meta/lib/oeqa/selftest/cases/minidebuginfo.py b/meta/lib/oeqa/selftest/cases/minidebuginfo.py
new file mode 100644
index 0000000000..414dad64a3
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/minidebuginfo.py
@@ -0,0 +1,49 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+import os
+import subprocess
+import tempfile
+import shutil
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_var, runCmd
+
+
+class Minidebuginfo(OESelftestTestCase):
+ def test_minidebuginfo(self):
+ target_sys = get_bb_var("TARGET_SYS")
+ binutils = "binutils-cross-{}".format(get_bb_var("TARGET_ARCH"))
+
+ self.write_config("""
+PACKAGE_MINIDEBUGINFO = "1"
+IMAGE_FSTYPES = "tar.bz2"
+""")
+ bitbake("core-image-minimal {}:do_addto_recipe_sysroot".format(binutils))
+
+ deploy_dir = get_bb_var("DEPLOY_DIR_IMAGE")
+ native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", binutils)
+ readelf = get_bb_var("READELF", "core-image-minimal")
+
+ # add usr/bin/${TARGET_SYS} to PATH
+ env = os.environ.copy()
+ paths = [os.path.join(native_sysroot, "usr", "bin", target_sys)]
+ paths += env["PATH"].split(":")
+ env["PATH"] = ":".join(paths)
+
+ # confirm that executables and shared libraries contain an ELF section
+ # ".gnu_debugdata" which stores minidebuginfo.
+ with tempfile.TemporaryDirectory(prefix = "unpackfs-") as unpackedfs:
+ filename = os.path.join(deploy_dir, "core-image-minimal-{}.tar.bz2".format(self.td["MACHINE"]))
+ shutil.unpack_archive(filename, unpackedfs)
+
+ r = runCmd([readelf, "-W", "-S", os.path.join(unpackedfs, "bin", "busybox")],
+ native_sysroot = native_sysroot, env = env)
+ self.assertIn(".gnu_debugdata", r.output)
+
+ r = runCmd([readelf, "-W", "-S", os.path.join(unpackedfs, "lib", "libc.so.6")],
+ native_sysroot = native_sysroot, env = env)
+ self.assertIn(".gnu_debugdata", r.output)
+
diff --git a/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py b/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py
index 802a91a488..ae12aa0865 100644
--- a/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py
@@ -3,6 +3,7 @@
#
import os
+import sys
from oeqa.selftest.case import OESelftestTestCase
import tempfile
import operator
@@ -11,15 +12,14 @@ from oeqa.utils.commands import get_bb_var
class TestBlobParsing(OESelftestTestCase):
def setUp(self):
- import time
self.repo_path = tempfile.mkdtemp(prefix='selftest-buildhistory',
dir=get_bb_var('TOPDIR'))
try:
from git import Repo
self.repo = Repo.init(self.repo_path)
- except ImportError:
- self.skipTest('Python module GitPython is not present')
+ except ImportError as e:
+ self.skipTest('Python module GitPython is not present (%s) (%s)' % (e, sys.path))
self.test_file = "test"
self.var_map = {}
@@ -28,6 +28,16 @@ class TestBlobParsing(OESelftestTestCase):
import shutil
shutil.rmtree(self.repo_path)
+ @property
+ def heads_default(self):
+ """
+ Support repos defaulting to master or to main branch
+ """
+ try:
+ return self.repo.heads.main
+ except AttributeError:
+ return self.repo.heads.master
+
def commit_vars(self, to_add={}, to_remove = [], msg="A commit message"):
if len(to_add) == 0 and len(to_remove) == 0:
return
@@ -65,10 +75,10 @@ class TestBlobParsing(OESelftestTestCase):
changesmap = { "foo-2" : ("2", "8"), "bar" : ("","4"), "bar-2" : ("","5")}
self.commit_vars(to_add = { "foo" : "1", "foo-2" : "2", "foo-3" : "3" })
- blob1 = self.repo.heads.master.commit.tree.blobs[0]
+ blob1 = self.heads_default.commit.tree.blobs[0]
self.commit_vars(to_add = { "foo-2" : "8", "bar" : "4", "bar-2" : "5" })
- blob2 = self.repo.heads.master.commit.tree.blobs[0]
+ blob2 = self.heads_default.commit.tree.blobs[0]
change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
blob1, blob2, False, False)
@@ -84,10 +94,10 @@ class TestBlobParsing(OESelftestTestCase):
defaultmap = { x : ("default", "1") for x in ["PKG", "PKGE", "PKGV", "PKGR"]}
self.commit_vars(to_add = { "foo" : "1" })
- blob1 = self.repo.heads.master.commit.tree.blobs[0]
+ blob1 = self.heads_default.commit.tree.blobs[0]
self.commit_vars(to_add = { "PKG" : "1", "PKGE" : "1", "PKGV" : "1", "PKGR" : "1" })
- blob2 = self.repo.heads.master.commit.tree.blobs[0]
+ blob2 = self.heads_default.commit.tree.blobs[0]
change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
blob1, blob2, False, False)
diff --git a/meta/lib/oeqa/selftest/cases/prservice.py b/meta/lib/oeqa/selftest/cases/prservice.py
index 10158ca7c2..a41812148a 100644
--- a/meta/lib/oeqa/selftest/cases/prservice.py
+++ b/meta/lib/oeqa/selftest/cases/prservice.py
@@ -75,7 +75,7 @@ class BitbakePrTests(OESelftestTestCase):
exported_db_path = os.path.join(self.builddir, 'export.inc')
export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
- self.assertTrue(os.path.exists(exported_db_path))
+ self.assertTrue(os.path.exists(exported_db_path), msg="%s didn't exist, tool output %s" % (exported_db_path, export_result.output))
if replace_current_db:
current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
diff --git a/meta/lib/oeqa/selftest/cases/recipetool.py b/meta/lib/oeqa/selftest/cases/recipetool.py
index 510dae6bad..a2d8d292ad 100644
--- a/meta/lib/oeqa/selftest/cases/recipetool.py
+++ b/meta/lib/oeqa/selftest/cases/recipetool.py
@@ -440,16 +440,18 @@ class RecipetoolCreateTests(RecipetoolBase):
self._test_recipe_contents(recipefile, checkvars, inherits)
def test_recipetool_create_github(self):
- # Basic test to see if github URL mangling works
+ # Basic test to see if github URL mangling works. Deliberately use an
+ # older release of Meson at present so we don't need a toml parser.
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
recipefile = os.path.join(temprecipe, 'meson_git.bb')
- srcuri = 'https://github.com/mesonbuild/meson;rev=0.32.0'
- result = runCmd(['recipetool', 'create', '-o', temprecipe, srcuri])
- self.assertTrue(os.path.isfile(recipefile))
+ srcuri = 'https://github.com/mesonbuild/meson;rev=0.52.1'
+ cmd = ['recipetool', 'create', '-o', temprecipe, srcuri]
+ result = runCmd(cmd)
+ self.assertTrue(os.path.isfile(recipefile), msg="recipe %s not created for command %s, output %s" % (recipefile, " ".join(cmd), result.output))
checkvars = {}
- checkvars['LICENSE'] = set(['Apache-2.0'])
- checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https;branch=master'
+ checkvars['LICENSE'] = set(['Apache-2.0', "Unknown"])
+ checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https;branch=0.52'
inherits = ['setuptools3']
self._test_recipe_contents(recipefile, checkvars, inherits)
@@ -473,10 +475,11 @@ class RecipetoolCreateTests(RecipetoolBase):
self._test_recipe_contents(recipefile, checkvars, inherits)
def test_recipetool_create_github_tarball(self):
- # Basic test to ensure github URL mangling doesn't apply to release tarballs
+ # Basic test to ensure github URL mangling doesn't apply to release tarballs.
+ # Deliberately use an older release of Meson at present so we don't need a toml parser.
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
- pv = '0.32.0'
+ pv = '0.52.1'
recipefile = os.path.join(temprecipe, 'meson_%s.bb' % pv)
srcuri = 'https://github.com/mesonbuild/meson/releases/download/%s/meson-%s.tar.gz' % (pv, pv)
result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
@@ -579,7 +582,10 @@ class RecipetoolTests(RecipetoolBase):
commonlicdir = get_bb_var('COMMON_LICENSE_DIR')
- d = bb.tinfoil.TinfoilDataStoreConnector
+ class DataConnectorCopy(bb.tinfoil.TinfoilDataStoreConnector):
+ pass
+
+ d = DataConnectorCopy
d.getVar = Mock(return_value=commonlicdir)
srctree = tempfile.mkdtemp(prefix='recipetoolqa')
diff --git a/meta/lib/oeqa/selftest/cases/reproducible.py b/meta/lib/oeqa/selftest/cases/reproducible.py
index 5042c11d8e..49318be43a 100644
--- a/meta/lib/oeqa/selftest/cases/reproducible.py
+++ b/meta/lib/oeqa/selftest/cases/reproducible.py
@@ -126,15 +126,23 @@ class DiffoscopeTests(OESelftestTestCase):
class ReproducibleTests(OESelftestTestCase):
# Test the reproducibility of whatever is built between sstate_targets and targets
- package_classes = ['deb', 'ipk', 'rpm']
+ package_classes = get_bb_var("OEQA_REPRODUCIBLE_TEST_PACKAGE")
+ if package_classes:
+ package_classes = package_classes.split()
+ else:
+ package_classes = ['deb', 'ipk', 'rpm']
# Maximum report size, in bytes
max_report_size = 250 * 1024 * 1024
# targets are the things we want to test the reproducibility of
- targets = ['core-image-minimal', 'core-image-sato', 'core-image-full-cmdline', 'core-image-weston', 'world']
+ targets = get_bb_var("OEQA_REPRODUCIBLE_TEST_TARGET")
+ if targets:
+ targets = targets.split()
+ else:
+ targets = ['core-image-minimal', 'core-image-sato', 'core-image-full-cmdline', 'core-image-weston', 'world']
# sstate targets are things to pull from sstate to potentially cut build/debugging time
- sstate_targets = []
+ sstate_targets = (get_bb_var("OEQA_REPRODUCIBLE_TEST_SSTATE_TARGETS") or "").split()
save_results = False
if 'OEQA_DEBUGGING_SAVED_OUTPUT' in os.environ:
save_results = os.environ['OEQA_DEBUGGING_SAVED_OUTPUT']
@@ -149,7 +157,7 @@ class ReproducibleTests(OESelftestTestCase):
def setUpLocal(self):
super().setUpLocal()
- needed_vars = ['TOPDIR', 'TARGET_PREFIX', 'BB_NUMBER_THREADS']
+ needed_vars = ['TOPDIR', 'TARGET_PREFIX', 'BB_NUMBER_THREADS', 'BB_HASHSERVE']
bb_vars = get_bb_vars(needed_vars)
for v in needed_vars:
setattr(self, v.lower(), bb_vars[v])
@@ -223,7 +231,7 @@ class ReproducibleTests(OESelftestTestCase):
# mirror, forcing a complete build from scratch
config += textwrap.dedent('''\
SSTATE_DIR = "${TMPDIR}/sstate"
- SSTATE_MIRRORS = ""
+ SSTATE_MIRRORS = "file://.*/.*-native.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH file://.*/.*-cross.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
''')
self.logger.info("Building %s (sstate%s allowed)..." % (name, '' if use_sstate else ' NOT'))
diff --git a/meta/lib/oeqa/selftest/cases/resulttooltests.py b/meta/lib/oeqa/selftest/cases/resulttooltests.py
index dac5c46801..490f3fc5cf 100644
--- a/meta/lib/oeqa/selftest/cases/resulttooltests.py
+++ b/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -69,7 +69,7 @@ class ResultToolTests(OESelftestTestCase):
self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results)
self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results)
- def test_regrresion_can_get_regression_result(self):
+ def test_regression_can_get_regression_result(self):
base_result_data = {'result': {'test1': {'status': 'PASSED'},
'test2': {'status': 'PASSED'},
'test3': {'status': 'FAILED'},
diff --git a/meta/lib/oeqa/selftest/cases/runtime_test.py b/meta/lib/oeqa/selftest/cases/runtime_test.py
index 8eacde40ad..7dcdfd0ab2 100644
--- a/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -8,7 +8,7 @@ from oeqa.core.decorator import OETestTag
import os
import tempfile
import oe.lsb
-from oeqa.core.decorator.data import skipIfNotQemu
+from oeqa.core.decorator.data import skipIfNotQemu, skipIfNotMachine
class TestExport(OESelftestTestCase):
@@ -200,6 +200,8 @@ class TestImage(OESelftestTestCase):
bitbake('core-image-full-cmdline socat')
bitbake('-c testimage core-image-full-cmdline')
+ # https://bugzilla.yoctoproject.org/show_bug.cgi?id=14966
+ @skipIfNotMachine("qemux86-64", "test needs qemux86-64")
def test_testimage_virgl_gtk_sdl(self):
"""
Summary: Check host-assisted accelerate OpenGL functionality in qemu with gtk and SDL frontends
@@ -219,6 +221,8 @@ class TestImage(OESelftestTestCase):
self.skipTest('virgl isn\'t working with Centos 7')
if distro and distro == 'opensuseleap-15.0':
self.skipTest('virgl isn\'t working with Opensuse 15.0')
+ if distro and distro == 'ubuntu-18.04':
+ self.skipTest('virgl isn\'t working with Ubuntu 18.04')
qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
qemu_distrofeatures = get_bb_var('DISTRO_FEATURES', 'qemu-system-native')
@@ -241,6 +245,7 @@ class TestImage(OESelftestTestCase):
bitbake('core-image-minimal')
bitbake('-c testimage core-image-minimal')
+ @skipIfNotMachine("qemux86-64", "test needs qemux86-64")
def test_testimage_virgl_headless(self):
"""
Summary: Check host-assisted accelerate OpenGL functionality in qemu with egl-headless frontend
@@ -252,7 +257,8 @@ class TestImage(OESelftestTestCase):
import subprocess, os
distro = oe.lsb.distro_identifier()
- if distro and distro in ['debian-9', 'debian-10', 'centos-7', 'centos-8', 'ubuntu-16.04', 'ubuntu-18.04', 'almalinux-8.5', 'almalinux-8.6']:
+ if distro and (distro in ['debian-9', 'debian-10', 'centos-7', 'centos-8', 'ubuntu-16.04', 'ubuntu-18.04'] or
+ distro.startswith('almalinux') or distro.startswith('rocky')):
self.skipTest('virgl headless cannot be tested with %s' %(distro))
render_hint = """If /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create one suitable for mesa llvmpipe software renderer."""
@@ -263,7 +269,7 @@ class TestImage(OESelftestTestCase):
except FileNotFoundError:
self.fail("/dev/dri directory does not exist; no render nodes available on this machine. %s" %(render_hint))
try:
- dripath = subprocess.check_output("pkg-config --variable=dridriverdir dri", shell=True)
+ dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
except subprocess.CalledProcessError as e:
self.fail("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
qemu_distrofeatures = get_bb_var('DISTRO_FEATURES', 'qemu-system-native')
diff --git a/meta/lib/oeqa/selftest/cases/tinfoil.py b/meta/lib/oeqa/selftest/cases/tinfoil.py
index c81d56d82b..4b261dad00 100644
--- a/meta/lib/oeqa/selftest/cases/tinfoil.py
+++ b/meta/lib/oeqa/selftest/cases/tinfoil.py
@@ -64,6 +64,20 @@ class TinfoilTests(OESelftestTestCase):
localdata.setVar('PN', 'hello')
self.assertEqual('hello', localdata.getVar('BPN'))
+    # The config_data API to parse_recipe_file is used by:
+ # layerindex-web layerindex/update_layer.py
+ def test_parse_recipe_custom_data(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ localdata = bb.data.createCopy(tinfoil.config_data)
+ localdata.setVar("TESTVAR", "testval")
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ rd = tinfoil.parse_recipe_file(best[3], config_data=localdata)
+ self.assertEqual("testval", rd.getVar('TESTVAR'))
+
def test_list_recipes(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
diff --git a/meta/lib/oeqa/selftest/cases/wic.py b/meta/lib/oeqa/selftest/cases/wic.py
index de74c07a03..49fb6fe52c 100644
--- a/meta/lib/oeqa/selftest/cases/wic.py
+++ b/meta/lib/oeqa/selftest/cases/wic.py
@@ -1420,7 +1420,7 @@ class ModifyTests(WicTestCase):
# list directory content of the first partition
result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot))
- self.assertIn('\n%s ' % kerneltype.upper(), result.output)
+ self.assertIn('\n%s ' % kerneltype.upper(), result.output)
self.assertIn('\nEFI <DIR> ', result.output)
# remove file. EFI partitions are case-insensitive so exercise that too
diff --git a/meta/lib/oeqa/utils/dump.py b/meta/lib/oeqa/utils/dump.py
index 95a79a571c..6fd5832051 100644
--- a/meta/lib/oeqa/utils/dump.py
+++ b/meta/lib/oeqa/utils/dump.py
@@ -91,37 +91,55 @@ class HostDumper(BaseDumper):
self._write_dump(cmd.split()[0], result.output)
class TargetDumper(BaseDumper):
- """ Class to get dumps from target, it only works with QemuRunner """
+    """ Class to get dumps from the target; it only works with QemuRunner.
+    Will give up permanently after 5 errors from running commands over the
+    serial console. This helps to end testing when the target is really dead,
+    hanging or unresponsive.
+    """
def __init__(self, cmds, parent_dir, runner):
super(TargetDumper, self).__init__(cmds, parent_dir)
self.runner = runner
+ self.errors = 0
def dump_target(self, dump_dir=""):
+ if self.errors >= 5:
+ print("Too many errors when dumping data from target, assuming it is dead! Will not dump data anymore!")
+ return
if dump_dir:
self.dump_dir = dump_dir
for cmd in self.cmds:
# We can continue with the testing if serial commands fail
try:
(status, output) = self.runner.run_serial(cmd)
+ if status == 0:
+ self.errors = self.errors + 1
self._write_dump(cmd.split()[0], output)
except:
+ self.errors = self.errors + 1
print("Tried to dump info from target but "
"serial console failed")
print("Failed CMD: %s" % (cmd))
class MonitorDumper(BaseDumper):
- """ Class to get dumps via the Qemu Monitor, it only works with QemuRunner """
+    """ Class to get dumps via the Qemu Monitor; it only works with QemuRunner.
+    Will stop completely after 5 errors when dumping monitor data.
+    This helps to end testing when the target is really dead, hanging or unresponsive.
+    """
def __init__(self, cmds, parent_dir, runner):
super(MonitorDumper, self).__init__(cmds, parent_dir)
self.runner = runner
+ self.errors = 0
def dump_monitor(self, dump_dir=""):
if self.runner is None:
return
if dump_dir:
self.dump_dir = dump_dir
+ if self.errors >= 5:
+ print("Too many errors when dumping data from qemu monitor, assuming it is dead! Will not dump data anymore!")
+ return
for cmd in self.cmds:
cmd_name = cmd.split()[0]
try:
@@ -135,4 +153,5 @@ class MonitorDumper(BaseDumper):
output = self.runner.run_monitor(cmd_name)
self._write_dump(cmd_name, output)
except Exception as e:
+ self.errors = self.errors + 1
print("Failed to dump QMP CMD: %s with\nException: %s" % (cmd_name, e))
diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py
index 58d3c3b3f8..0d602e2dfa 100644
--- a/meta/lib/oeqa/utils/httpserver.py
+++ b/meta/lib/oeqa/utils/httpserver.py
@@ -38,6 +38,12 @@ class HTTPService(object):
self.port = self.server.server_port
self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir, self.logger])
+ def handle_error(self, request, client_address):
+ import traceback
+ exception = traceback.format_exc()
+ self.logger.warn("Exception when handling %s: %s" % (request, exception))
+ self.server.handle_error = handle_error
+
# The signal handler from testimage.bbclass can cause deadlocks here
# if the HTTPServer is terminated before it can restore the standard
#signal behaviour
diff --git a/meta/lib/oeqa/utils/metadata.py b/meta/lib/oeqa/utils/metadata.py
index 8013aa684d..15ec190c4a 100644
--- a/meta/lib/oeqa/utils/metadata.py
+++ b/meta/lib/oeqa/utils/metadata.py
@@ -27,9 +27,9 @@ def metadata_from_bb():
data_dict = get_bb_vars()
# Distro information
- info_dict['distro'] = {'id': data_dict['DISTRO'],
- 'version_id': data_dict['DISTRO_VERSION'],
- 'pretty_name': '%s %s' % (data_dict['DISTRO'], data_dict['DISTRO_VERSION'])}
+ info_dict['distro'] = {'id': data_dict.get('DISTRO', 'NODISTRO'),
+ 'version_id': data_dict.get('DISTRO_VERSION', 'NO_DISTRO_VERSION'),
+ 'pretty_name': '%s %s' % (data_dict.get('DISTRO', 'NODISTRO'), data_dict.get('DISTRO_VERSION', 'NO_DISTRO_VERSION'))}
# Host distro information
os_release = get_os_release()
diff --git a/meta/lib/oeqa/utils/nfs.py b/meta/lib/oeqa/utils/nfs.py
index a37686c914..c9bac050a4 100644
--- a/meta/lib/oeqa/utils/nfs.py
+++ b/meta/lib/oeqa/utils/nfs.py
@@ -8,7 +8,7 @@ from oeqa.utils.commands import bitbake, get_bb_var, Command
from oeqa.utils.network import get_free_port
@contextlib.contextmanager
-def unfs_server(directory, logger = None):
+def unfs_server(directory, logger = None, udp = True):
unfs_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "unfs3-native")
if not os.path.exists(os.path.join(unfs_sysroot, "usr", "bin", "unfsd")):
# build native tool
@@ -22,7 +22,7 @@ def unfs_server(directory, logger = None):
exports.write("{0} (rw,no_root_squash,no_all_squash,insecure)\n".format(directory).encode())
# find some ports for the server
- nfsport, mountport = get_free_port(udp = True), get_free_port(udp = True)
+ nfsport, mountport = get_free_port(udp), get_free_port(udp)
nenv = dict(os.environ)
nenv['PATH'] = "{0}/sbin:{0}/usr/sbin:{0}/usr/bin:".format(unfs_sysroot) + nenv.get('PATH', '')
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
index 76296d50cd..925d05a339 100644
--- a/meta/lib/oeqa/utils/qemurunner.py
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -195,7 +195,7 @@ class QemuRunner:
qmp_file = "." + next(tempfile._get_candidate_names())
qmp_param = ' -S -qmp unix:./%s,server,wait' % (qmp_file)
qmp_port = self.tmpdir + "/" + qmp_file
- # Create a second socket connection for debugging use,
+ # Create a second socket connection for debugging use,
# note this will NOT cause qemu to block waiting for the connection
qmp_file2 = "." + next(tempfile._get_candidate_names())
qmp_param += ' -qmp unix:./%s,server,nowait' % (qmp_file2)
@@ -342,6 +342,8 @@ class QemuRunner:
return False
try:
+ # set timeout value for all QMP calls
+ self.qmp.settimeout(self.runqemutime)
self.qmp.connect()
connect_time = time.time()
self.logger.info("QMP connected to QEMU at %s and took %s seconds" %
@@ -459,6 +461,8 @@ class QemuRunner:
socklist.remove(self.server_socket)
self.logger.debug("Connection from %s:%s" % addr)
else:
+ # try to avoid reading only a single character at a time
+ time.sleep(0.1)
data = data + sock.recv(1024)
if data:
bootlog += data
@@ -471,9 +475,9 @@ class QemuRunner:
self.server_socket = qemusock
stopread = True
reachedlogin = True
- self.logger.debug("Reached login banner in %s seconds (%s)" %
+ self.logger.debug("Reached login banner in %s seconds (%s, %s)" %
(time.time() - (endtime - self.boottime),
- time.strftime("%D %H:%M:%S")))
+ time.strftime("%D %H:%M:%S"), time.time()))
else:
# no need to check if reachedlogin unless we support multiple connections
self.logger.debug("QEMU socket disconnected before login banner reached. (%s)" %
@@ -532,10 +536,13 @@ class QemuRunner:
except OSError as e:
if e.errno != errno.ESRCH:
raise
- endtime = time.time() + self.runqemutime
- while self.runqemu.poll() is None and time.time() < endtime:
- time.sleep(1)
- if self.runqemu.poll() is None:
+ try:
+ outs, errs = self.runqemu.communicate(timeout = self.runqemutime)
+ if outs:
+ self.logger.info("Output from runqemu:\n%s", outs.decode("utf-8"))
+ if errs:
+ self.logger.info("Stderr from runqemu:\n%s", errs.decode("utf-8"))
+ except TimeoutExpired:
self.logger.debug("Sending SIGKILL to runqemu")
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
if not self.runqemu.stdout.closed:
@@ -612,12 +619,15 @@ class QemuRunner:
def run_monitor(self, command, args=None, timeout=60):
if hasattr(self, 'qmp') and self.qmp:
+ self.qmp.settimeout(timeout)
if args is not None:
return self.qmp.cmd(command, args)
else:
return self.qmp.cmd(command)
def run_serial(self, command, raw=False, timeout=60):
+ # Returns (status, output) where status is 1 on success and 0 on error
+
# We assume target system have echo to get command status
if not raw:
command = "%s; echo $?\n" % command
@@ -637,6 +647,8 @@ class QemuRunner:
except InterruptedError:
continue
if sread:
+                # try to avoid reading a single character at a time
+ time.sleep(0.1)
answer = self.server_socket.recv(1024)
if answer:
data += answer.decode('utf-8')
diff --git a/meta/lib/rootfspostcommands.py b/meta/lib/rootfspostcommands.py
index fdb9f5b850..12f66d2ce2 100644
--- a/meta/lib/rootfspostcommands.py
+++ b/meta/lib/rootfspostcommands.py
@@ -58,3 +58,10 @@ def sort_passwd(sysconfdir):
remove_backup(filename)
if os.path.exists(filename):
sort_file(filename, mapping)
+    # Drop backups of other known shadow-utils files.
+ for filename in (
+ 'subgid',
+ 'subuid',
+ ):
+ filepath = os.path.join(sysconfdir, filename)
+ remove_backup(filepath)
diff --git a/meta/recipes-bsp/alsa-state/alsa-state.bb b/meta/recipes-bsp/alsa-state/alsa-state.bb
index df546633f1..27b2eccbe4 100644
--- a/meta/recipes-bsp/alsa-state/alsa-state.bb
+++ b/meta/recipes-bsp/alsa-state/alsa-state.bb
@@ -8,8 +8,11 @@ SUMMARY = "Alsa scenario files to enable alsa state restoration"
HOMEPAGE = "http://www.alsa-project.org/"
DESCRIPTION = "Alsa Scenario Files - an init script and state files to restore \
sound state at system boot and save it at system shut down."
-LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+LICENSE = "MIT & GPL-2.0-or-later"
+LIC_FILES_CHKSUM = " \
+ file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420 \
+ file://alsa-state-init;beginline=3;endline=4;md5=3ff7ecbf534d7d503941abe8e268ef50 \
+"
PV = "0.2.0"
PR = "r5"
diff --git a/meta/recipes-bsp/alsa-state/alsa-state/alsa-state-init b/meta/recipes-bsp/alsa-state/alsa-state/alsa-state-init
index eee59cb321..a04cc27004 100755
--- a/meta/recipes-bsp/alsa-state/alsa-state/alsa-state-init
+++ b/meta/recipes-bsp/alsa-state/alsa-state/alsa-state-init
@@ -1,10 +1,9 @@
#! /bin/sh
#
# Copyright Matthias Hentges <devel@hentges.net> (c) 2007
-# License: GPL (see http://www.gnu.org/licenses/gpl.txt for a copy of the license)
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Filename: alsa-state
-# Date: 20070308 (YMD)
# source function library
. /etc/init.d/functions
diff --git a/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb b/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb
index 11d8b9061d..be6571b3fa 100644
--- a/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb
+++ b/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb
@@ -34,6 +34,4 @@ do_install () {
}
CLEANBROKEN = "1"
-# https://github.com/rhboot/efivar/issues/202
-COMPATIBLE_HOST:libc-musl = 'null'
diff --git a/meta/recipes-bsp/efivar/efivar/0001-Fix-invalid-free-in-main.patch b/meta/recipes-bsp/efivar/efivar/0001-Fix-invalid-free-in-main.patch
new file mode 100644
index 0000000000..7e63df578e
--- /dev/null
+++ b/meta/recipes-bsp/efivar/efivar/0001-Fix-invalid-free-in-main.patch
@@ -0,0 +1,30 @@
+From 085f027e9e9f1478f68ddda705f83b244ee3bd88 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Mon, 18 Apr 2022 13:08:18 -0400
+Subject: [PATCH] Fix invalid free in main()
+
+data is allocated by mmap() in prepare_data().
+
+Resolves: #173
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Upstream-Status: Backport
+Link: https://github.com/rhboot/efivar/commit/6be2cb1c0139ac177e754b0767abf1ca1533847f
+Signed-off-by: Grygorii Tertychnyi <grygorii.tertychnyi@leica-geosystems.com>
+
+---
+ src/efivar.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/efivar.c b/src/efivar.c
+index 5cd1eb2bc73c..09f85edd0a38 100644
+--- a/src/efivar.c
++++ b/src/efivar.c
+@@ -633,7 +633,7 @@ int main(int argc, char *argv[])
+ if (sz < 0)
+ err(1, "Could not import data from \"%s\"", infile);
+
+- free(data);
++ munmap(data, data_size);
+ data = NULL;
+ data_size = 0;
+
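
For reference, the pairing rule behind this fix: the buffer is created by mmap() in prepare_data(), so it has to be released with munmap(), never free(). A minimal standalone sketch of that pairing (illustration only, not efivar code; the file handling here is made up):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(int argc, char *argv[])
    {
        if (argc < 2) {
            fprintf(stderr, "usage: %s FILE\n", argv[0]);
            return 1;
        }

        int fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        struct stat st;
        if (fstat(fd, &st) < 0 || st.st_size == 0) {
            close(fd);
            return 1;
        }

        size_t data_size = st.st_size;
        void *data = mmap(NULL, data_size, PROT_READ, MAP_PRIVATE, fd, 0);
        close(fd);
        if (data == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        printf("mapped %zu bytes\n", data_size);

        /* free(data) here would be the invalid free the patch removes:
         * the pointer never came from malloc(). */
        munmap(data, data_size);
        return 0;
    }
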
diff --git a/meta/recipes-bsp/efivar/efivar/0001-Remove-deprecated-add-needed-linker-flag.patch b/meta/recipes-bsp/efivar/efivar/0001-Remove-deprecated-add-needed-linker-flag.patch
new file mode 100644
index 0000000000..fb6d2e8580
--- /dev/null
+++ b/meta/recipes-bsp/efivar/efivar/0001-Remove-deprecated-add-needed-linker-flag.patch
@@ -0,0 +1,45 @@
+From b23aba1469de8bb7a115751f9cd294ad3aaa6680 Mon Sep 17 00:00:00 2001
+From: Ali Abdel-Qader <abdelqaderali@protonmail.com>
+Date: Tue, 31 May 2022 11:53:32 -0400
+Subject: [PATCH] Remove deprecated --add-needed linker flag
+
+Resolves #204
+Signed-off-by: Ali Abdel-Qader <abdelqaderali@protonmail.com>
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+Upstream-Status: Backport [https://github.com/rhboot/efivar/pull/218/commits/b23aba1469de8bb7a115751f9cd294ad3aaa6680]
+
+ src/include/defaults.mk | 2 --
+ src/include/gcc.specs | 2 +-
+ 2 files changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/src/include/defaults.mk b/src/include/defaults.mk
+index b8cc590..42bd3d6 100644
+--- a/src/include/defaults.mk
++++ b/src/include/defaults.mk
+@@ -51,7 +51,6 @@ LDFLAGS ?=
+ override _CCLDFLAGS := $(CCLDFLAGS)
+ override _LDFLAGS := $(LDFLAGS)
+ override LDFLAGS = $(CFLAGS) -L. $(_LDFLAGS) $(_CCLDFLAGS) \
+- -Wl,--add-needed \
+ -Wl,--build-id \
+ -Wl,--no-allow-shlib-undefined \
+ -Wl,--no-undefined-version \
+@@ -98,7 +97,6 @@ override _HOST_LDFLAGS := $(HOST_LDFLAGS)
+ override _HOST_CCLDFLAGS := $(HOST_CCLDFLAGS)
+ override HOST_LDFLAGS = $(HOST_CFLAGS) -L. \
+ $(_HOST_LDFLAGS) $(_HOST_CCLDFLAGS) \
+- -Wl,--add-needed \
+ -Wl,--build-id \
+ -Wl,--no-allow-shlib-undefined \
+ -Wl,-z,now \
+diff --git a/src/include/gcc.specs b/src/include/gcc.specs
+index ef28e2b..d85e865 100644
+--- a/src/include/gcc.specs
++++ b/src/include/gcc.specs
+@@ -5,4 +5,4 @@
+ + %{!shared:%{!static:%{!r:-pie}}} %{static:-Wl,-no-fatal-warnings -Wl,-static -static -Wl,-z,relro,-z,now} -grecord-gcc-switches
+
+ *link:
+-+ %{!static:--fatal-warnings} --no-undefined-version --no-allow-shlib-undefined --add-needed -z now --build-id %{!static:%{!shared:-pie}} %{shared:-z relro} %{static:%<pie}
+++ %{!static:--fatal-warnings} --no-undefined-version --no-allow-shlib-undefined -z now --build-id %{!static:%{!shared:-pie}} %{shared:-z relro} %{static:%<pie}
diff --git a/meta/recipes-bsp/efivar/efivar/0002-Add-T-workaround-for-GNU-ld-2.36.patch b/meta/recipes-bsp/efivar/efivar/0002-Add-T-workaround-for-GNU-ld-2.36.patch
new file mode 100644
index 0000000000..a175673922
--- /dev/null
+++ b/meta/recipes-bsp/efivar/efivar/0002-Add-T-workaround-for-GNU-ld-2.36.patch
@@ -0,0 +1,33 @@
+From 8469d6f72ee2450753f044080b018f9ad7ff62dc Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Mon, 17 Jan 2022 12:34:55 -0500
+Subject: [PATCH] Add -T workaround for GNU ld 2.36
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Resolves: #195
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+Upstream-Status: Backport [https://github.com/rhboot/efivar/commit/197a0874ea4010061b98b4b55eff65b33b1cd741]
+
+ src/include/workarounds.mk | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/include/workarounds.mk b/src/include/workarounds.mk
+index 3118834..143e790 100644
+--- a/src/include/workarounds.mk
++++ b/src/include/workarounds.mk
+@@ -4,12 +4,12 @@
+
+ LD_FLAVOR := $(shell $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/ .*//g')
+ LD_VERSION := $(shell $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/.* //')
+-# I haven't tested 2.36 here; 2.35 is definitely broken and 2.37 seems to work
++# 2.35 is definitely broken and 2.36 seems to work
+ LD_DASH_T := $(shell \
+ if [ "x${LD_FLAVOR}" = xLLD ] ; then \
+ echo '-T' ; \
+ elif [ "x${LD_FLAVOR}" = xGNU ] ; then \
+- if echo "${LD_VERSION}" | grep -q -E '^2\.3[789]|^2\.[456789]|^[3456789]|^[[:digit:]][[:digit:]]' ; then \
++ if echo "${LD_VERSION}" | grep -q -E '^2\.3[6789]|^2\.[456789]|^[3456789]|^[[:digit:]][[:digit:]]' ; then \
+ echo '-T' ; \
+ else \
+ echo "" ; \
diff --git a/meta/recipes-bsp/efivar/efivar/0003-Set-LC_ALL-C-to-force-English-output-from-ld.patch b/meta/recipes-bsp/efivar/efivar/0003-Set-LC_ALL-C-to-force-English-output-from-ld.patch
new file mode 100644
index 0000000000..e53c31a673
--- /dev/null
+++ b/meta/recipes-bsp/efivar/efivar/0003-Set-LC_ALL-C-to-force-English-output-from-ld.patch
@@ -0,0 +1,33 @@
+From 8ea2cf0ab6182f29ecd8568cdc674b2736f6ffba Mon Sep 17 00:00:00 2001
+From: Mike Gilbert <floppym@gentoo.org>
+Date: Fri, 24 Jun 2022 17:00:33 -0400
+Subject: [PATCH] Set LC_ALL=C to force English output from ld
+
+If the user has a different locale set, ld --version may not contain the
+string "GNU ld".
+
+For example, in Italian, ld --version outputs "ld di GNU".
+
+Signed-off-by: Mike Gilbert <floppym@gentoo.org>
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+Upstream-Status: Backport [https://github.com/rhboot/efivar/commit/01de7438520868650bfaa1ef3e2bfaf00cacbcc6]
+
+ src/include/workarounds.mk | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/include/workarounds.mk b/src/include/workarounds.mk
+index 143e790..b72fbaf 100644
+--- a/src/include/workarounds.mk
++++ b/src/include/workarounds.mk
+@@ -2,8 +2,8 @@
+ #
+ # workarounds.mk - workarounds for weird stuff behavior
+
+-LD_FLAVOR := $(shell $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/ .*//g')
+-LD_VERSION := $(shell $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/.* //')
++LD_FLAVOR := $(shell LC_ALL=C $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/ .*//g')
++LD_VERSION := $(shell LC_ALL=C $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/.* //')
+ # 2.35 is definitely broken and 2.36 seems to work
+ LD_DASH_T := $(shell \
+ if [ "x${LD_FLAVOR}" = xLLD ] ; then \
diff --git a/meta/recipes-bsp/efivar/efivar/0004-LLD-fix-detection-and-remove-not-needed-workarounds.patch b/meta/recipes-bsp/efivar/efivar/0004-LLD-fix-detection-and-remove-not-needed-workarounds.patch
new file mode 100644
index 0000000000..f1a545140a
--- /dev/null
+++ b/meta/recipes-bsp/efivar/efivar/0004-LLD-fix-detection-and-remove-not-needed-workarounds.patch
@@ -0,0 +1,45 @@
+From 09b9ddc51cb83ce547872a82271d1af4d11325da Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Tomasz=20Pawe=C5=82=20Gajc?= <tpgxyz@gmail.com>
+Date: Wed, 29 Jun 2022 21:44:29 +0200
+Subject: [PATCH] LLD: fix detection and remove not needed workarounds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Tomasz Paweł Gajc <tpgxyz@gmail.com>
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+Upstream-Status: Backport [https://github.com/rhboot/efivar/commit/1f247260c9b4bd6fcda30f3e4cc358852aeb9e4d]
+
+ src/include/workarounds.mk | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/src/include/workarounds.mk b/src/include/workarounds.mk
+index b72fbaf..57394ed 100644
+--- a/src/include/workarounds.mk
++++ b/src/include/workarounds.mk
+@@ -2,12 +2,12 @@
+ #
+ # workarounds.mk - workarounds for weird stuff behavior
+
+-LD_FLAVOR := $(shell LC_ALL=C $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/ .*//g')
+-LD_VERSION := $(shell LC_ALL=C $(LD) --version | grep -E '^(LLD|GNU ld)'|sed 's/.* //')
++LD_FLAVOR := $(shell LC_ALL=C $(LD) --version | grep -E '^((.* )?LLD|GNU ld)'|sed 's/.* LLD/LLD/;s/ .*//g')
++LD_VERSION := $(shell LC_ALL=C $(LD) --version | grep -E '^((.* )?LLD|GNU ld)'|sed 's/.* LLD/LLD/;s/.* //')
+ # 2.35 is definitely broken and 2.36 seems to work
+ LD_DASH_T := $(shell \
+ if [ "x${LD_FLAVOR}" = xLLD ] ; then \
+- echo '-T' ; \
++ echo "" ; \
+ elif [ "x${LD_FLAVOR}" = xGNU ] ; then \
+ if echo "${LD_VERSION}" | grep -q -E '^2\.3[6789]|^2\.[456789]|^[3456789]|^[[:digit:]][[:digit:]]' ; then \
+ echo '-T' ; \
+@@ -15,7 +15,7 @@ LD_DASH_T := $(shell \
+ echo "" ; \
+ fi ; \
+ else \
+- echo "Your linker is not supported" ; \
++ echo "Your linker ${LD_FLAVOR} version ${LD_VERSION} is not supported" ; \
+ exit 1 ; \
+ fi)
+
diff --git a/meta/recipes-bsp/efivar/efivar/0005-Revamp-efi_well_known_-variable-handling.patch b/meta/recipes-bsp/efivar/efivar/0005-Revamp-efi_well_known_-variable-handling.patch
new file mode 100644
index 0000000000..758a151138
--- /dev/null
+++ b/meta/recipes-bsp/efivar/efivar/0005-Revamp-efi_well_known_-variable-handling.patch
@@ -0,0 +1,262 @@
+From 8c20b2242925616dfccc97b9be29f36afcf8034d Mon Sep 17 00:00:00 2001
+From: Nicholas Vinson <nvinson234@gmail.com>
+Date: Mon, 10 Oct 2022 14:22:36 -0400
+Subject: [PATCH] Revamp efi_well_known_* variable handling
+
+The current implementation attempts to use the linker to create aliases
+for efi_well_known_guids and efi_well_known_names. It also tries to use
+the linker to generate the variables efi_well_known_guids_end and
+efi_well_known_names_end.
+
+When building with clang, the generated linker result results in a
+broken libefivar.so that causes programs to segfault when linked against
+it. This change does away with linker script hackery and instead
+introduces pointers to store the locations of efi_well_known_guids_end
+and efi_well_known_names_end.
+
+Additionally, efi_well_known_guids and efi_well_known_names are now
+created as pointers that point to the beginning of their respective
+arrays.
+
+Signed-off-by: Nicholas Vinson <nvinson234@gmail.com>
+Fixes: #234
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+Upstream-Status: Backport [https://github.com/rhboot/efivar/commit/cfd686de51494d3e34be896a91835657ccab37d4]
+
+ src/Makefile | 7 ++--
+ src/include/rules.mk | 5 +--
+ src/include/workarounds.mk | 24 -------------
+ src/makeguids.c | 72 +++++++++++++-------------------------
+ 4 files changed, 27 insertions(+), 81 deletions(-)
+ delete mode 100644 src/include/workarounds.mk
+
+diff --git a/src/Makefile b/src/Makefile
+index b10051b..c69caf4 100644
+--- a/src/Makefile
++++ b/src/Makefile
+@@ -4,7 +4,6 @@ include $(TOPDIR)/src/include/deprecated.mk
+ include $(TOPDIR)/src/include/version.mk
+ include $(TOPDIR)/src/include/rules.mk
+ include $(TOPDIR)/src/include/defaults.mk
+-include $(TOPDIR)/src/include/workarounds.mk
+
+ LIBTARGETS=libefivar.so libefiboot.so libefisec.so
+ STATICLIBTARGETS=libefivar.a libefiboot.a libefisec.a
+@@ -30,7 +29,7 @@ EFISECDB_OBJECTS = $(patsubst %.S,%.o,$(patsubst %.c,%.o,$(EFISECDB_SOURCES)))
+ GENERATED_SOURCES = include/efivar/efivar-guids.h guid-symbols.c
+ MAKEGUIDS_SOURCES = makeguids.c util-makeguids.c
+ MAKEGUIDS_OBJECTS = $(patsubst %.S,%.o,$(patsubst %.c,%.o,$(MAKEGUIDS_SOURCES)))
+-MAKEGUIDS_OUTPUT = $(GENERATED_SOURCES) guids.lds
++MAKEGUIDS_OUTPUT = $(GENERATED_SOURCES)
+
+ util-makeguids.c :
+ cp util.c util-makeguids.c
+@@ -84,7 +83,7 @@ $(MAKEGUIDS_OUTPUT) : guids.txt
+ if [ "$${missing}" != "no" ]; then \
+ exit 1 ; \
+ fi
+- ./makeguids $(LD_DASH_T) guids.txt guid-symbols.c include/efivar/efivar-guids.h guids.lds
++ ./makeguids guids.txt guid-symbols.c include/efivar/efivar-guids.h
+
+ prep : makeguids $(GENERATED_SOURCES)
+
+@@ -96,7 +95,6 @@ libefivar.a : $(patsubst %.o,%.static.o,$(LIBEFIVAR_OBJECTS))
+ libefivar.so : $(LIBEFIVAR_OBJECTS)
+ libefivar.so : | $(GENERATED_SOURCES) libefivar.map
+ libefivar.so : LIBS=dl
+-libefivar.so : LDSCRIPTS=guids.lds
+ libefivar.so : MAP=libefivar.map
+
+ efivar : $(EFIVAR_OBJECTS) | libefivar.so
+@@ -137,7 +135,6 @@ deps : $(ALL_SOURCES)
+ clean :
+ @rm -rfv *~ *.o *.a *.E *.so *.so.* *.pc *.bin .*.d *.map \
+ makeguids guid-symbols.c include/efivar/efivar-guids.h \
+- guids.lds \
+ $(TARGETS) $(STATICTARGETS)
+ @# remove the deps files we used to create, as well.
+ @rm -rfv .*.P .*.h.P *.S.P include/efivar/.*.h.P
+diff --git a/src/include/rules.mk b/src/include/rules.mk
+index f309f86..8d0b68a 100644
+--- a/src/include/rules.mk
++++ b/src/include/rules.mk
+@@ -3,7 +3,6 @@ default : all
+ .PHONY: default all clean install test
+
+ include $(TOPDIR)/src/include/version.mk
+-include $(TOPDIR)/src/include/workarounds.mk
+
+ comma:= ,
+ empty:=
+@@ -36,9 +35,7 @@ family = $(foreach FAMILY_SUFFIX,$(FAMILY_SUFFIXES),$($(1)_$(FAMILY_SUFFIX)))
+ $(CCLD) $(CCLDFLAGS) $(CPPFLAGS) -o $@ $(sort $^) $(LDLIBS)
+
+ %.so :
+- $(CCLD) $(CCLDFLAGS) $(CPPFLAGS) $(SOFLAGS) \
+- $(foreach LDS,$(LDSCRIPTS),$(LD_DASH_T) $(LDS)) \
+- -o $@ $^ $(LDLIBS)
++ $(CCLD) $(CCLDFLAGS) $(CPPFLAGS) $(SOFLAGS) -o $@ $^ $(LDLIBS)
+ ln -vfs $@ $@.1
+
+ %.abixml : %.so
+diff --git a/src/include/workarounds.mk b/src/include/workarounds.mk
+deleted file mode 100644
+index 57394ed..0000000
+--- a/src/include/workarounds.mk
++++ /dev/null
+@@ -1,24 +0,0 @@
+-# SPDX-License-Identifier: SPDX-License-Identifier: LGPL-2.1-or-later
+-#
+-# workarounds.mk - workarounds for weird stuff behavior
+-
+-LD_FLAVOR := $(shell LC_ALL=C $(LD) --version | grep -E '^((.* )?LLD|GNU ld)'|sed 's/.* LLD/LLD/;s/ .*//g')
+-LD_VERSION := $(shell LC_ALL=C $(LD) --version | grep -E '^((.* )?LLD|GNU ld)'|sed 's/.* LLD/LLD/;s/.* //')
+-# 2.35 is definitely broken and 2.36 seems to work
+-LD_DASH_T := $(shell \
+- if [ "x${LD_FLAVOR}" = xLLD ] ; then \
+- echo "" ; \
+- elif [ "x${LD_FLAVOR}" = xGNU ] ; then \
+- if echo "${LD_VERSION}" | grep -q -E '^2\.3[6789]|^2\.[456789]|^[3456789]|^[[:digit:]][[:digit:]]' ; then \
+- echo '-T' ; \
+- else \
+- echo "" ; \
+- fi ; \
+- else \
+- echo "Your linker ${LD_FLAVOR} version ${LD_VERSION} is not supported" ; \
+- exit 1 ; \
+- fi)
+-
+-export LD_DASH_T
+-
+-# vim:ft=make
+diff --git a/src/makeguids.c b/src/makeguids.c
+index e4ff411..b9e9312 100644
+--- a/src/makeguids.c
++++ b/src/makeguids.c
+@@ -107,51 +107,46 @@ write_guidnames(FILE *out, const char *listname,
+ gn->symbol, gn->name, gn->description);
+ }
+ fprintf(out, "};\n");
++ fprintf(out, "const struct efivar_guidname\n"
++ "\t__attribute__((__visibility__ (\"default\")))\n"
++ "\t* const %s = %s_;\n", listname, listname);
++ fprintf(out, "const struct efivar_guidname\n"
++ "\t__attribute__((__visibility__ (\"default\")))\n"
++ "\t* const %s_end = %s_\n\t+ %zd;\n",
++ listname, listname, n - 1);
+ }
+
+ int
+ main(int argc, char *argv[])
+ {
+ int rc;
+- int argstart = 0;
+- FILE *symout, *header, *ldsout;
+- int dash_t = 0;
++ FILE *symout, *header;
+
+- if (argc < 5) {
++ if (argc < 4) {
+ errx(1, "Not enough arguments.\n");
+- } else if (argc > 5 && !strcmp(argv[1],"-T")) {
+- argstart = 1;
+- dash_t = 1;
+- } else if (argc > 5) {
++ } else if (argc > 4) {
+ errx(1, "Too many arguments.\n");
+ }
+
+- symout = fopen(argv[argstart + 2], "w");
++ symout = fopen(argv[2], "w");
+ if (symout == NULL)
+- err(1, "could not open \"%s\"", argv[argstart + 2]);
+- rc = chmod(argv[argstart + 2], 0644);
++ err(1, "could not open \"%s\"", argv[2]);
++ rc = chmod(argv[2], 0644);
+ if (rc < 0)
+- warn("chmod(%s, 0644)", argv[argstart + 2]);
++ warn("chmod(%s, 0644)", argv[2]);
+
+- header = fopen(argv[argstart + 3], "w");
++ header = fopen(argv[3], "w");
+ if (header == NULL)
+- err(1, "could not open \"%s\"", argv[argstart + 3]);
+- rc = chmod(argv[argstart + 3], 0644);
+- if (rc < 0)
+- warn("chmod(%s, 0644)", argv[argstart + 3]);
+-
+- ldsout = fopen(argv[argstart + 4], "w");
+- if (ldsout == NULL)
+- err(1, "could not open \"%s\"", argv[argstart + 4]);
+- rc = chmod(argv[argstart + 4], 0644);
++ err(1, "could not open \"%s\"", argv[3]);
++ rc = chmod(argv[3], 0644);
+ if (rc < 0)
+- warn("chmod(%s, 0644)", argv[argstart + 4]);
++ warn("chmod(%s, 0644)", argv[3]);
+
+ struct guidname_index *guidnames = NULL;
+
+- rc = read_guids_at(AT_FDCWD, argv[argstart + 1], &guidnames);
++ rc = read_guids_at(AT_FDCWD, argv[1], &guidnames);
+ if (rc < 0)
+- err(1, "could not read \"%s\"", argv[argstart + 1]);
++ err(1, "could not read \"%s\"", argv[1]);
+
+ struct efivar_guidname *outbuf;
+
+@@ -239,12 +234,11 @@ struct efivar_guidname {\n\
+ fprintf(header,
+ "extern const struct efivar_guidname\n"
+ "\t__attribute__((__visibility__ (\"default\")))\n"
+- "\tefi_well_known_guids[%d];\n",
+- i);
++ "\t* const efi_well_known_guids;\n");
+ fprintf(header,
+ "extern const struct efivar_guidname\n"
+ "\t__attribute__((__visibility__ (\"default\")))\n"
+- "\tefi_well_known_guids_end;\n");
++ "\t* const efi_well_known_guids_end;\n");
+ fprintf(header,
+ "extern const uint64_t\n"
+ "\t__attribute__((__visibility__ (\"default\")))\n"
+@@ -252,12 +246,11 @@ struct efivar_guidname {\n\
+ fprintf(header,
+ "extern const struct efivar_guidname\n"
+ "\t__attribute__((__visibility__ (\"default\")))\n"
+- "\tefi_well_known_names[%d];\n",
+- i);
++ "\t* const efi_well_known_names;\n");
+ fprintf(header,
+ "extern const struct efivar_guidname\n"
+ "\t__attribute__((__visibility__ (\"default\")))\n"
+- "\tefi_well_known_names_end;\n");
++ "\t* const efi_well_known_names_end;\n");
+ fprintf(header,
+ "extern const uint64_t\n"
+ "\t__attribute__((__visibility__ (\"default\")))\n"
+@@ -302,23 +295,6 @@ struct efivar_guidname {\n\
+
+ fclose(symout);
+
+- fprintf(ldsout,
+- "SECTIONS\n"
+- "{\n"
+- " .data :\n"
+- " {\n"
+- " efi_well_known_guids = efi_well_known_guids_;\n"
+- " efi_well_known_guids_end = efi_well_known_guids_ + %zd;\n"
+- " efi_well_known_names = efi_well_known_names_;\n"
+- " efi_well_known_names_end = efi_well_known_names_ + %zd;\n"
+- " }\n"
+- "}%s;\n",
+- (line - 1) * sizeof(struct efivar_guidname),
+- (line - 1) * sizeof(struct efivar_guidname),
+- dash_t ? " INSERT AFTER .data" : "");
+-
+- fclose(ldsout);
+-
+ free(guidnames->strtab);
+ free(guidnames);
+
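
The revamp replaces the guids.lds linker-script aliases with exported pointer constants initialised from the generated array. A generic sketch of that pattern (names are illustrative, not the actual efivar symbols; note the real patch points its *_end pointer at the last entry rather than one past it):

    #include <stdio.h>

    struct item {
        int id;
        const char *name;
    };

    /* The generated array, analogous to efi_well_known_guids_ / names_. */
    static const struct item items_[] = {
        { 1, "first"  },
        { 2, "second" },
        { 3, "third"  },
    };

    /* What the deleted guids.lds used to provide via SECTIONS aliases:
     * exported pointers to the start and end of the array. */
    const struct item *const items     = items_;
    const struct item *const items_end = items_ + sizeof(items_) / sizeof(items_[0]);

    int main(void)
    {
        for (const struct item *p = items; p < items_end; p++)
            printf("%d %s\n", p->id, p->name);
        return 0;
    }
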
diff --git a/meta/recipes-bsp/efivar/efivar_38.bb b/meta/recipes-bsp/efivar/efivar_38.bb
index 53fe20a95b..dc84b3732f 100644
--- a/meta/recipes-bsp/efivar/efivar_38.bb
+++ b/meta/recipes-bsp/efivar/efivar_38.bb
@@ -11,6 +11,12 @@ SRC_URI = "git://github.com/rhinstaller/efivar.git;branch=main;protocol=https \
file://0001-docs-do-not-build-efisecdb-manpage.patch \
file://0001-src-Makefile-build-util.c-separately-for-makeguids.patch \
file://efisecdb-fix-build-with-musl-libc.patch \
+ file://0001-Fix-invalid-free-in-main.patch \
+ file://0001-Remove-deprecated-add-needed-linker-flag.patch \
+ file://0002-Add-T-workaround-for-GNU-ld-2.36.patch \
+ file://0003-Set-LC_ALL-C-to-force-English-output-from-ld.patch \
+ file://0004-LLD-fix-detection-and-remove-not-needed-workarounds.patch \
+ file://0005-Revamp-efi_well_known_-variable-handling.patch \
"
SRCREV = "1753149d4176ebfb2b135ac0aaf79340bf0e7a93"
@@ -20,10 +26,6 @@ inherit pkgconfig
export CCLD_FOR_BUILD = "${BUILD_CCLD}"
-# Upstream uses --add-needed in gcc.specs which gold doesn't support, so
-# enforce BFD.
-LDFLAGS += "-fuse-ld=bfd"
-
do_compile() {
oe_runmake ERRORS= HOST_CFLAGS="${BUILD_CFLAGS}" HOST_LDFLAGS="${BUILD_LDFLAGS}"
}
diff --git a/meta/recipes-bsp/grub/files/0001-font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch b/meta/recipes-bsp/grub/files/0001-font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch
new file mode 100644
index 0000000000..efa00a3c6c
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0001-font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch
@@ -0,0 +1,115 @@
+From 1f511ae054fe42dce7aedfbfe0f234fa1e0a7a3e Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Fri, 5 Aug 2022 00:51:20 +0800
+Subject: [PATCH] font: Fix size overflow in grub_font_get_glyph_internal()
+
+The length of memory allocation and file read may overflow. This patch
+fixes the problem by using safemath macros.
+
+There is a lot of code repetition like "(x * y + 7) / 8". It is unsafe
+if overflow happens. This patch introduces grub_video_bitmap_calc_1bpp_bufsz().
+It is a safe replacement for such code. It has a safemath-like prototype.
+
+This patch also introduces grub_cast(value, pointer), it casts value to
+typeof(*pointer) then store the value to *pointer. It returns true when
+overflow occurs or false if there is no overflow. The semantics of arguments
+and return value are designed to be consistent with other safemath macros.
+
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport from
+[https://git.savannah.gnu.org/cgit/grub.git/commit/?id=9c76ec09ae08155df27cd237eaea150b4f02f532]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+
+---
+ grub-core/font/font.c | 17 +++++++++++++----
+ include/grub/bitmap.h | 18 ++++++++++++++++++
+ include/grub/safemath.h | 2 ++
+ 3 files changed, 33 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index d09bb38..876b5b6 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -739,7 +739,8 @@ grub_font_get_glyph_internal (grub_font_t font, grub_uint32_t code)
+ grub_int16_t xoff;
+ grub_int16_t yoff;
+ grub_int16_t dwidth;
+- int len;
++ grub_ssize_t len;
++ grub_size_t sz;
+
+ if (index_entry->glyph)
+ /* Return cached glyph. */
+@@ -766,9 +767,17 @@ grub_font_get_glyph_internal (grub_font_t font, grub_uint32_t code)
+ return 0;
+ }
+
+- len = (width * height + 7) / 8;
+- glyph = grub_malloc (sizeof (struct grub_font_glyph) + len);
+- if (!glyph)
++ /* Calculate real struct size of current glyph. */
++ if (grub_video_bitmap_calc_1bpp_bufsz (width, height, &len) ||
++ grub_add (sizeof (struct grub_font_glyph), len, &sz))
++ {
++ remove_font (font);
++ return 0;
++ }
++
++ /* Allocate and initialize the glyph struct. */
++ glyph = grub_malloc (sz);
++ if (glyph == NULL)
+ {
+ remove_font (font);
+ return 0;
+diff --git a/include/grub/bitmap.h b/include/grub/bitmap.h
+index 5728f8c..0d9603f 100644
+--- a/include/grub/bitmap.h
++++ b/include/grub/bitmap.h
+@@ -23,6 +23,7 @@
+ #include <grub/symbol.h>
+ #include <grub/types.h>
+ #include <grub/video.h>
++#include <grub/safemath.h>
+
+ struct grub_video_bitmap
+ {
+@@ -79,6 +80,23 @@ grub_video_bitmap_get_height (struct grub_video_bitmap *bitmap)
+ return bitmap->mode_info.height;
+ }
+
++/*
++ * Calculate and store the size of data buffer of 1bit bitmap in result.
++ * Equivalent to "*result = (width * height + 7) / 8" if no overflow occurs.
++ * Return true when overflow occurs or false if there is no overflow.
++ * This function is intentionally implemented as a macro instead of
++ * an inline function. Although a bit awkward, it preserves data types for
++ * safemath macros and reduces macro side effects as much as possible.
++ *
++ * XXX: Will report false overflow if width * height > UINT64_MAX.
++ */
++#define grub_video_bitmap_calc_1bpp_bufsz(width, height, result) \
++({ \
++ grub_uint64_t _bitmap_pixels; \
++ grub_mul ((width), (height), &_bitmap_pixels) ? 1 : \
++ grub_cast (_bitmap_pixels / GRUB_CHAR_BIT + !!(_bitmap_pixels % GRUB_CHAR_BIT), (result)); \
++})
++
+ void EXPORT_FUNC (grub_video_bitmap_get_mode_info) (struct grub_video_bitmap *bitmap,
+ struct grub_video_mode_info *mode_info);
+
+diff --git a/include/grub/safemath.h b/include/grub/safemath.h
+index c17b89b..bb0f826 100644
+--- a/include/grub/safemath.h
++++ b/include/grub/safemath.h
+@@ -30,6 +30,8 @@
+ #define grub_sub(a, b, res) __builtin_sub_overflow(a, b, res)
+ #define grub_mul(a, b, res) __builtin_mul_overflow(a, b, res)
+
++#define grub_cast(a, res) grub_add ((a), 0, (res))
++
+ #else
+ #error gcc 5.1 or newer or clang 3.8 or newer is required
+ #endif
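
The helper added here wraps the common "(width * height + 7) / 8" in overflow-checked arithmetic. A standalone sketch of the same idea, using the GCC/Clang builtins that grub's safemath.h wraps (not the actual grub macro; it only mirrors the calling convention, where true means overflow):

    #include <limits.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns true on overflow, false on success, like the grub safemath macros. */
    static bool calc_1bpp_bufsz(uint64_t width, uint64_t height, size_t *result)
    {
        uint64_t pixels;

        if (__builtin_mul_overflow(width, height, &pixels))
            return true;                     /* width * height overflowed */

        /* bytes = pixels / 8, rounded up; the add also checks the size_t cast */
        return __builtin_add_overflow(pixels / CHAR_BIT,
                                      (pixels % CHAR_BIT) ? 1 : 0, result);
    }

    int main(void)
    {
        size_t sz;

        printf("overflow: %d\n", calc_1bpp_bufsz(UINT64_MAX, 2, &sz));   /* 1 */
        if (!calc_1bpp_bufsz(13, 7, &sz))
            printf("13x7 glyph needs %zu bytes\n", sz);                  /* 12 */
        return 0;
    }
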
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3695-video-readers-png-Drop-greyscale-support-to-fix-heap.patch b/meta/recipes-bsp/grub/files/CVE-2021-3695-video-readers-png-Drop-greyscale-support-to-fix-heap.patch
new file mode 100644
index 0000000000..7f7bb1acfe
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3695-video-readers-png-Drop-greyscale-support-to-fix-heap.patch
@@ -0,0 +1,179 @@
+From e623866d9286410156e8b9d2c82d6253a1b22d08 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Tue, 6 Jul 2021 18:51:35 +1000
+Subject: [PATCH] video/readers/png: Drop greyscale support to fix heap
+ out-of-bounds write
+
+A 16-bit greyscale PNG without alpha is processed in the following loop:
+
+ for (i = 0; i < (data->image_width * data->image_height);
+ i++, d1 += 4, d2 += 2)
+ {
+ d1[R3] = d2[1];
+ d1[G3] = d2[1];
+ d1[B3] = d2[1];
+ }
+
+The increment of d1 is wrong. d1 is incremented by 4 bytes per iteration,
+but there are only 3 bytes allocated for storage. This means that image
+data will overwrite somewhat-attacker-controlled parts of memory - 3 bytes
+out of every 4 following the end of the image.
+
+This has existed since greyscale support was added in 2013 in commit
+3ccf16dff98f (grub-core/video/readers/png.c: Support grayscale).
+
+Saving starfield.png as a 16-bit greyscale image without alpha in the gimp
+and attempting to load it causes grub-emu to crash - I don't think this code
+has ever worked.
+
+Delete all PNG greyscale support.
+
+Fixes: CVE-2021-3695
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3695
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=e623866d9286410156e8b9d2c82d6253a1b22d08
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/video/readers/png.c | 87 +++--------------------------------
+ 1 file changed, 7 insertions(+), 80 deletions(-)
+
+diff --git a/grub-core/video/readers/png.c b/grub-core/video/readers/png.c
+index 35ae553c8..a3161e25b 100644
+--- a/grub-core/video/readers/png.c
++++ b/grub-core/video/readers/png.c
+@@ -100,7 +100,7 @@ struct grub_png_data
+
+ unsigned image_width, image_height;
+ int bpp, is_16bit;
+- int raw_bytes, is_gray, is_alpha, is_palette;
++ int raw_bytes, is_alpha, is_palette;
+ int row_bytes, color_bits;
+ grub_uint8_t *image_data;
+
+@@ -296,13 +296,13 @@ grub_png_decode_image_header (struct grub_png_data *data)
+ data->bpp = 3;
+ else
+ {
+- data->is_gray = 1;
+- data->bpp = 1;
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "png: color type not supported");
+ }
+
+ if ((color_bits != 8) && (color_bits != 16)
+ && (color_bits != 4
+- || !(data->is_gray || data->is_palette)))
++ || !data->is_palette))
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
+ "png: bit depth must be 8 or 16");
+
+@@ -331,7 +331,7 @@ grub_png_decode_image_header (struct grub_png_data *data)
+ }
+
+ #ifndef GRUB_CPU_WORDS_BIGENDIAN
+- if (data->is_16bit || data->is_gray || data->is_palette)
++ if (data->is_16bit || data->is_palette)
+ #endif
+ {
+ data->image_data = grub_calloc (data->image_height, data->row_bytes);
+@@ -899,27 +899,8 @@ grub_png_convert_image (struct grub_png_data *data)
+ int shift;
+ int mask = (1 << data->color_bits) - 1;
+ unsigned j;
+- if (data->is_gray)
+- {
+- /* Generic formula is
+- (0xff * i) / ((1U << data->color_bits) - 1)
+- but for allowed bit depth of 1, 2 and for it's
+- equivalent to
+- (0xff / ((1U << data->color_bits) - 1)) * i
+- Precompute the multipliers to avoid division.
+- */
+-
+- const grub_uint8_t multipliers[5] = { 0xff, 0xff, 0x55, 0x24, 0x11 };
+- for (i = 0; i < (1U << data->color_bits); i++)
+- {
+- grub_uint8_t col = multipliers[data->color_bits] * i;
+- palette[i][0] = col;
+- palette[i][1] = col;
+- palette[i][2] = col;
+- }
+- }
+- else
+- grub_memcpy (palette, data->palette, 3 << data->color_bits);
++
++ grub_memcpy (palette, data->palette, 3 << data->color_bits);
+ d1c = d1;
+ d2c = d2;
+ for (j = 0; j < data->image_height; j++, d1c += data->image_width * 3,
+@@ -957,60 +938,6 @@ grub_png_convert_image (struct grub_png_data *data)
+ return;
+ }
+
+- if (data->is_gray)
+- {
+- switch (data->bpp)
+- {
+- case 4:
+- /* 16-bit gray with alpha. */
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 4)
+- {
+- d1[R4] = d2[3];
+- d1[G4] = d2[3];
+- d1[B4] = d2[3];
+- d1[A4] = d2[1];
+- }
+- break;
+- case 2:
+- if (data->is_16bit)
+- /* 16-bit gray without alpha. */
+- {
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 2)
+- {
+- d1[R3] = d2[1];
+- d1[G3] = d2[1];
+- d1[B3] = d2[1];
+- }
+- }
+- else
+- /* 8-bit gray with alpha. */
+- {
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 2)
+- {
+- d1[R4] = d2[1];
+- d1[G4] = d2[1];
+- d1[B4] = d2[1];
+- d1[A4] = d2[0];
+- }
+- }
+- break;
+- /* 8-bit gray without alpha. */
+- case 1:
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 3, d2++)
+- {
+- d1[R3] = d2[0];
+- d1[G3] = d2[0];
+- d1[B3] = d2[0];
+- }
+- break;
+- }
+- return;
+- }
+-
+ {
+ /* Only copy the upper 8 bit. */
+ #ifndef GRUB_CPU_WORDS_BIGENDIAN
+--
+2.34.1
+
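
A short worked example of the arithmetic behind the overwrite described above: the output buffer holds 3 bytes per pixel, but the 16-bit greyscale loop advanced its output pointer by 4 bytes per pixel (illustrative numbers, not grub code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long width = 100, height = 100;        /* any non-trivial image */
        unsigned long pixels     = width * height;
        unsigned long allocated  = pixels * 3;          /* 3 bytes per RGB pixel */
        unsigned long last_write = (pixels - 1) * 4 + 2; /* loop stride was 4 */

        printf("buffer: %lu bytes, last write at offset %lu (%lu bytes past the end)\n",
               allocated, last_write, last_write - (allocated - 1));
        return 0;
    }
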
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3696-video-readers-png-Avoid-heap-OOB-R-W-inserting-huff.patch b/meta/recipes-bsp/grub/files/CVE-2021-3696-video-readers-png-Avoid-heap-OOB-R-W-inserting-huff.patch
new file mode 100644
index 0000000000..f06514e665
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3696-video-readers-png-Avoid-heap-OOB-R-W-inserting-huff.patch
@@ -0,0 +1,50 @@
+From 210245129c932dc9e1c2748d9d35524fb95b5042 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Tue, 6 Jul 2021 23:25:07 +1000
+Subject: [PATCH] video/readers/png: Avoid heap OOB R/W inserting huff table
+ items
+
+In fuzzing we observed crashes where a code would attempt to be inserted
+into a huffman table before the start, leading to a set of heap OOB reads
+and writes as table entries with negative indices were shifted around and
+the new code written in.
+
+Catch the case where we would underflow the array and bail.
+
+Fixes: CVE-2021-3696
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3696
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=210245129c932dc9e1c2748d9d35524fb95b5042
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/video/readers/png.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/grub-core/video/readers/png.c b/grub-core/video/readers/png.c
+index a3161e25b..d7ed5aa6c 100644
+--- a/grub-core/video/readers/png.c
++++ b/grub-core/video/readers/png.c
+@@ -438,6 +438,13 @@ grub_png_insert_huff_item (struct huff_table *ht, int code, int len)
+ for (i = len; i < ht->max_length; i++)
+ n += ht->maxval[i];
+
++ if (n > ht->num_values)
++ {
++ grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "png: out of range inserting huffman table item");
++ return;
++ }
++
+ for (i = 0; i < n; i++)
+ ht->values[ht->num_values - i] = ht->values[ht->num_values - i - 1];
+
+--
+2.34.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3697-video-readers-jpeg-Block-int-underflow-wild-pointer.patch b/meta/recipes-bsp/grub/files/CVE-2021-3697-video-readers-jpeg-Block-int-underflow-wild-pointer.patch
new file mode 100644
index 0000000000..e9fc52df86
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3697-video-readers-jpeg-Block-int-underflow-wild-pointer.patch
@@ -0,0 +1,84 @@
+From 22a3f97d39f6a10b08ad7fd1cc47c4dcd10413f6 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Wed, 7 Jul 2021 15:38:19 +1000
+Subject: [PATCH] video/readers/jpeg: Block int underflow -> wild pointer write
+
+Certain 1 px wide images caused a wild pointer write in
+grub_jpeg_ycrcb_to_rgb(). This was caused because in grub_jpeg_decode_data(),
+we have the following loop:
+
+for (; data->r1 < nr1 && (!data->dri || rst);
+ data->r1++, data->bitmap_ptr += (vb * data->image_width - hb * nc1) * 3)
+
+We did not check if vb * width >= hb * nc1.
+
+On a 64-bit platform, if that turns out to be negative, it will underflow,
+be interpreted as unsigned 64-bit, then be added to the 64-bit pointer, so
+we see data->bitmap_ptr jump, e.g.:
+
+0x6180_0000_0480 to
+0x6181_0000_0498
+ ^
+ ~--- carry has occurred and this pointer is now far away from
+ any object.
+
+On a 32-bit platform, it will decrement the pointer, creating a pointer
+that won't crash but will overwrite random data.
+
+Catch the underflow and error out.
+
+Fixes: CVE-2021-3697
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3697
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=22a3f97d39f6a10b08ad7fd1cc47c4dcd10413f6
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/video/readers/jpeg.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/video/readers/jpeg.c b/grub-core/video/readers/jpeg.c
+index 579bbe8a4..09596fbf5 100644
+--- a/grub-core/video/readers/jpeg.c
++++ b/grub-core/video/readers/jpeg.c
+@@ -23,6 +23,7 @@
+ #include <grub/mm.h>
+ #include <grub/misc.h>
+ #include <grub/bufio.h>
++#include <grub/safemath.h>
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+@@ -699,6 +700,7 @@ static grub_err_t
+ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ {
+ unsigned c1, vb, hb, nr1, nc1;
++ unsigned stride_a, stride_b, stride;
+ int rst = data->dri;
+ grub_err_t err = GRUB_ERR_NONE;
+
+@@ -711,8 +713,14 @@ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
+ "jpeg: attempted to decode data before start of stream");
+
++ if (grub_mul(vb, data->image_width, &stride_a) ||
++ grub_mul(hb, nc1, &stride_b) ||
++ grub_sub(stride_a, stride_b, &stride))
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: cannot decode image with these dimensions");
++
+ for (; data->r1 < nr1 && (!data->dri || rst);
+- data->r1++, data->bitmap_ptr += (vb * data->image_width - hb * nc1) * 3)
++ data->r1++, data->bitmap_ptr += stride * 3)
+ for (c1 = 0; c1 < nc1 && (!data->dri || rst);
+ c1++, rst--, data->bitmap_ptr += hb * 3)
+ {
+--
+2.34.1
+
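
The key change above is computing the per-row stride with checked arithmetic so that "vb * image_width - hb * nc1" can no longer go negative and wrap. A standalone sketch using the builtins behind grub_mul/grub_sub (not the actual grub code; the sample dimensions are made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns true if the stride cannot be computed without wrapping. */
    static bool calc_stride(unsigned vb, unsigned width,
                            unsigned hb, unsigned nc1, unsigned *stride)
    {
        unsigned a, b;

        return __builtin_mul_overflow(vb, width, &a) ||
               __builtin_mul_overflow(hb, nc1, &b) ||
               __builtin_sub_overflow(a, b, stride);   /* catches a < b */
    }

    int main(void)
    {
        unsigned stride;

        /* A 1 px wide image where hb * nc1 exceeds vb * width: reject it. */
        printf("bad: %d\n", calc_stride(8, 1, 16, 1, &stride));
        if (!calc_stride(8, 640, 8, 80, &stride))
            printf("stride: %u\n", stride);            /* 5120 - 640 = 4480 */
        return 0;
    }
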
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-2601.patch b/meta/recipes-bsp/grub/files/CVE-2022-2601.patch
new file mode 100644
index 0000000000..727c509694
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-2601.patch
@@ -0,0 +1,85 @@
+From e8060722acf0bcca037982d7fb29472363ccdfd4 Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Fri, 5 Aug 2022 01:58:27 +0800
+Subject: [PATCH] font: Fix several integer overflows in
+ grub_font_construct_glyph()
+
+This patch fixes several integer overflows in grub_font_construct_glyph().
+Glyphs of invalid size, zero or leading to an overflow, are rejected.
+The inconsistency between "glyph" and "max_glyph_size" when grub_malloc()
+returns NULL is fixed too.
+
+Fixes: CVE-2022-2601
+
+Reported-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport from
+[https://git.savannah.gnu.org/cgit/grub.git/commit/?id=768e1ef2fc159f6e14e7246e4be09363708ac39e]
+CVE: CVE-2022-2601
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+
+---
+ grub-core/font/font.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index 876b5b6..0ff5525 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -1515,6 +1515,7 @@ grub_font_construct_glyph (grub_font_t hinted_font,
+ struct grub_video_signed_rect bounds;
+ static struct grub_font_glyph *glyph = 0;
+ static grub_size_t max_glyph_size = 0;
++ grub_size_t cur_glyph_size;
+
+ ensure_comb_space (glyph_id);
+
+@@ -1531,29 +1532,33 @@ grub_font_construct_glyph (grub_font_t hinted_font,
+ if (!glyph_id->ncomb && !glyph_id->attributes)
+ return main_glyph;
+
+- if (max_glyph_size < sizeof (*glyph) + (bounds.width * bounds.height + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT)
++ if (grub_video_bitmap_calc_1bpp_bufsz (bounds.width, bounds.height, &cur_glyph_size) ||
++ grub_add (sizeof (*glyph), cur_glyph_size, &cur_glyph_size))
++ return main_glyph;
++
++ if (max_glyph_size < cur_glyph_size)
+ {
+ grub_free (glyph);
+- max_glyph_size = (sizeof (*glyph) + (bounds.width * bounds.height + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT) * 2;
+- if (max_glyph_size < 8)
+- max_glyph_size = 8;
+- glyph = grub_malloc (max_glyph_size);
++ if (grub_mul (cur_glyph_size, 2, &max_glyph_size))
++ max_glyph_size = 0;
++ glyph = max_glyph_size > 0 ? grub_malloc (max_glyph_size) : NULL;
+ }
+ if (!glyph)
+ {
++ max_glyph_size = 0;
+ grub_errno = GRUB_ERR_NONE;
+ return main_glyph;
+ }
+
+- grub_memset (glyph, 0, sizeof (*glyph)
+- + (bounds.width * bounds.height
+- + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT);
++ grub_memset (glyph, 0, cur_glyph_size);
+
+ glyph->font = main_glyph->font;
+- glyph->width = bounds.width;
+- glyph->height = bounds.height;
+- glyph->offset_x = bounds.x;
+- glyph->offset_y = bounds.y;
++ if (bounds.width == 0 || bounds.height == 0 ||
++ grub_cast (bounds.width, &glyph->width) ||
++ grub_cast (bounds.height, &glyph->height) ||
++ grub_cast (bounds.x, &glyph->offset_x) ||
++ grub_cast (bounds.y, &glyph->offset_y))
++ return main_glyph;
+
+ if (glyph_id->attributes & GRUB_UNICODE_GLYPH_ATTRIBUTE_MIRROR)
+ grub_font_blit_glyph_mirror (glyph, main_glyph,
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28733-net-ip-Do-IP-fragment-maths-safely.patch b/meta/recipes-bsp/grub/files/CVE-2022-28733-net-ip-Do-IP-fragment-maths-safely.patch
new file mode 100644
index 0000000000..8bf9090f94
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28733-net-ip-Do-IP-fragment-maths-safely.patch
@@ -0,0 +1,63 @@
+From 3e4817538de828319ba6d59ced2fbb9b5ca13287 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Mon, 20 Dec 2021 19:41:21 +1100
+Subject: [PATCH] net/ip: Do IP fragment maths safely
+
+We can receive packets with invalid IP fragmentation information. This
+can lead to rsm->total_len underflowing and becoming very large.
+
+Then, in grub_netbuff_alloc(), we add to this very large number, which can
+cause it to overflow and wrap back around to a small positive number.
+The allocation then succeeds, but the resulting buffer is too small and
+subsequent operations can write past the end of the buffer.
+
+Catch the underflow here.
+
+Fixes: CVE-2022-28733
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-28733
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=3e4817538de828319ba6d59ced2fbb9b5ca13287
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+
+---
+ grub-core/net/ip.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/net/ip.c b/grub-core/net/ip.c
+index e3d62e97f..3c3d0be0e 100644
+--- a/grub-core/net/ip.c
++++ b/grub-core/net/ip.c
+@@ -25,6 +25,7 @@
+ #include <grub/net/netbuff.h>
+ #include <grub/mm.h>
+ #include <grub/priority_queue.h>
++#include <grub/safemath.h>
+ #include <grub/time.h>
+
+ struct iphdr {
+@@ -512,7 +513,14 @@ grub_net_recv_ip4_packets (struct grub_net_buff *nb,
+ {
+ rsm->total_len = (8 * (grub_be_to_cpu16 (iph->frags) & OFFSET_MASK)
+ + (nb->tail - nb->data));
+- rsm->total_len -= ((iph->verhdrlen & 0xf) * sizeof (grub_uint32_t));
++
++ if (grub_sub (rsm->total_len, (iph->verhdrlen & 0xf) * sizeof (grub_uint32_t),
++ &rsm->total_len))
++ {
++ grub_dprintf ("net", "IP reassembly size underflow\n");
++ return GRUB_ERR_NONE;
++ }
++
+ rsm->asm_netbuff = grub_netbuff_alloc (rsm->total_len);
+ if (!rsm->asm_netbuff)
+ {
+--
+2.34.1
+
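As a standalone illustration (not part of the patch above), the underflow that grub_sub() reports can also be caught with a plain comparison; ip_payload_len() is a hypothetical helper name.

#include <stdint.h>
#include <stdbool.h>

/* Sketch only: subtracting the IP header length from the reassembly total
   must not underflow; reject the packet instead of letting the length wrap. */
static bool
ip_payload_len (uint32_t total_len, uint32_t header_len, uint32_t *out)
{
  if (header_len > total_len)
    return false;               /* would underflow: drop the fragment */

  *out = total_len - header_len;
  return true;
}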
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Error-out-on-headers-with-LF-without-CR.patch b/meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Error-out-on-headers-with-LF-without-CR.patch
new file mode 100644
index 0000000000..f31167d315
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Error-out-on-headers-with-LF-without-CR.patch
@@ -0,0 +1,58 @@
+From b26b4c08e7119281ff30d0fb4a6169bd2afa8fe4 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Tue, 8 Mar 2022 19:04:40 +1100
+Subject: [PATCH] net/http: Error out on headers with LF without CR
+
+In a similar vein to the previous patch, parse_line() would write
+a NUL byte past the end of the buffer if there was an HTTP header
+with a LF rather than a CRLF.
+
+RFC-2616 says:
+
+ Many HTTP/1.1 header field values consist of words separated by LWS
+ or special characters. These special characters MUST be in a quoted
+ string to be used within a parameter value (as defined in section 3.6).
+
+We don't support quoted sections or continuation lines, etc.
+
+If we see an LF that's not part of a CRLF, bail out.
+
+Fixes: CVE-2022-28734
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-28734
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=b26b4c08e7119281ff30d0fb4a6169bd2afa8fe4
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/net/http.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/grub-core/net/http.c b/grub-core/net/http.c
+index 33a0a28c4..9291a13e2 100644
+--- a/grub-core/net/http.c
++++ b/grub-core/net/http.c
+@@ -68,7 +68,15 @@ parse_line (grub_file_t file, http_data_t data, char *ptr, grub_size_t len)
+ char *end = ptr + len;
+ while (end > ptr && *(end - 1) == '\r')
+ end--;
++
++ /* LF without CR. */
++ if (end == ptr + len)
++ {
++ data->errmsg = grub_strdup (_("invalid HTTP header - LF without CR"));
++ return GRUB_ERR_NONE;
++ }
+ *end = 0;
++
+ /* Trailing CRLF. */
+ if (data->in_chunk_len == 1)
+ {
+--
+2.34.1
+
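The added check works because stripping trailing '\r' characters leaves end unchanged only when the line ended in a bare LF. A self-contained sketch of that logic (not part of the patch; trim_header_line() is a hypothetical name):

#include <stddef.h>
#include <stdbool.h>

/* Sketch only: ptr..ptr+len is a header line whose '\n' has already been
   consumed.  Strip the trailing '\r' and reject lines that ended in LF
   without CR, as the patch above does. */
static bool
trim_header_line (char *ptr, size_t len, size_t *out_len)
{
  char *end = ptr + len;

  while (end > ptr && *(end - 1) == '\r')
    end--;

  if (end == ptr + len)
    return false;               /* no '\r' was stripped: bare LF */

  *end = '\0';
  *out_len = (size_t) (end - ptr);
  return true;
}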
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Fix-OOB-write-for-split-http-headers.patch b/meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Fix-OOB-write-for-split-http-headers.patch
new file mode 100644
index 0000000000..e0ca1eec44
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28734-net-http-Fix-OOB-write-for-split-http-headers.patch
@@ -0,0 +1,56 @@
+From ec6bfd3237394c1c7dbf2fd73417173318d22f4b Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Tue, 8 Mar 2022 18:17:03 +1100
+Subject: [PATCH] net/http: Fix OOB write for split http headers
+
+GRUB has special code for handling an http header that is split
+across two packets.
+
+The code tracks the end of line by looking for a "\n" byte. The
+code for split headers has always advanced the pointer just past the
+end of the line, whereas the code that handles unsplit headers does
+not advance the pointer. This extra advance causes the length to be
+one greater, which breaks an assumption in parse_line(), leading to
+it writing a NUL byte one byte past the end of the buffer where we
+reconstruct the line from the two packets.
+
+It's conceivable that an attacker-controlled set of packets could
+cause this to zero out the first byte of the "next" pointer of the
+grub_mm_region structure following the current_line buffer.
+
+Do not advance the pointer in the split header case.
+
+Fixes: CVE-2022-28734
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-28734
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=ec6bfd3237394c1c7dbf2fd73417173318d22f4b
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/net/http.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/grub-core/net/http.c b/grub-core/net/http.c
+index f8d7bf0cd..33a0a28c4 100644
+--- a/grub-core/net/http.c
++++ b/grub-core/net/http.c
+@@ -190,9 +190,7 @@ http_receive (grub_net_tcp_socket_t sock __attribute__ ((unused)),
+ int have_line = 1;
+ char *t;
+ ptr = grub_memchr (nb->data, '\n', nb->tail - nb->data);
+- if (ptr)
+- ptr++;
+- else
++ if (ptr == NULL)
+ {
+ have_line = 0;
+ ptr = (char *) nb->tail;
+--
+2.34.1
+
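The off-by-one comes from counting the '\n' itself into the reassembled line. A minimal sketch of the corrected length computation (illustration only, not part of the patch; line_fragment_len() is a hypothetical name):

#include <stddef.h>
#include <string.h>

/* Sketch only: count the bytes of the current line present in this packet,
   stopping at the terminating '\n' without stepping past it, so the later
   NUL write stays inside the reassembly buffer. */
static size_t
line_fragment_len (const char *data, size_t avail)
{
  const char *nl = memchr (data, '\n', avail);

  if (nl == NULL)
    return avail;               /* line continues in the next packet */

  return (size_t) (nl - data);  /* line ends here; do not advance past it */
}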
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28735-kern-efi-sb-Reject-non-kernel-files-in-the-shim_lock.patch b/meta/recipes-bsp/grub/files/CVE-2022-28735-kern-efi-sb-Reject-non-kernel-files-in-the-shim_lock.patch
new file mode 100644
index 0000000000..7a59f10bfb
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28735-kern-efi-sb-Reject-non-kernel-files-in-the-shim_lock.patch
@@ -0,0 +1,111 @@
+From 6fe755c5c07bb386fda58306bfd19e4a1c974c53 Mon Sep 17 00:00:00 2001
+From: Julian Andres Klode <julian.klode@canonical.com>
+Date: Thu, 2 Dec 2021 15:03:53 +0100
+Subject: [PATCH] kern/efi/sb: Reject non-kernel files in the shim_lock
+ verifier
+
+We must not allow other verifiers to pass things like the GRUB modules.
+Instead of maintaining a blocklist, maintain an allowlist of things
+that we do not care about.
+
+This allowlist really should be made reusable, and shared by the
+lockdown verifier, but this is the minimal patch addressing
+security concerns where the TPM verifier was able to mark modules
+as verified (or the OpenPGP verifier for that matter), when it
+should not do so on shim-powered secure boot systems.
+
+Fixes: CVE-2022-28735
+
+Signed-off-by: Julian Andres Klode <julian.klode@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-28735
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=6fe755c5c07bb386fda58306bfd19e4a1c974c53
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/kern/efi/sb.c | 39 ++++++++++++++++++++++++++++++++++++---
+ include/grub/verify.h | 1 +
+ 2 files changed, 37 insertions(+), 3 deletions(-)
+
+diff --git a/grub-core/kern/efi/sb.c b/grub-core/kern/efi/sb.c
+index c52ec6226..89c4bb3fd 100644
+--- a/grub-core/kern/efi/sb.c
++++ b/grub-core/kern/efi/sb.c
+@@ -119,10 +119,11 @@ shim_lock_verifier_init (grub_file_t io __attribute__ ((unused)),
+ void **context __attribute__ ((unused)),
+ enum grub_verify_flags *flags)
+ {
+- *flags = GRUB_VERIFY_FLAGS_SKIP_VERIFICATION;
++ *flags = GRUB_VERIFY_FLAGS_NONE;
+
+ switch (type & GRUB_FILE_TYPE_MASK)
+ {
++ /* Files we check. */
+ case GRUB_FILE_TYPE_LINUX_KERNEL:
+ case GRUB_FILE_TYPE_MULTIBOOT_KERNEL:
+ case GRUB_FILE_TYPE_BSD_KERNEL:
+@@ -130,11 +131,43 @@ shim_lock_verifier_init (grub_file_t io __attribute__ ((unused)),
+ case GRUB_FILE_TYPE_PLAN9_KERNEL:
+ case GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE:
+ *flags = GRUB_VERIFY_FLAGS_SINGLE_CHUNK;
++ return GRUB_ERR_NONE;
+
+- /* Fall through. */
++ /* Files that do not affect secureboot state. */
++ case GRUB_FILE_TYPE_NONE:
++ case GRUB_FILE_TYPE_LOOPBACK:
++ case GRUB_FILE_TYPE_LINUX_INITRD:
++ case GRUB_FILE_TYPE_OPENBSD_RAMDISK:
++ case GRUB_FILE_TYPE_XNU_RAMDISK:
++ case GRUB_FILE_TYPE_SIGNATURE:
++ case GRUB_FILE_TYPE_PUBLIC_KEY:
++ case GRUB_FILE_TYPE_PUBLIC_KEY_TRUST:
++ case GRUB_FILE_TYPE_PRINT_BLOCKLIST:
++ case GRUB_FILE_TYPE_TESTLOAD:
++ case GRUB_FILE_TYPE_GET_SIZE:
++ case GRUB_FILE_TYPE_FONT:
++ case GRUB_FILE_TYPE_ZFS_ENCRYPTION_KEY:
++ case GRUB_FILE_TYPE_CAT:
++ case GRUB_FILE_TYPE_HEXCAT:
++ case GRUB_FILE_TYPE_CMP:
++ case GRUB_FILE_TYPE_HASHLIST:
++ case GRUB_FILE_TYPE_TO_HASH:
++ case GRUB_FILE_TYPE_KEYBOARD_LAYOUT:
++ case GRUB_FILE_TYPE_PIXMAP:
++ case GRUB_FILE_TYPE_GRUB_MODULE_LIST:
++ case GRUB_FILE_TYPE_CONFIG:
++ case GRUB_FILE_TYPE_THEME:
++ case GRUB_FILE_TYPE_GETTEXT_CATALOG:
++ case GRUB_FILE_TYPE_FS_SEARCH:
++ case GRUB_FILE_TYPE_LOADENV:
++ case GRUB_FILE_TYPE_SAVEENV:
++ case GRUB_FILE_TYPE_VERIFY_SIGNATURE:
++ *flags = GRUB_VERIFY_FLAGS_SKIP_VERIFICATION;
++ return GRUB_ERR_NONE;
+
++ /* Other files. */
+ default:
+- return GRUB_ERR_NONE;
++ return grub_error (GRUB_ERR_ACCESS_DENIED, N_("prohibited by secure boot policy"));
+ }
+ }
+
+diff --git a/include/grub/verify.h b/include/grub/verify.h
+index cd129c398..672ae1692 100644
+--- a/include/grub/verify.h
++++ b/include/grub/verify.h
+@@ -24,6 +24,7 @@
+
+ enum grub_verify_flags
+ {
++ GRUB_VERIFY_FLAGS_NONE = 0,
+ GRUB_VERIFY_FLAGS_SKIP_VERIFICATION = 1,
+ GRUB_VERIFY_FLAGS_SINGLE_CHUNK = 2,
+ /* Defer verification to another authority. */
+--
+2.34.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28736-loader-efi-chainloader-Use-grub_loader_set_ex.patch b/meta/recipes-bsp/grub/files/CVE-2022-28736-loader-efi-chainloader-Use-grub_loader_set_ex.patch
new file mode 100644
index 0000000000..5741e53f42
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28736-loader-efi-chainloader-Use-grub_loader_set_ex.patch
@@ -0,0 +1,86 @@
+From 04c86e0bb7b58fc2f913f798cdb18934933e532d Mon Sep 17 00:00:00 2001
+From: Chris Coulson <chris.coulson@canonical.com>
+Date: Tue, 5 Apr 2022 11:48:58 +0100
+Subject: [PATCH] loader/efi/chainloader: Use grub_loader_set_ex()
+
+This ports the EFI chainloader to use grub_loader_set_ex() in order to fix
+a use-after-free bug that occurs when grub_cmd_chainloader() is executed
+more than once before a boot attempt is performed.
+
+Fixes: CVE-2022-28736
+
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-28736
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=04c86e0bb7b58fc2f913f798cdb18934933e532d
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ grub-core/loader/efi/chainloader.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/grub-core/loader/efi/chainloader.c b/grub-core/loader/efi/chainloader.c
+index d1602c89b..7557eb269 100644
+--- a/grub-core/loader/efi/chainloader.c
++++ b/grub-core/loader/efi/chainloader.c
+@@ -44,11 +44,10 @@ GRUB_MOD_LICENSE ("GPLv3+");
+
+ static grub_dl_t my_mod;
+
+-static grub_efi_handle_t image_handle;
+-
+ static grub_err_t
+-grub_chainloader_unload (void)
++grub_chainloader_unload (void *context)
+ {
++ grub_efi_handle_t image_handle = (grub_efi_handle_t) context;
+ grub_efi_loaded_image_t *loaded_image;
+ grub_efi_boot_services_t *b;
+
+@@ -64,8 +63,9 @@ grub_chainloader_unload (void)
+ }
+
+ static grub_err_t
+-grub_chainloader_boot (void)
++grub_chainloader_boot (void *context)
+ {
++ grub_efi_handle_t image_handle = (grub_efi_handle_t) context;
+ grub_efi_boot_services_t *b;
+ grub_efi_status_t status;
+ grub_efi_uintn_t exit_data_size;
+@@ -225,6 +225,7 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ grub_efi_physical_address_t address = 0;
+ grub_efi_uintn_t pages = 0;
+ grub_efi_char16_t *cmdline = NULL;
++ grub_efi_handle_t image_handle = NULL;
+
+ if (argc == 0)
+ return grub_error (GRUB_ERR_BAD_ARGUMENT, N_("filename expected"));
+@@ -405,7 +406,7 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ efi_call_2 (b->free_pages, address, pages);
+ grub_free (file_path);
+
+- grub_loader_set (grub_chainloader_boot, grub_chainloader_unload, 0);
++ grub_loader_set_ex (grub_chainloader_boot, grub_chainloader_unload, image_handle, 0);
+ return 0;
+
+ fail:
+@@ -423,10 +424,7 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ efi_call_2 (b->free_pages, address, pages);
+
+ if (image_handle != NULL)
+- {
+- efi_call_1 (b->unload_image, image_handle);
+- image_handle = NULL;
+- }
++ efi_call_1 (b->unload_image, image_handle);
+
+ grub_dl_unref (my_mod);
+
+--
+2.34.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-3775.patch b/meta/recipes-bsp/grub/files/CVE-2022-3775.patch
new file mode 100644
index 0000000000..853efd0486
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-3775.patch
@@ -0,0 +1,95 @@
+From fdbe7209152ad6f09a1166f64f162017f2145ba3 Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Mon, 24 Oct 2022 08:05:35 +0800
+Subject: [PATCH] font: Fix an integer underflow in blit_comb()
+
+The expression (ctx.bounds.height - combining_glyphs[i]->height) / 2 may
+evaluate to a very big invalid value even if both ctx.bounds.height and
+combining_glyphs[i]->height are small integers. For example, if
+ctx.bounds.height is 10 and combining_glyphs[i]->height is 12, this
+expression evaluates to 2147483647 (expected -1). This is because
+coordinates are allowed to be negative but ctx.bounds.height is an
+unsigned int. So, the subtraction operates on unsigned ints and
+underflows to a very big value. The division makes things even worse.
+The quotient is still an invalid value even if converted back to int.
+
+This patch fixes the problem by casting ctx.bounds.height to int. As
+a result the subtraction will operate on int and grub_uint16_t which
+will be promoted to an int. So, the underflow will no longer happen. Other
+uses of ctx.bounds.height (and ctx.bounds.width) are also casted to int,
+to ensure coordinates are always calculated on signed integers.
+
+Fixes: CVE-2022-3775
+
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport from
+[https://git.savannah.gnu.org/cgit/grub.git/commit/?id=992c06191babc1e109caf40d6a07ec6fdef427af]
+CVE: CVE-2022-3775
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+
+---
+ grub-core/font/font.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index 0ff5525..7b1cbde 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -1206,12 +1206,12 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ ctx.bounds.height = main_glyph->height;
+
+ above_rightx = main_glyph->offset_x + main_glyph->width;
+- above_righty = ctx.bounds.y + ctx.bounds.height;
++ above_righty = ctx.bounds.y + (int) ctx.bounds.height;
+
+ above_leftx = main_glyph->offset_x;
+- above_lefty = ctx.bounds.y + ctx.bounds.height;
++ above_lefty = ctx.bounds.y + (int) ctx.bounds.height;
+
+- below_rightx = ctx.bounds.x + ctx.bounds.width;
++ below_rightx = ctx.bounds.x + (int) ctx.bounds.width;
+ below_righty = ctx.bounds.y;
+
+ comb = grub_unicode_get_comb (glyph_id);
+@@ -1224,7 +1224,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+
+ if (!combining_glyphs[i])
+ continue;
+- targetx = (ctx.bounds.width - combining_glyphs[i]->width) / 2 + ctx.bounds.x;
++ targetx = ((int) ctx.bounds.width - combining_glyphs[i]->width) / 2 + ctx.bounds.x;
+ /* CGJ is to avoid diacritics reordering. */
+ if (comb[i].code
+ == GRUB_UNICODE_COMBINING_GRAPHEME_JOINER)
+@@ -1234,8 +1234,8 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ case GRUB_UNICODE_COMB_OVERLAY:
+ do_blit (combining_glyphs[i],
+ targetx,
+- (ctx.bounds.height - combining_glyphs[i]->height) / 2
+- - (ctx.bounds.height + ctx.bounds.y), &ctx);
++ ((int) ctx.bounds.height - combining_glyphs[i]->height) / 2
++ - ((int) ctx.bounds.height + ctx.bounds.y), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+ break;
+@@ -1308,7 +1308,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ /* Fallthrough. */
+ case GRUB_UNICODE_STACK_ATTACHED_ABOVE:
+ do_blit (combining_glyphs[i], targetx,
+- -(ctx.bounds.height + ctx.bounds.y + space
++ -((int) ctx.bounds.height + ctx.bounds.y + space
+ + combining_glyphs[i]->height), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+@@ -1316,7 +1316,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+
+ case GRUB_UNICODE_COMB_HEBREW_DAGESH:
+ do_blit (combining_glyphs[i], targetx,
+- -(ctx.bounds.height / 2 + ctx.bounds.y
++ -((int) ctx.bounds.height / 2 + ctx.bounds.y
+ + combining_glyphs[i]->height / 2), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
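The wrap-around described in the commit message is easy to reproduce in standalone C; the values below match the 2147483647 / -1 example given above (illustration only, not part of the patch).

#include <stdio.h>

int
main (void)
{
  unsigned int bounds_height = 10;      /* like ctx.bounds.height */
  int glyph_height = 12;                /* like combining_glyphs[i]->height */

  /* Unsigned arithmetic: 10u - 12u wraps to 4294967294, then / 2. */
  int wrong = (bounds_height - glyph_height) / 2;        /* 2147483647 */

  /* Cast first so the subtraction happens on signed ints. */
  int right = ((int) bounds_height - glyph_height) / 2;  /* -1 */

  printf ("wrong=%d right=%d\n", wrong, right);
  return 0;
}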
diff --git a/meta/recipes-bsp/grub/files/CVE-2023-4692.patch b/meta/recipes-bsp/grub/files/CVE-2023-4692.patch
new file mode 100644
index 0000000000..4780e35b7a
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2023-4692.patch
@@ -0,0 +1,97 @@
+From 43651027d24e62a7a463254165e1e46e42aecdea Mon Sep 17 00:00:00 2001
+From: Maxim Suhanov <dfirblog@gmail.com>
+Date: Thu, 16 Nov 2023 07:21:50 +0000
+Subject: [PATCH] fs/ntfs: Fix an OOB write when parsing the $ATTRIBUTE_LIST
+ attribute for the $MFT file
+
+When parsing an extremely fragmented $MFT file, i.e., the file described
+using the $ATTRIBUTE_LIST attribute, current NTFS code will reuse a buffer
+containing bytes read from the underlying drive to store sector numbers,
+which are consumed later to read data from these sectors into another buffer.
+
+These sector numbers, two 32-bit integers, are always stored at predefined
+offsets, 0x10 and 0x14, relative to first byte of the selected entry within
+the $ATTRIBUTE_LIST attribute. Usually, this won't cause any problem.
+
+However, when parsing a specially-crafted file system image, this may cause
+the NTFS code to write these integers beyond the buffer boundary, likely
+causing the GRUB memory allocator to misbehave or fail. These integers contain
+values which are controlled by on-disk structures of the NTFS file system.
+
+Such modification and resulting misbehavior may touch a memory range not
+assigned to the GRUB and owned by firmware or another EFI application/driver.
+
+This fix introduces checks to ensure that these sector numbers are never
+written beyond the boundary.
+
+Fixes: CVE-2023-4692
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Maxim Suhanov <dfirblog@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+CVE: CVE-2023-4692
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=43651027d24e62a7a463254165e1e46e42aecdea]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ grub-core/fs/ntfs.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c
+index 2f34f76..6009e49 100644
+--- a/grub-core/fs/ntfs.c
++++ b/grub-core/fs/ntfs.c
+@@ -184,7 +184,7 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ }
+ if (at->attr_end)
+ {
+- grub_uint8_t *pa;
++ grub_uint8_t *pa, *pa_end;
+
+ at->emft_buf = grub_malloc (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR);
+ if (at->emft_buf == NULL)
+@@ -209,11 +209,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ }
+ at->attr_nxt = at->edat_buf;
+ at->attr_end = at->edat_buf + u32at (pa, 0x30);
++ pa_end = at->edat_buf + n;
+ }
+ else
+ {
+ at->attr_nxt = at->attr_end + u16at (pa, 0x14);
+ at->attr_end = at->attr_end + u32at (pa, 4);
++ pa_end = at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR);
+ }
+ at->flags |= GRUB_NTFS_AF_ALST;
+ while (at->attr_nxt < at->attr_end)
+@@ -230,6 +232,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ at->flags |= GRUB_NTFS_AF_GPOS;
+ at->attr_cur = at->attr_nxt;
+ pa = at->attr_cur;
++
++ if ((pa >= pa_end) || (pa_end - pa < 0x18))
++ {
++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list");
++ return NULL;
++ }
++
+ grub_set_unaligned32 ((char *) pa + 0x10,
+ grub_cpu_to_le32 (at->mft->data->mft_start));
+ grub_set_unaligned32 ((char *) pa + 0x14,
+@@ -240,6 +249,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ {
+ if (*pa != attr)
+ break;
++
++ if ((pa >= pa_end) || (pa_end - pa < 0x18))
++ {
++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list");
++ return NULL;
++ }
++
+ if (read_attr
+ (at, pa + 0x10,
+ u32at (pa, 0x10) * (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR),
+--
+2.40.0
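The added guard boils down to requiring a full 0x18-byte entry inside the backing buffer before the two sector numbers are written at offsets 0x10 and 0x14. A standalone sketch of the same condition (not part of the patch; entry_fits() is a hypothetical name):

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* Sketch only: an attribute-list entry must start inside the buffer and
   provide at least 0x18 bytes, matching the
   (pa >= pa_end) || (pa_end - pa < 0x18) check in the patch above. */
static bool
entry_fits (const uint8_t *entry, const uint8_t *buf_end)
{
  if (entry >= buf_end)
    return false;                       /* starts at or past the end */

  if ((size_t) (buf_end - entry) < 0x18)
    return false;                       /* truncated entry */

  return true;
}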
diff --git a/meta/recipes-bsp/grub/files/CVE-2023-4693.patch b/meta/recipes-bsp/grub/files/CVE-2023-4693.patch
new file mode 100644
index 0000000000..1b6013d86d
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2023-4693.patch
@@ -0,0 +1,62 @@
+From 0ed2458cc4eff6d9a9199527e2a0b6d445802f94 Mon Sep 17 00:00:00 2001
+From: Maxim Suhanov <dfirblog@gmail.com>
+Date: Mon, 28 Aug 2023 16:32:33 +0300
+Subject: [PATCH] fs/ntfs: Fix an OOB read when reading data from the resident
+ $DATA attribute
+
+When reading a file containing resident data, i.e., the file data is stored in
+the $DATA attribute within the NTFS file record, not in external clusters,
+there are no checks that this resident data actually fits the corresponding
+file record segment.
+
+When parsing a specially-crafted file system image, the current NTFS code will
+read the file data from an arbitrary, attacker-chosen memory offset and of
+arbitrary, attacker-chosen length.
+
+This allows an attacker to display arbitrary chunks of memory, which could
+contain sensitive information like password hashes or even plain-text,
+obfuscated passwords from BS EFI variables.
+
+This fix implements a check to ensure that resident data is read from the
+corresponding file record segment only.
+
+Fixes: CVE-2023-4693
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Maxim Suhanov <dfirblog@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=0ed2458cc4eff6d9a9199527e2a0b6d445802f94]
+CVE: CVE-2023-4693
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/fs/ntfs.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c
+index 7e43fd6..8f63c83 100644
+--- a/grub-core/fs/ntfs.c
++++ b/grub-core/fs/ntfs.c
+@@ -401,7 +401,18 @@ read_data (struct grub_ntfs_attr *at, grub_uint8_t *pa, grub_uint8_t *dest,
+ {
+ if (ofs + len > u32at (pa, 0x10))
+ return grub_error (GRUB_ERR_BAD_FS, "read out of range");
+- grub_memcpy (dest, pa + u32at (pa, 0x14) + ofs, len);
++
++ if (u32at (pa, 0x10) > (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR))
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute too large");
++
++ if (pa >= at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR))
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range");
++
++ if (u16at (pa, 0x14) + u32at (pa, 0x10) >
++ (grub_addr_t) at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR) - (grub_addr_t) pa)
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range");
++
++ grub_memcpy (dest, pa + u16at (pa, 0x14) + ofs, len);
+ return 0;
+ }
+
+--
+2.25.1
+
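Taken together, the added checks keep the resident data range inside the MFT record buffer. An equivalent bounds check in standalone form (illustration only, not part of the patch; resident_data_in_record() and its parameters are hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* Sketch only: data_off..data_off+data_len, relative to the attribute start,
   must lie entirely within [buf, buf + record_size); checked without
   overflowing the additions. */
static bool
resident_data_in_record (const uint8_t *buf, size_t record_size,
                         const uint8_t *attr,
                         size_t data_off, size_t data_len)
{
  const uint8_t *buf_end = buf + record_size;

  if (attr < buf || attr >= buf_end)
    return false;                       /* attribute outside the record */

  if ((size_t) (buf_end - attr) < data_off)
    return false;                       /* offset already past the end */

  if ((size_t) (buf_end - attr) - data_off < data_len)
    return false;                       /* data would run past the end */

  return true;
}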
diff --git a/meta/recipes-bsp/grub/files/commands-boot-Add-API-to-pass-context-to-loader.patch b/meta/recipes-bsp/grub/files/commands-boot-Add-API-to-pass-context-to-loader.patch
new file mode 100644
index 0000000000..a2c0530f04
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/commands-boot-Add-API-to-pass-context-to-loader.patch
@@ -0,0 +1,168 @@
+From 14ceb3b3ff6db664649138442b6562c114dcf56e Mon Sep 17 00:00:00 2001
+From: Chris Coulson <chris.coulson@canonical.com>
+Date: Tue, 5 Apr 2022 10:58:28 +0100
+Subject: [PATCH] commands/boot: Add API to pass context to loader
+
+Loaders rely on global variables for saving context which is consumed
+in the boot hook and freed in the unload hook. In the case where a loader
+command is executed twice, calling grub_loader_set() a second time executes
+the unload hook, but in some cases this runs when the loader's global
+context has already been updated, resulting in the updated context being
+freed and potential use-after-free bugs when the boot hook is subsequently
+called.
+
+This adds a new API, grub_loader_set_ex(), which allows a loader to specify
+context that is passed to its boot and unload hooks. This is an alternative
+to requiring that loaders call grub_loader_unset() before mutating their
+global context.
+
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=14ceb3b3ff6db664649138442b6562c114dcf56e
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ grub-core/commands/boot.c | 66 ++++++++++++++++++++++++++++++++++-----
+ include/grub/loader.h | 5 +++
+ 2 files changed, 63 insertions(+), 8 deletions(-)
+
+diff --git a/grub-core/commands/boot.c b/grub-core/commands/boot.c
+index bbca81e94..61514788e 100644
+--- a/grub-core/commands/boot.c
++++ b/grub-core/commands/boot.c
+@@ -27,10 +27,20 @@
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+-static grub_err_t (*grub_loader_boot_func) (void);
+-static grub_err_t (*grub_loader_unload_func) (void);
++static grub_err_t (*grub_loader_boot_func) (void *context);
++static grub_err_t (*grub_loader_unload_func) (void *context);
++static void *grub_loader_context;
+ static int grub_loader_flags;
+
++struct grub_simple_loader_hooks
++{
++ grub_err_t (*boot) (void);
++ grub_err_t (*unload) (void);
++};
++
++/* Don't heap allocate this to avoid making grub_loader_set() fallible. */
++static struct grub_simple_loader_hooks simple_loader_hooks;
++
+ struct grub_preboot
+ {
+ grub_err_t (*preboot_func) (int);
+@@ -44,6 +54,29 @@ static int grub_loader_loaded;
+ static struct grub_preboot *preboots_head = 0,
+ *preboots_tail = 0;
+
++static grub_err_t
++grub_simple_boot_hook (void *context)
++{
++ struct grub_simple_loader_hooks *hooks;
++
++ hooks = (struct grub_simple_loader_hooks *) context;
++ return hooks->boot ();
++}
++
++static grub_err_t
++grub_simple_unload_hook (void *context)
++{
++ struct grub_simple_loader_hooks *hooks;
++ grub_err_t ret;
++
++ hooks = (struct grub_simple_loader_hooks *) context;
++
++ ret = hooks->unload ();
++ grub_memset (hooks, 0, sizeof (*hooks));
++
++ return ret;
++}
++
+ int
+ grub_loader_is_loaded (void)
+ {
+@@ -110,28 +143,45 @@ grub_loader_unregister_preboot_hook (struct grub_preboot *hnd)
+ }
+
+ void
+-grub_loader_set (grub_err_t (*boot) (void),
+- grub_err_t (*unload) (void),
+- int flags)
++grub_loader_set_ex (grub_err_t (*boot) (void *context),
++ grub_err_t (*unload) (void *context),
++ void *context,
++ int flags)
+ {
+ if (grub_loader_loaded && grub_loader_unload_func)
+- grub_loader_unload_func ();
++ grub_loader_unload_func (grub_loader_context);
+
+ grub_loader_boot_func = boot;
+ grub_loader_unload_func = unload;
++ grub_loader_context = context;
+ grub_loader_flags = flags;
+
+ grub_loader_loaded = 1;
+ }
+
++void
++grub_loader_set (grub_err_t (*boot) (void),
++ grub_err_t (*unload) (void),
++ int flags)
++{
++ grub_loader_set_ex (grub_simple_boot_hook,
++ grub_simple_unload_hook,
++ &simple_loader_hooks,
++ flags);
++
++ simple_loader_hooks.boot = boot;
++ simple_loader_hooks.unload = unload;
++}
++
+ void
+ grub_loader_unset(void)
+ {
+ if (grub_loader_loaded && grub_loader_unload_func)
+- grub_loader_unload_func ();
++ grub_loader_unload_func (grub_loader_context);
+
+ grub_loader_boot_func = 0;
+ grub_loader_unload_func = 0;
++ grub_loader_context = 0;
+
+ grub_loader_loaded = 0;
+ }
+@@ -158,7 +208,7 @@ grub_loader_boot (void)
+ return err;
+ }
+ }
+- err = (grub_loader_boot_func) ();
++ err = (grub_loader_boot_func) (grub_loader_context);
+
+ for (cur = preboots_tail; cur; cur = cur->prev)
+ if (! err)
+diff --git a/include/grub/loader.h b/include/grub/loader.h
+index b20864282..97f231054 100644
+--- a/include/grub/loader.h
++++ b/include/grub/loader.h
+@@ -40,6 +40,11 @@ void EXPORT_FUNC (grub_loader_set) (grub_err_t (*boot) (void),
+ grub_err_t (*unload) (void),
+ int flags);
+
++void EXPORT_FUNC (grub_loader_set_ex) (grub_err_t (*boot) (void *context),
++ grub_err_t (*unload) (void *context),
++ void *context,
++ int flags);
++
+ /* Unset current loader, if any. */
+ void EXPORT_FUNC (grub_loader_unset) (void);
+
+--
+2.34.1
+
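The pattern grub_loader_set_ex() enables is the usual callback-plus-context one: each hook receives the state it was registered with instead of reading module globals. A GRUB-free sketch of that shape (hypothetical names, plain int standing in for grub_err_t, not part of the patch):

#include <stdlib.h>

struct my_loader_state
{
  void *image_handle;                   /* hypothetical per-load state */
};

static int
my_boot (void *context)
{
  struct my_loader_state *state = context;

  /* ... start the image referenced by state->image_handle ... */
  (void) state;
  return 0;
}

static int
my_unload (void *context)
{
  struct my_loader_state *state = context;

  /* Free exactly the state registered with this hook, so running the loader
     command a second time cannot free freshly created state. */
  free (state);
  return 0;
}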
diff --git a/meta/recipes-bsp/grub/files/determinism.patch b/meta/recipes-bsp/grub/files/determinism.patch
index 2828e80975..852b95a856 100644
--- a/meta/recipes-bsp/grub/files/determinism.patch
+++ b/meta/recipes-bsp/grub/files/determinism.patch
@@ -14,7 +14,7 @@ missing sorting of the list used to generate it. Add such a sort.
Also ensure the generated unidata.c file is deterministic by sorting the
keys of the dict.
-Upstream-Status: Pending
+Upstream-Status: Submitted [https://lists.gnu.org/archive/html/grub-devel/2023-06/index.html]
Richard Purdie <richard.purdie@linuxfoundation.org>
Signed-off-by: Naveen Saini <naveen.kumar.saini@intel.com>
---
diff --git a/meta/recipes-bsp/grub/files/loader-efi-chainloader-Simplify-the-loader-state.patch b/meta/recipes-bsp/grub/files/loader-efi-chainloader-Simplify-the-loader-state.patch
new file mode 100644
index 0000000000..a43025d425
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/loader-efi-chainloader-Simplify-the-loader-state.patch
@@ -0,0 +1,129 @@
+From 1469983ebb9674753ad333d37087fb8cb20e1dce Mon Sep 17 00:00:00 2001
+From: Chris Coulson <chris.coulson@canonical.com>
+Date: Tue, 5 Apr 2022 10:02:04 +0100
+Subject: [PATCH] loader/efi/chainloader: Simplify the loader state
+
+The chainloader command retains the source buffer and device path passed
+to LoadImage(), requiring the unload hook passed to grub_loader_set() to
+free them. It isn't required to retain this state though - they aren't
+required by StartImage() or anything else in the boot hook, so clean them
+up before grub_cmd_chainloader() finishes.
+
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=1469983ebb9674753ad333d37087fb8cb20e1dce
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ grub-core/loader/efi/chainloader.c | 38 +++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 17 deletions(-)
+
+diff --git a/grub-core/loader/efi/chainloader.c b/grub-core/loader/efi/chainloader.c
+index 2bd80f4db..d1602c89b 100644
+--- a/grub-core/loader/efi/chainloader.c
++++ b/grub-core/loader/efi/chainloader.c
+@@ -44,25 +44,20 @@ GRUB_MOD_LICENSE ("GPLv3+");
+
+ static grub_dl_t my_mod;
+
+-static grub_efi_physical_address_t address;
+-static grub_efi_uintn_t pages;
+-static grub_efi_device_path_t *file_path;
+ static grub_efi_handle_t image_handle;
+-static grub_efi_char16_t *cmdline;
+
+ static grub_err_t
+ grub_chainloader_unload (void)
+ {
++ grub_efi_loaded_image_t *loaded_image;
+ grub_efi_boot_services_t *b;
+
++ loaded_image = grub_efi_get_loaded_image (image_handle);
++ if (loaded_image != NULL)
++ grub_free (loaded_image->load_options);
++
+ b = grub_efi_system_table->boot_services;
+ efi_call_1 (b->unload_image, image_handle);
+- efi_call_2 (b->free_pages, address, pages);
+-
+- grub_free (file_path);
+- grub_free (cmdline);
+- cmdline = 0;
+- file_path = 0;
+
+ grub_dl_unref (my_mod);
+ return GRUB_ERR_NONE;
+@@ -140,7 +135,7 @@ make_file_path (grub_efi_device_path_t *dp, const char *filename)
+ char *dir_start;
+ char *dir_end;
+ grub_size_t size;
+- grub_efi_device_path_t *d;
++ grub_efi_device_path_t *d, *file_path;
+
+ dir_start = grub_strchr (filename, ')');
+ if (! dir_start)
+@@ -222,11 +217,14 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ grub_efi_status_t status;
+ grub_efi_boot_services_t *b;
+ grub_device_t dev = 0;
+- grub_efi_device_path_t *dp = 0;
++ grub_efi_device_path_t *dp = NULL, *file_path = NULL;
+ grub_efi_loaded_image_t *loaded_image;
+ char *filename;
+ void *boot_image = 0;
+ grub_efi_handle_t dev_handle = 0;
++ grub_efi_physical_address_t address = 0;
++ grub_efi_uintn_t pages = 0;
++ grub_efi_char16_t *cmdline = NULL;
+
+ if (argc == 0)
+ return grub_error (GRUB_ERR_BAD_ARGUMENT, N_("filename expected"));
+@@ -234,11 +232,6 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+
+ grub_dl_ref (my_mod);
+
+- /* Initialize some global variables. */
+- address = 0;
+- image_handle = 0;
+- file_path = 0;
+-
+ b = grub_efi_system_table->boot_services;
+
+ file = grub_file_open (filename, GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE);
+@@ -408,6 +401,10 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ grub_file_close (file);
+ grub_device_close (dev);
+
++ /* We're finished with the source image buffer and file path now. */
++ efi_call_2 (b->free_pages, address, pages);
++ grub_free (file_path);
++
+ grub_loader_set (grub_chainloader_boot, grub_chainloader_unload, 0);
+ return 0;
+
+@@ -419,11 +416,18 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ if (file)
+ grub_file_close (file);
+
++ grub_free (cmdline);
+ grub_free (file_path);
+
+ if (address)
+ efi_call_2 (b->free_pages, address, pages);
+
++ if (image_handle != NULL)
++ {
++ efi_call_1 (b->unload_image, image_handle);
++ image_handle = NULL;
++ }
++
+ grub_dl_unref (my_mod);
+
+ return grub_errno;
+--
+2.34.1
+
diff --git a/meta/recipes-bsp/grub/files/video-Remove-trailing-whitespaces.patch b/meta/recipes-bsp/grub/files/video-Remove-trailing-whitespaces.patch
new file mode 100644
index 0000000000..2db9bcbbc5
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/video-Remove-trailing-whitespaces.patch
@@ -0,0 +1,693 @@
+From 1f48917d8ddb490dcdc70176e0f58136b7f7811a Mon Sep 17 00:00:00 2001
+From: Elyes Haouas <ehaouas@noos.fr>
+Date: Fri, 4 Mar 2022 07:42:13 +0100
+Subject: [PATCH] video: Remove trailing whitespaces
+
+Signed-off-by: Elyes Haouas <ehaouas@noos.fr>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=1f48917d8ddb490dcdc70176e0f58136b7f7811a
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/video/bochs.c | 2 +-
+ grub-core/video/capture.c | 2 +-
+ grub-core/video/cirrus.c | 4 ++--
+ grub-core/video/coreboot/cbfb.c | 2 +-
+ grub-core/video/efi_gop.c | 22 +++++++++----------
+ grub-core/video/fb/fbblit.c | 8 +++----
+ grub-core/video/fb/video_fb.c | 10 ++++-----
+ grub-core/video/i386/pc/vbe.c | 34 ++++++++++++++---------------
+ grub-core/video/i386/pc/vga.c | 6 ++---
+ grub-core/video/ieee1275.c | 4 ++--
+ grub-core/video/radeon_fuloong2e.c | 6 ++---
+ grub-core/video/radeon_yeeloong3a.c | 6 ++---
+ grub-core/video/readers/png.c | 2 +-
+ grub-core/video/readers/tga.c | 2 +-
+ grub-core/video/sis315_init.c | 2 +-
+ grub-core/video/sis315pro.c | 8 +++----
+ grub-core/video/sm712.c | 10 ++++-----
+ grub-core/video/video.c | 8 +++----
+ 18 files changed, 69 insertions(+), 69 deletions(-)
+
+diff --git a/grub-core/video/bochs.c b/grub-core/video/bochs.c
+index 30ea1bd82..edc651697 100644
+--- a/grub-core/video/bochs.c
++++ b/grub-core/video/bochs.c
+@@ -212,7 +212,7 @@ find_card (grub_pci_device_t dev, grub_pci_id_t pciid, void *data)
+
+ if (((class >> 16) & 0xffff) != 0x0300 || pciid != 0x11111234)
+ return 0;
+-
++
+ addr = grub_pci_make_address (dev, GRUB_PCI_REG_ADDRESS_REG0);
+ framebuffer.base = grub_pci_read (addr) & GRUB_PCI_ADDR_MEM_MASK;
+ if (!framebuffer.base)
+diff --git a/grub-core/video/capture.c b/grub-core/video/capture.c
+index 4d3195e01..c653d89f9 100644
+--- a/grub-core/video/capture.c
++++ b/grub-core/video/capture.c
+@@ -92,7 +92,7 @@ grub_video_capture_start (const struct grub_video_mode_info *mode_info,
+ framebuffer.ptr = grub_calloc (framebuffer.mode_info.height, framebuffer.mode_info.pitch);
+ if (!framebuffer.ptr)
+ return grub_errno;
+-
++
+ err = grub_video_fb_create_render_target_from_pointer (&framebuffer.render_target,
+ &framebuffer.mode_info,
+ framebuffer.ptr);
+diff --git a/grub-core/video/cirrus.c b/grub-core/video/cirrus.c
+index e2149e8ce..f5542ccdc 100644
+--- a/grub-core/video/cirrus.c
++++ b/grub-core/video/cirrus.c
+@@ -354,11 +354,11 @@ grub_video_cirrus_setup (unsigned int width, unsigned int height,
+ grub_uint8_t sr_ext = 0, hidden_dac = 0;
+
+ grub_vga_set_geometry (&config, grub_vga_cr_write);
+-
++
+ grub_vga_gr_write (GRUB_VGA_GR_MODE_256_COLOR | GRUB_VGA_GR_MODE_READ_MODE1,
+ GRUB_VGA_GR_MODE);
+ grub_vga_gr_write (GRUB_VGA_GR_GR6_GRAPHICS_MODE, GRUB_VGA_GR_GR6);
+-
++
+ grub_vga_sr_write (GRUB_VGA_SR_MEMORY_MODE_NORMAL, GRUB_VGA_SR_MEMORY_MODE);
+
+ grub_vga_cr_write ((config.pitch >> CIRRUS_CR_EXTENDED_DISPLAY_PITCH_SHIFT)
+diff --git a/grub-core/video/coreboot/cbfb.c b/grub-core/video/coreboot/cbfb.c
+index 9af81fa5b..986003c51 100644
+--- a/grub-core/video/coreboot/cbfb.c
++++ b/grub-core/video/coreboot/cbfb.c
+@@ -106,7 +106,7 @@ grub_video_cbfb_setup (unsigned int width, unsigned int height,
+
+ grub_video_fb_set_palette (0, GRUB_VIDEO_FBSTD_NUMCOLORS,
+ grub_video_fbstd_colors);
+-
++
+ return err;
+ }
+
+diff --git a/grub-core/video/efi_gop.c b/grub-core/video/efi_gop.c
+index b7590dc6c..7a5054631 100644
+--- a/grub-core/video/efi_gop.c
++++ b/grub-core/video/efi_gop.c
+@@ -273,7 +273,7 @@ grub_video_gop_iterate (int (*hook) (const struct grub_video_mode_info *info, vo
+ grub_efi_status_t status;
+ struct grub_efi_gop_mode_info *info = NULL;
+ struct grub_video_mode_info mode_info;
+-
++
+ status = efi_call_4 (gop->query_mode, gop, mode, &size, &info);
+
+ if (status)
+@@ -390,7 +390,7 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+ found = 1;
+ }
+ }
+-
++
+ if (!found)
+ {
+ unsigned mode;
+@@ -399,7 +399,7 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+ {
+ grub_efi_uintn_t size;
+ grub_efi_status_t status;
+-
++
+ status = efi_call_4 (gop->query_mode, gop, mode, &size, &info);
+ if (status)
+ {
+@@ -472,11 +472,11 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+ framebuffer.ptr = (void *) (grub_addr_t) gop->mode->fb_base;
+ framebuffer.offscreen
+ = grub_malloc (framebuffer.mode_info.height
+- * framebuffer.mode_info.width
++ * framebuffer.mode_info.width
+ * sizeof (struct grub_efi_gop_blt_pixel));
+
+ buffer = framebuffer.offscreen;
+-
++
+ if (!buffer)
+ {
+ grub_dprintf ("video", "GOP: couldn't allocate shadow\n");
+@@ -485,11 +485,11 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+ &framebuffer.mode_info);
+ buffer = framebuffer.ptr;
+ }
+-
++
+ grub_dprintf ("video", "GOP: initialising FB @ %p %dx%dx%d\n",
+ framebuffer.ptr, framebuffer.mode_info.width,
+ framebuffer.mode_info.height, framebuffer.mode_info.bpp);
+-
++
+ err = grub_video_fb_create_render_target_from_pointer
+ (&framebuffer.render_target, &framebuffer.mode_info, buffer);
+
+@@ -498,15 +498,15 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+ grub_dprintf ("video", "GOP: Couldn't create FB target\n");
+ return err;
+ }
+-
++
+ err = grub_video_fb_set_active_render_target (framebuffer.render_target);
+-
++
+ if (err)
+ {
+ grub_dprintf ("video", "GOP: Couldn't set FB target\n");
+ return err;
+ }
+-
++
+ err = grub_video_fb_set_palette (0, GRUB_VIDEO_FBSTD_NUMCOLORS,
+ grub_video_fbstd_colors);
+
+@@ -514,7 +514,7 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+ grub_dprintf ("video", "GOP: Couldn't set palette\n");
+ else
+ grub_dprintf ("video", "GOP: Success\n");
+-
++
+ return err;
+ }
+
+diff --git a/grub-core/video/fb/fbblit.c b/grub-core/video/fb/fbblit.c
+index d55924837..1010ef393 100644
+--- a/grub-core/video/fb/fbblit.c
++++ b/grub-core/video/fb/fbblit.c
+@@ -466,7 +466,7 @@ grub_video_fbblit_replace_24bit_indexa (struct grub_video_fbblit_info *dst,
+ for (i = 0; i < width; i++)
+ {
+ register grub_uint32_t col;
+- if (*srcptr == 0xf0)
++ if (*srcptr == 0xf0)
+ col = palette[16];
+ else
+ col = palette[*srcptr & 0xf];
+@@ -478,7 +478,7 @@ grub_video_fbblit_replace_24bit_indexa (struct grub_video_fbblit_info *dst,
+ *dstptr++ = col >> 0;
+ *dstptr++ = col >> 8;
+ *dstptr++ = col >> 16;
+-#endif
++#endif
+ srcptr++;
+ }
+
+@@ -651,7 +651,7 @@ grub_video_fbblit_blend_24bit_indexa (struct grub_video_fbblit_info *dst,
+ for (i = 0; i < width; i++)
+ {
+ register grub_uint32_t col;
+- if (*srcptr != 0xf0)
++ if (*srcptr != 0xf0)
+ {
+ col = palette[*srcptr & 0xf];
+ #ifdef GRUB_CPU_WORDS_BIGENDIAN
+@@ -662,7 +662,7 @@ grub_video_fbblit_blend_24bit_indexa (struct grub_video_fbblit_info *dst,
+ *dstptr++ = col >> 0;
+ *dstptr++ = col >> 8;
+ *dstptr++ = col >> 16;
+-#endif
++#endif
+ }
+ else
+ dstptr += 3;
+diff --git a/grub-core/video/fb/video_fb.c b/grub-core/video/fb/video_fb.c
+index ae6b89f9a..fa4ebde26 100644
+--- a/grub-core/video/fb/video_fb.c
++++ b/grub-core/video/fb/video_fb.c
+@@ -754,7 +754,7 @@ grub_video_fb_unmap_color_int (struct grub_video_fbblit_info * source,
+ *alpha = 0;
+ return;
+ }
+-
++
+ /* If we have an out-of-bounds color, return transparent black. */
+ if (color > 255)
+ {
+@@ -1141,7 +1141,7 @@ grub_video_fb_scroll (grub_video_color_t color, int dx, int dy)
+ /* If everything is aligned on 32-bit use 32-bit copy. */
+ if ((grub_addr_t) grub_video_fb_get_video_ptr (&target, src_x, src_y)
+ % sizeof (grub_uint32_t) == 0
+- && (grub_addr_t) grub_video_fb_get_video_ptr (&target, dst_x, dst_y)
++ && (grub_addr_t) grub_video_fb_get_video_ptr (&target, dst_x, dst_y)
+ % sizeof (grub_uint32_t) == 0
+ && linelen % sizeof (grub_uint32_t) == 0
+ && linedelta % sizeof (grub_uint32_t) == 0)
+@@ -1155,7 +1155,7 @@ grub_video_fb_scroll (grub_video_color_t color, int dx, int dy)
+ else if ((grub_addr_t) grub_video_fb_get_video_ptr (&target, src_x, src_y)
+ % sizeof (grub_uint16_t) == 0
+ && (grub_addr_t) grub_video_fb_get_video_ptr (&target,
+- dst_x, dst_y)
++ dst_x, dst_y)
+ % sizeof (grub_uint16_t) == 0
+ && linelen % sizeof (grub_uint16_t) == 0
+ && linedelta % sizeof (grub_uint16_t) == 0)
+@@ -1170,7 +1170,7 @@ grub_video_fb_scroll (grub_video_color_t color, int dx, int dy)
+ {
+ grub_uint8_t *src, *dst;
+ DO_SCROLL
+- }
++ }
+ }
+
+ /* 4. Fill empty space with specified color. In this implementation
+@@ -1615,7 +1615,7 @@ grub_video_fb_setup (unsigned int mode_type, unsigned int mode_mask,
+ framebuffer.render_target = framebuffer.back_target;
+ return GRUB_ERR_NONE;
+ }
+-
++
+ mode_info->mode_type &= ~(GRUB_VIDEO_MODE_TYPE_DOUBLE_BUFFERED
+ | GRUB_VIDEO_MODE_TYPE_UPDATING_SWAP);
+
+diff --git a/grub-core/video/i386/pc/vbe.c b/grub-core/video/i386/pc/vbe.c
+index b7f911926..0e65b5206 100644
+--- a/grub-core/video/i386/pc/vbe.c
++++ b/grub-core/video/i386/pc/vbe.c
+@@ -219,7 +219,7 @@ grub_vbe_disable_mtrr (int mtrr)
+ }
+
+ /* Call VESA BIOS 0x4f09 to set palette data, return status. */
+-static grub_vbe_status_t
++static grub_vbe_status_t
+ grub_vbe_bios_set_palette_data (grub_uint32_t color_count,
+ grub_uint32_t start_index,
+ struct grub_vbe_palette_data *palette_data)
+@@ -237,7 +237,7 @@ grub_vbe_bios_set_palette_data (grub_uint32_t color_count,
+ }
+
+ /* Call VESA BIOS 0x4f00 to get VBE Controller Information, return status. */
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_get_controller_info (struct grub_vbe_info_block *ci)
+ {
+ struct grub_bios_int_registers regs;
+@@ -251,7 +251,7 @@ grub_vbe_bios_get_controller_info (struct grub_vbe_info_block *ci)
+ }
+
+ /* Call VESA BIOS 0x4f01 to get VBE Mode Information, return status. */
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_get_mode_info (grub_uint32_t mode,
+ struct grub_vbe_mode_info_block *mode_info)
+ {
+@@ -285,7 +285,7 @@ grub_vbe_bios_set_mode (grub_uint32_t mode,
+ }
+
+ /* Call VESA BIOS 0x4f03 to return current VBE Mode, return status. */
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_get_mode (grub_uint32_t *mode)
+ {
+ struct grub_bios_int_registers regs;
+@@ -298,7 +298,7 @@ grub_vbe_bios_get_mode (grub_uint32_t *mode)
+ return regs.eax & 0xffff;
+ }
+
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_getset_dac_palette_width (int set, int *dac_mask_size)
+ {
+ struct grub_bios_int_registers regs;
+@@ -346,7 +346,7 @@ grub_vbe_bios_get_memory_window (grub_uint32_t window,
+ }
+
+ /* Call VESA BIOS 0x4f06 to set scanline length (in bytes), return status. */
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_set_scanline_length (grub_uint32_t length)
+ {
+ struct grub_bios_int_registers regs;
+@@ -354,14 +354,14 @@ grub_vbe_bios_set_scanline_length (grub_uint32_t length)
+ regs.ecx = length;
+ regs.eax = 0x4f06;
+ /* BL = 2, Set Scan Line in Bytes. */
+- regs.ebx = 0x0002;
++ regs.ebx = 0x0002;
+ regs.flags = GRUB_CPU_INT_FLAGS_DEFAULT;
+ grub_bios_interrupt (0x10, &regs);
+ return regs.eax & 0xffff;
+ }
+
+ /* Call VESA BIOS 0x4f06 to return scanline length (in bytes), return status. */
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_get_scanline_length (grub_uint32_t *length)
+ {
+ struct grub_bios_int_registers regs;
+@@ -377,7 +377,7 @@ grub_vbe_bios_get_scanline_length (grub_uint32_t *length)
+ }
+
+ /* Call VESA BIOS 0x4f07 to set display start, return status. */
+-static grub_vbe_status_t
++static grub_vbe_status_t
+ grub_vbe_bios_set_display_start (grub_uint32_t x, grub_uint32_t y)
+ {
+ struct grub_bios_int_registers regs;
+@@ -390,7 +390,7 @@ grub_vbe_bios_set_display_start (grub_uint32_t x, grub_uint32_t y)
+ regs.edx = y;
+ regs.eax = 0x4f07;
+ /* BL = 80h, Set Display Start during Vertical Retrace. */
+- regs.ebx = 0x0080;
++ regs.ebx = 0x0080;
+ regs.flags = GRUB_CPU_INT_FLAGS_DEFAULT;
+ grub_bios_interrupt (0x10, &regs);
+
+@@ -401,7 +401,7 @@ grub_vbe_bios_set_display_start (grub_uint32_t x, grub_uint32_t y)
+ }
+
+ /* Call VESA BIOS 0x4f07 to get display start, return status. */
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_get_display_start (grub_uint32_t *x,
+ grub_uint32_t *y)
+ {
+@@ -419,7 +419,7 @@ grub_vbe_bios_get_display_start (grub_uint32_t *x,
+ }
+
+ /* Call VESA BIOS 0x4f0a. */
+-grub_vbe_status_t
++grub_vbe_status_t
+ grub_vbe_bios_get_pm_interface (grub_uint16_t *segment, grub_uint16_t *offset,
+ grub_uint16_t *length)
+ {
+@@ -896,7 +896,7 @@ vbe2videoinfo (grub_uint32_t mode,
+ case GRUB_VBE_MEMORY_MODEL_YUV:
+ mode_info->mode_type |= GRUB_VIDEO_MODE_TYPE_YUV;
+ break;
+-
++
+ case GRUB_VBE_MEMORY_MODEL_DIRECT_COLOR:
+ mode_info->mode_type |= GRUB_VIDEO_MODE_TYPE_RGB;
+ break;
+@@ -923,10 +923,10 @@ vbe2videoinfo (grub_uint32_t mode,
+ break;
+ case 8:
+ mode_info->bytes_per_pixel = 1;
+- break;
++ break;
+ case 4:
+ mode_info->bytes_per_pixel = 0;
+- break;
++ break;
+ }
+
+ if (controller_info.version >= 0x300)
+@@ -976,7 +976,7 @@ grub_video_vbe_iterate (int (*hook) (const struct grub_video_mode_info *info, vo
+
+ static grub_err_t
+ grub_video_vbe_setup (unsigned int width, unsigned int height,
+- grub_video_mode_type_t mode_type,
++ grub_video_mode_type_t mode_type,
+ grub_video_mode_type_t mode_mask)
+ {
+ grub_uint16_t *p;
+@@ -1193,7 +1193,7 @@ grub_video_vbe_print_adapter_specific_info (void)
+ controller_info.version & 0xFF,
+ controller_info.oem_software_rev >> 8,
+ controller_info.oem_software_rev & 0xFF);
+-
++
+ /* The total_memory field is in 64 KiB units. */
+ grub_printf_ (N_(" total memory: %d KiB\n"),
+ (controller_info.total_memory << 6));
+diff --git a/grub-core/video/i386/pc/vga.c b/grub-core/video/i386/pc/vga.c
+index b2f776c99..50d0b5e02 100644
+--- a/grub-core/video/i386/pc/vga.c
++++ b/grub-core/video/i386/pc/vga.c
+@@ -48,7 +48,7 @@ static struct
+ int back_page;
+ } framebuffer;
+
+-static unsigned char
++static unsigned char
+ grub_vga_set_mode (unsigned char mode)
+ {
+ struct grub_bios_int_registers regs;
+@@ -182,10 +182,10 @@ grub_video_vga_setup (unsigned int width, unsigned int height,
+
+ is_target = 1;
+ err = grub_video_fb_set_active_render_target (framebuffer.render_target);
+-
++
+ if (err)
+ return err;
+-
++
+ err = grub_video_fb_set_palette (0, GRUB_VIDEO_FBSTD_NUMCOLORS,
+ grub_video_fbstd_colors);
+
+diff --git a/grub-core/video/ieee1275.c b/grub-core/video/ieee1275.c
+index f437fb0df..ca3d3c3b2 100644
+--- a/grub-core/video/ieee1275.c
++++ b/grub-core/video/ieee1275.c
+@@ -233,7 +233,7 @@ grub_video_ieee1275_setup (unsigned int width, unsigned int height,
+ /* TODO. */
+ return grub_error (GRUB_ERR_IO, "can't set mode %dx%d", width, height);
+ }
+-
++
+ err = grub_video_ieee1275_fill_mode_info (dev, &framebuffer.mode_info);
+ if (err)
+ {
+@@ -260,7 +260,7 @@ grub_video_ieee1275_setup (unsigned int width, unsigned int height,
+
+ grub_video_ieee1275_set_palette (0, framebuffer.mode_info.number_of_colors,
+ grub_video_fbstd_colors);
+-
++
+ return err;
+ }
+
+diff --git a/grub-core/video/radeon_fuloong2e.c b/grub-core/video/radeon_fuloong2e.c
+index b4da34b5e..40917acb7 100644
+--- a/grub-core/video/radeon_fuloong2e.c
++++ b/grub-core/video/radeon_fuloong2e.c
+@@ -75,7 +75,7 @@ find_card (grub_pci_device_t dev, grub_pci_id_t pciid, void *data)
+ if (((class >> 16) & 0xffff) != GRUB_PCI_CLASS_SUBCLASS_VGA
+ || pciid != 0x515a1002)
+ return 0;
+-
++
+ *found = 1;
+
+ addr = grub_pci_make_address (dev, GRUB_PCI_REG_ADDRESS_REG0);
+@@ -139,7 +139,7 @@ grub_video_radeon_fuloong2e_setup (unsigned int width, unsigned int height,
+ framebuffer.mapped = 1;
+
+ /* Prevent garbage from appearing on the screen. */
+- grub_memset (framebuffer.ptr, 0x55,
++ grub_memset (framebuffer.ptr, 0x55,
+ framebuffer.mode_info.height * framebuffer.mode_info.pitch);
+
+ #ifndef TEST
+@@ -152,7 +152,7 @@ grub_video_radeon_fuloong2e_setup (unsigned int width, unsigned int height,
+ return err;
+
+ err = grub_video_fb_set_active_render_target (framebuffer.render_target);
+-
++
+ if (err)
+ return err;
+
+diff --git a/grub-core/video/radeon_yeeloong3a.c b/grub-core/video/radeon_yeeloong3a.c
+index 52614feb6..48631c181 100644
+--- a/grub-core/video/radeon_yeeloong3a.c
++++ b/grub-core/video/radeon_yeeloong3a.c
+@@ -74,7 +74,7 @@ find_card (grub_pci_device_t dev, grub_pci_id_t pciid, void *data)
+ if (((class >> 16) & 0xffff) != GRUB_PCI_CLASS_SUBCLASS_VGA
+ || pciid != 0x96151002)
+ return 0;
+-
++
+ *found = 1;
+
+ addr = grub_pci_make_address (dev, GRUB_PCI_REG_ADDRESS_REG0);
+@@ -137,7 +137,7 @@ grub_video_radeon_yeeloong3a_setup (unsigned int width, unsigned int height,
+ #endif
+
+ /* Prevent garbage from appearing on the screen. */
+- grub_memset (framebuffer.ptr, 0,
++ grub_memset (framebuffer.ptr, 0,
+ framebuffer.mode_info.height * framebuffer.mode_info.pitch);
+
+ #ifndef TEST
+@@ -150,7 +150,7 @@ grub_video_radeon_yeeloong3a_setup (unsigned int width, unsigned int height,
+ return err;
+
+ err = grub_video_fb_set_active_render_target (framebuffer.render_target);
+-
++
+ if (err)
+ return err;
+
+diff --git a/grub-core/video/readers/png.c b/grub-core/video/readers/png.c
+index 0157ff742..54dfedf43 100644
+--- a/grub-core/video/readers/png.c
++++ b/grub-core/video/readers/png.c
+@@ -916,7 +916,7 @@ grub_png_convert_image (struct grub_png_data *data)
+ }
+ return;
+ }
+-
++
+ if (data->is_gray)
+ {
+ switch (data->bpp)
+diff --git a/grub-core/video/readers/tga.c b/grub-core/video/readers/tga.c
+index 7cb9d1d2a..a9ec3a1b6 100644
+--- a/grub-core/video/readers/tga.c
++++ b/grub-core/video/readers/tga.c
+@@ -127,7 +127,7 @@ tga_load_palette (struct tga_data *data)
+
+ if (len > sizeof (data->palette))
+ len = sizeof (data->palette);
+-
++
+ if (grub_file_read (data->file, &data->palette, len)
+ != (grub_ssize_t) len)
+ return grub_errno;
+diff --git a/grub-core/video/sis315_init.c b/grub-core/video/sis315_init.c
+index ae5c1419c..09c3c7bbe 100644
+--- a/grub-core/video/sis315_init.c
++++ b/grub-core/video/sis315_init.c
+@@ -1,4 +1,4 @@
+-static const struct { grub_uint8_t reg; grub_uint8_t val; } sr_dump [] =
++static const struct { grub_uint8_t reg; grub_uint8_t val; } sr_dump [] =
+ {
+ { 0x28, 0x81 },
+ { 0x2a, 0x00 },
+diff --git a/grub-core/video/sis315pro.c b/grub-core/video/sis315pro.c
+index 22a0c85a6..4d2f9999a 100644
+--- a/grub-core/video/sis315pro.c
++++ b/grub-core/video/sis315pro.c
+@@ -103,7 +103,7 @@ find_card (grub_pci_device_t dev, grub_pci_id_t pciid, void *data)
+ if (((class >> 16) & 0xffff) != GRUB_PCI_CLASS_SUBCLASS_VGA
+ || pciid != GRUB_SIS315PRO_PCIID)
+ return 0;
+-
++
+ *found = 1;
+
+ addr = grub_pci_make_address (dev, GRUB_PCI_REG_ADDRESS_REG0);
+@@ -218,7 +218,7 @@ grub_video_sis315pro_setup (unsigned int width, unsigned int height,
+
+ #ifndef TEST
+ /* Prevent garbage from appearing on the screen. */
+- grub_memset (framebuffer.ptr, 0,
++ grub_memset (framebuffer.ptr, 0,
+ framebuffer.mode_info.height * framebuffer.mode_info.pitch);
+ grub_arch_sync_dma_caches (framebuffer.ptr,
+ framebuffer.mode_info.height
+@@ -231,7 +231,7 @@ grub_video_sis315pro_setup (unsigned int width, unsigned int height,
+ | GRUB_VGA_IO_MISC_EXTERNAL_CLOCK_0
+ | GRUB_VGA_IO_MISC_28MHZ
+ | GRUB_VGA_IO_MISC_ENABLE_VRAM_ACCESS
+- | GRUB_VGA_IO_MISC_COLOR,
++ | GRUB_VGA_IO_MISC_COLOR,
+ GRUB_VGA_IO_MISC_WRITE + GRUB_MACHINE_PCI_IO_BASE);
+
+ grub_vga_sr_write (0x86, 5);
+@@ -335,7 +335,7 @@ grub_video_sis315pro_setup (unsigned int width, unsigned int height,
+ {
+ if (read_sis_cmd (0x5) != 0xa1)
+ write_sis_cmd (0x86, 0x5);
+-
++
+ write_sis_cmd (read_sis_cmd (0x20) | 0xa1, 0x20);
+ write_sis_cmd (read_sis_cmd (0x1e) | 0xda, 0x1e);
+
+diff --git a/grub-core/video/sm712.c b/grub-core/video/sm712.c
+index 10c46eb65..65f59f84b 100644
+--- a/grub-core/video/sm712.c
++++ b/grub-core/video/sm712.c
+@@ -167,7 +167,7 @@ enum
+ GRUB_SM712_CR_SHADOW_VGA_VBLANK_START = 0x46,
+ GRUB_SM712_CR_SHADOW_VGA_VBLANK_END = 0x47,
+ GRUB_SM712_CR_SHADOW_VGA_VRETRACE_START = 0x48,
+- GRUB_SM712_CR_SHADOW_VGA_VRETRACE_END = 0x49,
++ GRUB_SM712_CR_SHADOW_VGA_VRETRACE_END = 0x49,
+ GRUB_SM712_CR_SHADOW_VGA_OVERFLOW = 0x4a,
+ GRUB_SM712_CR_SHADOW_VGA_CELL_HEIGHT = 0x4b,
+ GRUB_SM712_CR_SHADOW_VGA_HDISPLAY_END = 0x4c,
+@@ -375,7 +375,7 @@ find_card (grub_pci_device_t dev, grub_pci_id_t pciid, void *data)
+ if (((class >> 16) & 0xffff) != GRUB_PCI_CLASS_SUBCLASS_VGA
+ || pciid != GRUB_SM712_PCIID)
+ return 0;
+-
++
+ *found = 1;
+
+ addr = grub_pci_make_address (dev, GRUB_PCI_REG_ADDRESS_REG0);
+@@ -471,7 +471,7 @@ grub_video_sm712_setup (unsigned int width, unsigned int height,
+
+ #if !defined (TEST) && !defined(GENINIT)
+ /* Prevent garbage from appearing on the screen. */
+- grub_memset ((void *) framebuffer.cached_ptr, 0,
++ grub_memset ((void *) framebuffer.cached_ptr, 0,
+ framebuffer.mode_info.height * framebuffer.mode_info.pitch);
+ #endif
+
+@@ -482,7 +482,7 @@ grub_video_sm712_setup (unsigned int width, unsigned int height,
+ grub_sm712_sr_write (0x2, 0x6b);
+ grub_sm712_write_reg (0, GRUB_VGA_IO_PIXEL_MASK);
+ grub_sm712_sr_write (GRUB_VGA_SR_RESET_ASYNC, GRUB_VGA_SR_RESET);
+- grub_sm712_write_reg (GRUB_VGA_IO_MISC_NEGATIVE_VERT_POLARITY
++ grub_sm712_write_reg (GRUB_VGA_IO_MISC_NEGATIVE_VERT_POLARITY
+ | GRUB_VGA_IO_MISC_NEGATIVE_HORIZ_POLARITY
+ | GRUB_VGA_IO_MISC_UPPER_64K
+ | GRUB_VGA_IO_MISC_EXTERNAL_CLOCK_0
+@@ -694,7 +694,7 @@ grub_video_sm712_setup (unsigned int width, unsigned int height,
+ for (i = 0; i < ARRAY_SIZE (dda_lookups); i++)
+ grub_sm712_write_dda_lookup (i, dda_lookups[i].compare, dda_lookups[i].dda,
+ dda_lookups[i].vcentering);
+-
++
+ /* Undocumented */
+ grub_sm712_cr_write (0, 0x9c);
+ grub_sm712_cr_write (0, 0x9d);
+diff --git a/grub-core/video/video.c b/grub-core/video/video.c
+index 983424107..8937da745 100644
+--- a/grub-core/video/video.c
++++ b/grub-core/video/video.c
+@@ -491,13 +491,13 @@ parse_modespec (const char *current_mode, int *width, int *height, int *depth)
+ current_mode);
+
+ param++;
+-
++
+ *width = grub_strtoul (value, 0, 0);
+ if (grub_errno != GRUB_ERR_NONE)
+ return grub_error (GRUB_ERR_BAD_ARGUMENT,
+ N_("invalid video mode specification `%s'"),
+ current_mode);
+-
++
+ /* Find height value. */
+ value = param;
+ param = grub_strchr(param, 'x');
+@@ -513,13 +513,13 @@ parse_modespec (const char *current_mode, int *width, int *height, int *depth)
+ {
+ /* We have optional color depth value. */
+ param++;
+-
++
+ *height = grub_strtoul (value, 0, 0);
+ if (grub_errno != GRUB_ERR_NONE)
+ return grub_error (GRUB_ERR_BAD_ARGUMENT,
+ N_("invalid video mode specification `%s'"),
+ current_mode);
+-
++
+ /* Convert color depth value. */
+ value = param;
+ *depth = grub_strtoul (value, 0, 0);
+--
+2.34.1
+
diff --git a/meta/recipes-bsp/grub/files/video-readers-jpeg-Abort-sooner-if-a-read-operation-.patch b/meta/recipes-bsp/grub/files/video-readers-jpeg-Abort-sooner-if-a-read-operation-.patch
new file mode 100644
index 0000000000..0c7deae858
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/video-readers-jpeg-Abort-sooner-if-a-read-operation-.patch
@@ -0,0 +1,264 @@
+From d5caac8ab79d068ad9a41030c772d03a4d4fbd7b Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Mon, 28 Jun 2021 14:16:14 +1000
+Subject: [PATCH] video/readers/jpeg: Abort sooner if a read operation fails
+
+Fuzzing revealed some inputs that were taking a long time, potentially
+forever, because they did not bail quickly upon encountering an I/O error.
+
+Try to catch I/O errors sooner and bail out.
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=d5caac8ab79d068ad9a41030c772d03a4d4fbd7b
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/video/readers/jpeg.c | 86 +++++++++++++++++++++++++++-------
+ 1 file changed, 70 insertions(+), 16 deletions(-)
+
+diff --git a/grub-core/video/readers/jpeg.c b/grub-core/video/readers/jpeg.c
+index c47ffd651..806c56c78 100644
+--- a/grub-core/video/readers/jpeg.c
++++ b/grub-core/video/readers/jpeg.c
+@@ -109,9 +109,17 @@ static grub_uint8_t
+ grub_jpeg_get_byte (struct grub_jpeg_data *data)
+ {
+ grub_uint8_t r;
++ grub_ssize_t bytes_read;
+
+ r = 0;
+- grub_file_read (data->file, &r, 1);
++ bytes_read = grub_file_read (data->file, &r, 1);
++
++ if (bytes_read != 1)
++ {
++ grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: unexpected end of data");
++ return 0;
++ }
+
+ return r;
+ }
+@@ -120,9 +128,17 @@ static grub_uint16_t
+ grub_jpeg_get_word (struct grub_jpeg_data *data)
+ {
+ grub_uint16_t r;
++ grub_ssize_t bytes_read;
+
+ r = 0;
+- grub_file_read (data->file, &r, sizeof (grub_uint16_t));
++ bytes_read = grub_file_read (data->file, &r, sizeof (grub_uint16_t));
++
++ if (bytes_read != sizeof (grub_uint16_t))
++ {
++ grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: unexpected end of data");
++ return 0;
++ }
+
+ return grub_be_to_cpu16 (r);
+ }
+@@ -135,6 +151,11 @@ grub_jpeg_get_bit (struct grub_jpeg_data *data)
+ if (data->bit_mask == 0)
+ {
+ data->bit_save = grub_jpeg_get_byte (data);
++ if (grub_errno != GRUB_ERR_NONE) {
++ grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: file read error");
++ return 0;
++ }
+ if (data->bit_save == JPEG_ESC_CHAR)
+ {
+ if (grub_jpeg_get_byte (data) != 0)
+@@ -143,6 +164,11 @@ grub_jpeg_get_bit (struct grub_jpeg_data *data)
+ "jpeg: invalid 0xFF in data stream");
+ return 0;
+ }
++ if (grub_errno != GRUB_ERR_NONE)
++ {
++ grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: file read error");
++ return 0;
++ }
+ }
+ data->bit_mask = 0x80;
+ }
+@@ -161,7 +187,7 @@ grub_jpeg_get_number (struct grub_jpeg_data *data, int num)
+ return 0;
+
+ msb = value = grub_jpeg_get_bit (data);
+- for (i = 1; i < num; i++)
++ for (i = 1; i < num && grub_errno == GRUB_ERR_NONE; i++)
+ value = (value << 1) + (grub_jpeg_get_bit (data) != 0);
+ if (!msb)
+ value += 1 - (1 << num);
+@@ -208,6 +234,8 @@ grub_jpeg_decode_huff_table (struct grub_jpeg_data *data)
+ while (data->file->offset + sizeof (count) + 1 <= next_marker)
+ {
+ id = grub_jpeg_get_byte (data);
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+ ac = (id >> 4) & 1;
+ id &= 0xF;
+ if (id > 1)
+@@ -258,6 +286,8 @@ grub_jpeg_decode_quan_table (struct grub_jpeg_data *data)
+
+ next_marker = data->file->offset;
+ next_marker += grub_jpeg_get_word (data);
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+
+ if (next_marker > data->file->size)
+ {
+@@ -269,6 +299,8 @@ grub_jpeg_decode_quan_table (struct grub_jpeg_data *data)
+ <= next_marker)
+ {
+ id = grub_jpeg_get_byte (data);
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+ if (id >= 0x10) /* Upper 4-bit is precision. */
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
+ "jpeg: only 8-bit precision is supported");
+@@ -300,6 +332,9 @@ grub_jpeg_decode_sof (struct grub_jpeg_data *data)
+ next_marker = data->file->offset;
+ next_marker += grub_jpeg_get_word (data);
+
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
++
+ if (grub_jpeg_get_byte (data) != 8)
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
+ "jpeg: only 8-bit precision is supported");
+@@ -325,6 +360,8 @@ grub_jpeg_decode_sof (struct grub_jpeg_data *data)
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: invalid index");
+
+ ss = grub_jpeg_get_byte (data); /* Sampling factor. */
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+ if (!id)
+ {
+ grub_uint8_t vs, hs;
+@@ -504,7 +541,7 @@ grub_jpeg_idct_transform (jpeg_data_unit_t du)
+ }
+ }
+
+-static void
++static grub_err_t
+ grub_jpeg_decode_du (struct grub_jpeg_data *data, int id, jpeg_data_unit_t du)
+ {
+ int h1, h2, qt;
+@@ -519,6 +556,9 @@ grub_jpeg_decode_du (struct grub_jpeg_data *data, int id, jpeg_data_unit_t du)
+ data->dc_value[id] +=
+ grub_jpeg_get_number (data, grub_jpeg_get_huff_code (data, h1));
+
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
++
+ du[0] = data->dc_value[id] * (int) data->quan_table[qt][0];
+ pos = 1;
+ while (pos < ARRAY_SIZE (data->quan_table[qt]))
+@@ -533,11 +573,13 @@ grub_jpeg_decode_du (struct grub_jpeg_data *data, int id, jpeg_data_unit_t du)
+ num >>= 4;
+ pos += num;
+
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
++
+ if (pos >= ARRAY_SIZE (jpeg_zigzag_order))
+ {
+- grub_error (GRUB_ERR_BAD_FILE_TYPE,
+- "jpeg: invalid position in zigzag order!?");
+- return;
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: invalid position in zigzag order!?");
+ }
+
+ du[jpeg_zigzag_order[pos]] = val * (int) data->quan_table[qt][pos];
+@@ -545,6 +587,7 @@ grub_jpeg_decode_du (struct grub_jpeg_data *data, int id, jpeg_data_unit_t du)
+ }
+
+ grub_jpeg_idct_transform (du);
++ return GRUB_ERR_NONE;
+ }
+
+ static void
+@@ -603,7 +646,8 @@ grub_jpeg_decode_sos (struct grub_jpeg_data *data)
+ data_offset += grub_jpeg_get_word (data);
+
+ cc = grub_jpeg_get_byte (data);
+-
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+ if (cc != 3 && cc != 1)
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
+ "jpeg: component count must be 1 or 3");
+@@ -616,7 +660,8 @@ grub_jpeg_decode_sos (struct grub_jpeg_data *data)
+ id = grub_jpeg_get_byte (data) - 1;
+ if ((id < 0) || (id >= 3))
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: invalid index");
+-
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+ ht = grub_jpeg_get_byte (data);
+ data->comp_index[id][1] = (ht >> 4);
+ data->comp_index[id][2] = (ht & 0xF) + 2;
+@@ -624,11 +669,14 @@ grub_jpeg_decode_sos (struct grub_jpeg_data *data)
+ if ((data->comp_index[id][1] < 0) || (data->comp_index[id][1] > 3) ||
+ (data->comp_index[id][2] < 0) || (data->comp_index[id][2] > 3))
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: invalid hufftable index");
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+ }
+
+ grub_jpeg_get_byte (data); /* Skip 3 unused bytes. */
+ grub_jpeg_get_word (data);
+-
++ if (grub_errno != GRUB_ERR_NONE)
++ return grub_errno;
+ if (data->file->offset != data_offset)
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: extra byte in sos");
+
+@@ -646,6 +694,7 @@ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ {
+ unsigned c1, vb, hb, nr1, nc1;
+ int rst = data->dri;
++ grub_err_t err = GRUB_ERR_NONE;
+
+ vb = 8 << data->log_vs;
+ hb = 8 << data->log_hs;
+@@ -666,17 +715,22 @@ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+
+ for (r2 = 0; r2 < (1U << data->log_vs); r2++)
+ for (c2 = 0; c2 < (1U << data->log_hs); c2++)
+- grub_jpeg_decode_du (data, 0, data->ydu[r2 * 2 + c2]);
++ {
++ err = grub_jpeg_decode_du (data, 0, data->ydu[r2 * 2 + c2]);
++ if (err != GRUB_ERR_NONE)
++ return err;
++ }
+
+ if (data->color_components >= 3)
+ {
+- grub_jpeg_decode_du (data, 1, data->cbdu);
+- grub_jpeg_decode_du (data, 2, data->crdu);
++ err = grub_jpeg_decode_du (data, 1, data->cbdu);
++ if (err != GRUB_ERR_NONE)
++ return err;
++ err = grub_jpeg_decode_du (data, 2, data->crdu);
++ if (err != GRUB_ERR_NONE)
++ return err;
+ }
+
+- if (grub_errno)
+- return grub_errno;
+-
+ nr2 = (data->r1 == nr1 - 1) ? (data->image_height - data->r1 * vb) : vb;
+ nc2 = (c1 == nc1 - 1) ? (data->image_width - c1 * hb) : hb;
+
+--
+2.34.1
+
diff --git a/meta/recipes-bsp/grub/files/video-readers-jpeg-Refuse-to-handle-multiple-start-o.patch b/meta/recipes-bsp/grub/files/video-readers-jpeg-Refuse-to-handle-multiple-start-o.patch
new file mode 100644
index 0000000000..91ecaad98a
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/video-readers-jpeg-Refuse-to-handle-multiple-start-o.patch
@@ -0,0 +1,53 @@
+From 166a4d61448f74745afe1dac2f2cfb85d04909bf Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Mon, 28 Jun 2021 14:25:17 +1000
+Subject: [PATCH] video/readers/jpeg: Refuse to handle multiple start of
+ streams
+
+An invalid file could contain multiple start of stream blocks, which
+would cause us to reallocate and leak our bitmap. Refuse to handle
+multiple start of streams.
+
+Additionally, fix a grub_error() call formatting.
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.savannah.gnu.org/cgit/grub.git/commit/?id=166a4d61448f74745afe1dac2f2cfb85d04909bf
+
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+---
+ grub-core/video/readers/jpeg.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/grub-core/video/readers/jpeg.c b/grub-core/video/readers/jpeg.c
+index 2284a6c06..579bbe8a4 100644
+--- a/grub-core/video/readers/jpeg.c
++++ b/grub-core/video/readers/jpeg.c
+@@ -683,6 +683,9 @@ grub_jpeg_decode_sos (struct grub_jpeg_data *data)
+ if (data->file->offset != data_offset)
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: extra byte in sos");
+
++ if (*data->bitmap)
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: too many start of scan blocks");
++
+ if (grub_video_bitmap_create (data->bitmap, data->image_width,
+ data->image_height,
+ GRUB_VIDEO_BLIT_FORMAT_RGB_888))
+@@ -705,8 +708,8 @@ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ nc1 = (data->image_width + hb - 1) >> (3 + data->log_hs);
+
+ if (data->bitmap_ptr == NULL)
+- return grub_error(GRUB_ERR_BAD_FILE_TYPE,
+- "jpeg: attempted to decode data before start of stream");
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: attempted to decode data before start of stream");
+
+ for (; data->r1 < nr1 && (!data->dri || rst);
+ data->r1++, data->bitmap_ptr += (vb * data->image_width - hb * nc1) * 3)
+--
+2.34.1
+
diff --git a/meta/recipes-bsp/grub/grub2.inc b/meta/recipes-bsp/grub/grub2.inc
index 45852ab9b1..e6c6cd98b4 100644
--- a/meta/recipes-bsp/grub/grub2.inc
+++ b/meta/recipes-bsp/grub/grub2.inc
@@ -22,6 +22,24 @@ SRC_URI = "${GNU_MIRROR}/grub/grub-${PV}.tar.gz \
file://0001-RISC-V-Restore-the-typcast-to-long.patch \
file://CVE-2021-3981-grub-mkconfig-Restore-umask-for-the-grub.cfg.patch \
file://0001-configure.ac-Use-_zicsr_zifencei-extentions-on-riscv.patch \
+ file://video-Remove-trailing-whitespaces.patch \
+ file://CVE-2021-3695-video-readers-png-Drop-greyscale-support-to-fix-heap.patch \
+ file://CVE-2021-3696-video-readers-png-Avoid-heap-OOB-R-W-inserting-huff.patch \
+ file://video-readers-jpeg-Abort-sooner-if-a-read-operation-.patch \
+ file://video-readers-jpeg-Refuse-to-handle-multiple-start-o.patch \
+ file://CVE-2021-3697-video-readers-jpeg-Block-int-underflow-wild-pointer.patch \
+ file://CVE-2022-28733-net-ip-Do-IP-fragment-maths-safely.patch \
+ file://CVE-2022-28734-net-http-Fix-OOB-write-for-split-http-headers.patch \
+ file://CVE-2022-28734-net-http-Error-out-on-headers-with-LF-without-CR.patch \
+ file://CVE-2022-28735-kern-efi-sb-Reject-non-kernel-files-in-the-shim_lock.patch \
+ file://0001-font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch \
+ file://CVE-2022-2601.patch \
+ file://CVE-2022-3775.patch \
+ file://loader-efi-chainloader-Simplify-the-loader-state.patch \
+ file://commands-boot-Add-API-to-pass-context-to-loader.patch \
+ file://CVE-2022-28736-loader-efi-chainloader-Use-grub_loader_set_ex.patch \
+ file://CVE-2023-4692.patch \
+ file://CVE-2023-4693.patch \
"
SRC_URI[sha256sum] = "23b64b4c741569f9426ed2e3d0e6780796fca081bee4c99f62aa3f53ae803f5f"
diff --git a/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb b/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
index c6a4bc4932..dcc09f279e 100644
--- a/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
+++ b/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
@@ -19,9 +19,12 @@ PACKAGECONFIG[manpages] = "--enable-doc, --disable-doc, libxslt-native xmlto-nat
RDEPENDS:${PN} = "grep bash"
+EXTRA_OECONF = "--libdir=${nonarch_libdir}"
+
do_configure:prepend () {
( cd ${S}; autoreconf -f -i -s )
}
-FILES:${PN} += "${libdir}/${BPN}/*"
+FILES:${PN} += "${nonarch_libdir}/${BPN}/*"
FILES:${PN}-dbg += "${datadir}/doc/pm-utils/README.debugging"
+FILES:${PN}-dev += "${nonarch_libdir}/pkgconfig/pm-utils.pc"
diff --git a/meta/recipes-bsp/u-boot/files/0001-fs-squashfs-Use-kcalloc-when-relevant.patch b/meta/recipes-bsp/u-boot/files/0001-fs-squashfs-Use-kcalloc-when-relevant.patch
new file mode 100644
index 0000000000..70fdbb1031
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/0001-fs-squashfs-Use-kcalloc-when-relevant.patch
@@ -0,0 +1,64 @@
+From 50d4b8b9effcf9dc9e5a90034de2f0003fb063f0 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Mon, 27 Jun 2022 12:20:03 +0200
+Subject: [PATCH] fs/squashfs: Use kcalloc when relevant
+
+A crafted squashfs image could embed a huge number of empty metadata
+blocks in order to make the amount of malloc()'d memory overflow and be
+much smaller than expected. Because of this flaw, any random code
+positioned at the right location in the squashfs image could be memcpy'd
+from the squashfs structures into U-Boot code location while trying to
+access the rearmost blocks, before being executed.
+
+In order to prevent this vulnerability from being exploited in eg. a
+secure boot environment, let's add a check over the amount of data
+that is going to be allocated. Such a check could look like:
+
+if (!elem_size || n > SIZE_MAX / elem_size)
+ return NULL;
+
+The right way to do it would be to enhance the calloc() implementation
+but this is quite an impacting change for such a small fix. Another
+solution would be to add the check before the malloc call in the
+squashfs implementation, but this does not look right. So for now, let's
+use the kcalloc() compatibility function from Linux, which has this
+check.
+
+Fixes: c5100613037 ("fs/squashfs: new filesystem")
+Reported-by: Tatsuhiko Yasumatsu <Tatsuhiko.Yasumatsu@sony.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Tatsuhiko Yasumatsu <Tatsuhiko.Yasumatsu@sony.com>
+
+Upstream-Status: Backport [7f7fb9937c6cb49dd35153bd6708872b390b0a44]
+CVE: CVE-2022-33967
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ fs/squashfs/sqfs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/squashfs/sqfs.c b/fs/squashfs/sqfs.c
+index e2d91c654c..10e63afbce 100644
+--- a/fs/squashfs/sqfs.c
++++ b/fs/squashfs/sqfs.c
+@@ -13,6 +13,7 @@
+ #include <linux/types.h>
+ #include <linux/byteorder/little_endian.h>
+ #include <linux/byteorder/generic.h>
++#include <linux/compat.h>
+ #include <memalign.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -725,7 +726,8 @@ static int sqfs_read_inode_table(unsigned char **inode_table)
+ goto free_itb;
+ }
+
+- *inode_table = malloc(metablks_count * SQFS_METADATA_BLOCK_SIZE);
++ *inode_table = kcalloc(metablks_count, SQFS_METADATA_BLOCK_SIZE,
++ GFP_KERNEL);
+ if (!*inode_table) {
+ ret = -ENOMEM;
+ goto free_itb;
+--
+2.33.0
+
diff --git a/meta/recipes-bsp/u-boot/files/0001-fs-squashfs-sqfs_read-Prevent-arbitrary-code-executi.patch b/meta/recipes-bsp/u-boot/files/0001-fs-squashfs-sqfs_read-Prevent-arbitrary-code-executi.patch
new file mode 100644
index 0000000000..b1650f6baa
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/0001-fs-squashfs-sqfs_read-Prevent-arbitrary-code-executi.patch
@@ -0,0 +1,80 @@
+From 65f1066f5abe291c7b10b6075fd60776074a38a9 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Thu, 9 Jun 2022 16:02:06 +0200
+Subject: [PATCH] fs/squashfs: sqfs_read: Prevent arbitrary code execution
+
+Following Jincheng's report, an out-of-band write leading to arbitrary
+code execution is possible because on one side the squashfs logic
+accepts directory names up to 65535 bytes (u16), while U-Boot fs logic
+accepts directory names up to 255 bytes long.
+
+Prevent such an exploit from happening by capping directory name sizes
+to 255. Use a define for this purpose so that developers can link the
+limitation to its source and eventually kill it some day by dynamically
+allocating this array (if ever desired).
+
+Link: https://lore.kernel.org/all/CALO=DHFB+yBoXxVr5KcsK0iFdg+e7ywko4-e+72kjbcS8JBfPw@mail.gmail.com
+Reported-by: Jincheng Wang <jc.w4ng@gmail.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Jincheng Wang <jc.w4ng@gmail.com>
+
+CVE: CVE-2022-33103
+Upstream-Status: Backport [2ac0baab4aff1a0b45067d0b62f00c15f4e86856]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ fs/squashfs/sqfs.c | 8 +++++---
+ include/fs.h | 4 +++-
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/fs/squashfs/sqfs.c b/fs/squashfs/sqfs.c
+index e2d91c654c..a145d754cc 100644
+--- a/fs/squashfs/sqfs.c
++++ b/fs/squashfs/sqfs.c
+@@ -973,6 +973,7 @@ int sqfs_readdir(struct fs_dir_stream *fs_dirs, struct fs_dirent **dentp)
+ int i_number, offset = 0, ret;
+ struct fs_dirent *dent;
+ unsigned char *ipos;
++ u16 name_size;
+
+ dirs = (struct squashfs_dir_stream *)fs_dirs;
+ if (!dirs->size) {
+@@ -1055,9 +1056,10 @@ int sqfs_readdir(struct fs_dir_stream *fs_dirs, struct fs_dirent **dentp)
+ return -SQFS_STOP_READDIR;
+ }
+
+- /* Set entry name */
+- strncpy(dent->name, dirs->entry->name, dirs->entry->name_size + 1);
+- dent->name[dirs->entry->name_size + 1] = '\0';
++ /* Set entry name (capped at FS_DIRENT_NAME_LEN which is a U-Boot limitation) */
++ name_size = min_t(u16, dirs->entry->name_size + 1, FS_DIRENT_NAME_LEN - 1);
++ strncpy(dent->name, dirs->entry->name, name_size);
++ dent->name[name_size] = '\0';
+
+ offset = dirs->entry->name_size + 1 + SQFS_ENTRY_BASE_LENGTH;
+ dirs->entry_count--;
+diff --git a/include/fs.h b/include/fs.h
+index 1c79e299fd..6cb7ec89f4 100644
+--- a/include/fs.h
++++ b/include/fs.h
+@@ -161,6 +161,8 @@ int fs_write(const char *filename, ulong addr, loff_t offset, loff_t len,
+ #define FS_DT_REG 8 /* regular file */
+ #define FS_DT_LNK 10 /* symbolic link */
+
++#define FS_DIRENT_NAME_LEN 256
++
+ /**
+ * struct fs_dirent - directory entry
+ *
+@@ -181,7 +183,7 @@ struct fs_dirent {
+ /** change_time: time of last modification */
+ struct rtc_time change_time;
+ /** name: file name */
+- char name[256];
++ char name[FS_DIRENT_NAME_LEN];
+ };
+
+ /* Note: fs_dir_stream should be treated as opaque to the user of fs layer */
+--
+2.33.0
+
diff --git a/meta/recipes-bsp/u-boot/files/0001-i2c-fix-stack-buffer-overflow-vulnerability-in-i2c-m.patch b/meta/recipes-bsp/u-boot/files/0001-i2c-fix-stack-buffer-overflow-vulnerability-in-i2c-m.patch
new file mode 100644
index 0000000000..04ded5b119
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/0001-i2c-fix-stack-buffer-overflow-vulnerability-in-i2c-m.patch
@@ -0,0 +1,126 @@
+From 8f8c04bf1ebbd2f72f1643e7ad9617dafa6e5409 Mon Sep 17 00:00:00 2001
+From: Nicolas Iooss <nicolas.iooss+uboot@ledger.fr>
+Date: Fri, 10 Jun 2022 14:50:25 +0000
+Subject: [PATCH] i2c: fix stack buffer overflow vulnerability in i2c md
+ command
+
+When running "i2c md 0 0 80000100", the function do_i2c_md parses the
+length into an unsigned int variable named length. The value is then
+moved to a signed variable:
+
+ int nbytes = length;
+ #define DISP_LINE_LEN 16
+ int linebytes = (nbytes > DISP_LINE_LEN) ? DISP_LINE_LEN : nbytes;
+ ret = dm_i2c_read(dev, addr, linebuf, linebytes);
+
+On systems where integers are 32 bits wide, 0x80000100 is a negative
+value to "nbytes > DISP_LINE_LEN" is false and linebytes gets assigned
+0x80000100 instead of 16.
+
+The consequence is that the function which reads from the i2c device
+(dm_i2c_read or i2c_read) is called with a 16-byte stack buffer to fill
+but with a size parameter which is too large. In some cases, this could
+trigger a crash. But with some i2c drivers, such as drivers/i2c/nx_i2c.c
+(used with "nexell,s5pxx18-i2c" bus), the size is actually truncated to
+a 16-bit integer. This is because function i2c_transfer expects an
+unsigned short length. In such a case, an attacker who can control the
+response of an i2c device can overwrite the return address of a function
+and execute arbitrary code through Return-Oriented Programming.
+
+Fix this issue by using unsigned integers types in do_i2c_md. While at
+it, make also alen unsigned, as signed sizes can cause vulnerabilities
+when people forgot to check that they can be negative.
+
+Signed-off-by: Nicolas Iooss <nicolas.iooss+uboot@ledger.fr>
+Reviewed-by: Heiko Schocher <hs@denx.de>
+
+CVE: CVE-2022-34835
+Upstream-Status: Backport [8f8c04bf1ebbd2f72f1643e7ad9617dafa6e5409]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ cmd/i2c.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/cmd/i2c.c b/cmd/i2c.c
+index 9050b2b8d2..bd04b14024 100644
+--- a/cmd/i2c.c
++++ b/cmd/i2c.c
+@@ -200,10 +200,10 @@ void i2c_init_board(void)
+ *
+ * Returns the address length.
+ */
+-static uint get_alen(char *arg, int default_len)
++static uint get_alen(char *arg, uint default_len)
+ {
+- int j;
+- int alen;
++ uint j;
++ uint alen;
+
+ alen = default_len;
+ for (j = 0; j < 8; j++) {
+@@ -247,7 +247,7 @@ static int do_i2c_read(struct cmd_tbl *cmdtp, int flag, int argc,
+ {
+ uint chip;
+ uint devaddr, length;
+- int alen;
++ uint alen;
+ u_char *memaddr;
+ int ret;
+ #if CONFIG_IS_ENABLED(DM_I2C)
+@@ -301,7 +301,7 @@ static int do_i2c_write(struct cmd_tbl *cmdtp, int flag, int argc,
+ {
+ uint chip;
+ uint devaddr, length;
+- int alen;
++ uint alen;
+ u_char *memaddr;
+ int ret;
+ #if CONFIG_IS_ENABLED(DM_I2C)
+@@ -469,8 +469,8 @@ static int do_i2c_md(struct cmd_tbl *cmdtp, int flag, int argc,
+ {
+ uint chip;
+ uint addr, length;
+- int alen;
+- int j, nbytes, linebytes;
++ uint alen;
++ uint j, nbytes, linebytes;
+ int ret;
+ #if CONFIG_IS_ENABLED(DM_I2C)
+ struct udevice *dev;
+@@ -589,9 +589,9 @@ static int do_i2c_mw(struct cmd_tbl *cmdtp, int flag, int argc,
+ {
+ uint chip;
+ ulong addr;
+- int alen;
++ uint alen;
+ uchar byte;
+- int count;
++ uint count;
+ int ret;
+ #if CONFIG_IS_ENABLED(DM_I2C)
+ struct udevice *dev;
+@@ -676,8 +676,8 @@ static int do_i2c_crc(struct cmd_tbl *cmdtp, int flag, int argc,
+ {
+ uint chip;
+ ulong addr;
+- int alen;
+- int count;
++ uint alen;
++ uint count;
+ uchar byte;
+ ulong crc;
+ ulong err;
+@@ -985,7 +985,7 @@ static int do_i2c_loop(struct cmd_tbl *cmdtp, int flag, int argc,
+ char *const argv[])
+ {
+ uint chip;
+- int alen;
++ uint alen;
+ uint addr;
+ uint length;
+ u_char bytes[16];
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/u-boot/files/0001-net-Check-for-the-minimum-IP-fragmented-datagram-siz.patch b/meta/recipes-bsp/u-boot/files/0001-net-Check-for-the-minimum-IP-fragmented-datagram-siz.patch
new file mode 100644
index 0000000000..3f9cc7776b
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/0001-net-Check-for-the-minimum-IP-fragmented-datagram-siz.patch
@@ -0,0 +1,207 @@
+From c7cab39de5e4b22620248a190b3d2ee46cff38c2 Mon Sep 17 00:00:00 2001
+From: Fabio Estevam <festevam@denx.de>
+Date: Thu, 26 May 2022 11:14:37 -0300
+Subject: [PATCH] net: Check for the minimum IP fragmented datagram size
+
+Nicolas Bidron and Nicolas Guigo reported the two bugs below:
+
+"
+----------BUG 1----------
+
+In compiled versions of U-Boot that define CONFIG_IP_DEFRAG, a value of
+`ip->ip_len` (IP packet header's Total Length) higher than `IP_HDR_SIZE`
+and strictly lower than `IP_HDR_SIZE+8` will lead to a value for `len`
+comprised between `0` and `7`. This will ultimately result in a
+truncated division by `8` resulting value of `0` forcing the hole
+metadata and fragment to point to the same location. The subsequent
+memcopy will overwrite the hole metadata with the fragment data. Through
+a second fragment, this can be exploited to write to an arbitrary offset
+controlled by that overwritten hole metadata value.
+
+This bug is only exploitable locally as it requires crafting two packets
+the first of which would most likely be dropped through routing due to
+its unexpectedly low Total Length. However, this bug can potentially be
+exploited to root linux based embedded devices locally.
+
+```C
+static struct ip_udp_hdr *__net_defragment(struct ip_udp_hdr *ip, int *lenp)
+{
+ static uchar pkt_buff[IP_PKTSIZE] __aligned(PKTALIGN);
+ static u16 first_hole, total_len;
+ struct hole *payload, *thisfrag, *h, *newh;
+ struct ip_udp_hdr *localip = (struct ip_udp_hdr *)pkt_buff;
+ uchar *indata = (uchar *)ip;
+ int offset8, start, len, done = 0;
+ u16 ip_off = ntohs(ip->ip_off);
+
+ /* payload starts after IP header, this fragment is in there */
+ payload = (struct hole *)(pkt_buff + IP_HDR_SIZE);
+ offset8 = (ip_off & IP_OFFS);
+ thisfrag = payload + offset8;
+ start = offset8 * 8;
+ len = ntohs(ip->ip_len) - IP_HDR_SIZE;
+```
+
+The last line of the previous excerpt from `u-boot/net/net.c` shows how
+the attacker can control the value of `len` to be strictly lower than
+`8` by issuing a packet with `ip_len` between `21` and `27`
+(`IP_HDR_SIZE` has a value of `20`).
+
+Also note that `offset8` here is `0` which leads to `thisfrag = payload`.
+
+```C
+ } else if (h >= thisfrag) {
+ /* overlaps with initial part of the hole: move this hole */
+ newh = thisfrag + (len / 8);
+ *newh = *h;
+ h = newh;
+ if (h->next_hole)
+ payload[h->next_hole].prev_hole = (h - payload);
+ if (h->prev_hole)
+ payload[h->prev_hole].next_hole = (h - payload);
+ else
+ first_hole = (h - payload);
+
+ } else {
+```
+
+Lower down the same function, execution reaches the above code path.
+Here, `len / 8` evaluates to `0` leading to `newh = thisfrag`. Also note
+that `first_hole` here is `0` since `h` and `payload` point to the same
+location.
+
+```C
+ /* finally copy this fragment and possibly return whole packet */
+ memcpy((uchar *)thisfrag, indata + IP_HDR_SIZE, len);
+```
+
+Finally, in the above excerpt the `memcpy` overwrites the hole metadata
+since `thisfrag` and `h` both point to the same location. The hole
+metadata is effectively overwritten with arbitrary data from the
+fragmented IP packet data. If `len` was crafted to be `6`, `last_byte`,
+`next_hole`, and `prev_hole` of the `first_hole` can be controlled by
+the attacker.
+
+Finally the arbitrary offset write occurs through a second fragment that
+only needs to be crafted to write data in the hole pointed to by the
+previously controlled hole metadata (`next_hole`) from the first packet.
+
+ ### Recommendation
+
+Handle cases where `len` is strictly lower than 8 by preventing the
+overwrite of the hole metadata during the memcpy of the fragment. This
+could be achieved by either:
+* Moving the location where the hole metadata is stored when `len` is
+lower than `8`.
+* Or outright rejecting fragmented IP datagram with a Total Length
+(`ip_len`) lower than 28 bytes which is the minimum valid fragmented IP
+datagram size (as defined as the minimum fragment of 8 octets in the IP
+Specification Document:
+[RFC791](https://datatracker.ietf.org/doc/html/rfc791) page 25).
+
+----------BUG 2----------
+
+In compiled versions of U-Boot that define CONFIG_IP_DEFRAG, a value of
+`ip->ip_len` (IP packet header's Total Length) lower than `IP_HDR_SIZE`
+will lead to a negative value for `len` which will ultimately result in
+a buffer overflow during the subsequent `memcpy` that uses `len` as its
+`count` parameter.
+
+This bug is only exploitable on local ethernet as it requires crafting
+an invalid packet to include an unexpected `ip_len` value in the IP UDP
+header that's lower than the minimum accepted Total Length of a packet
+(21 as defined in the IP Specification Document:
+[RFC791](https://datatracker.ietf.org/doc/html/rfc791)). Such packet
+would in all likelihood be dropped while being routed to its final
+destination through most routing equipment and as such requires the
+attacker to be in a local position in order to be exploited.
+
+```C
+static struct ip_udp_hdr *__net_defragment(struct ip_udp_hdr *ip, int *lenp)
+{
+ static uchar pkt_buff[IP_PKTSIZE] __aligned(PKTALIGN);
+ static u16 first_hole, total_len;
+ struct hole *payload, *thisfrag, *h, *newh;
+ struct ip_udp_hdr *localip = (struct ip_udp_hdr *)pkt_buff;
+ uchar *indata = (uchar *)ip;
+ int offset8, start, len, done = 0;
+ u16 ip_off = ntohs(ip->ip_off);
+
+ /* payload starts after IP header, this fragment is in there */
+ payload = (struct hole *)(pkt_buff + IP_HDR_SIZE);
+ offset8 = (ip_off & IP_OFFS);
+ thisfrag = payload + offset8;
+ start = offset8 * 8;
+ len = ntohs(ip->ip_len) - IP_HDR_SIZE;
+```
+
+The last line of the previous excerpt from `u-boot/net/net.c` shows
+where the underflow to a negative `len` value occurs if `ip_len` is set
+to a value strictly lower than 20 (`IP_HDR_SIZE` being 20). Also note
+that in the above excerpt the `pkt_buff` buffer has a size of
+`CONFIG_NET_MAXDEFRAG` which defaults to 16 KB but can range from 1KB to
+64 KB depending on configurations.
+
+```C
+ /* finally copy this fragment and possibly return whole packet */
+ memcpy((uchar *)thisfrag, indata + IP_HDR_SIZE, len);
+```
+
+In the above excerpt the `memcpy` overflows the destination by
+attempting to make a copy of nearly 4 gigabytes in a buffer that's
+designed to hold `CONFIG_NET_MAXDEFRAG` bytes at most which leads to a DoS.
+
+ ### Recommendation
+
+Stop processing of the packet if `ip_len` is lower than 21 (as defined
+by the minimum length of a data carrying datagram in the IP
+Specification Document:
+[RFC791](https://datatracker.ietf.org/doc/html/rfc791) page 34)."
+
+Add a check for ip_len less than 28 and stop processing the packet
+in this case.
+
+Such a check covers the two reported bugs.
+
+Reported-by: Nicolas Bidron <nicolas.bidron@nccgroup.com>
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+
+Upstream-Status: Backport [b85d130ea0cac152c21ec38ac9417b31d41b5552]
+CVE: CVE-2022-30552
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ include/net.h | 2 ++
+ net/net.c | 3 +++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/include/net.h b/include/net.h
+index cec8c98618..09d7e9b9e8 100644
+--- a/include/net.h
++++ b/include/net.h
+@@ -397,6 +397,8 @@ struct ip_hdr {
+
+ #define IP_HDR_SIZE (sizeof(struct ip_hdr))
+
++#define IP_MIN_FRAG_DATAGRAM_SIZE (IP_HDR_SIZE + 8)
++
+ /*
+ * Internet Protocol (IP) + UDP header.
+ */
+diff --git a/net/net.c b/net/net.c
+index c2992a0908..f5400e6dbc 100644
+--- a/net/net.c
++++ b/net/net.c
+@@ -907,6 +907,9 @@ static struct ip_udp_hdr *__net_defragment(struct ip_udp_hdr *ip, int *lenp)
+ int offset8, start, len, done = 0;
+ u16 ip_off = ntohs(ip->ip_off);
+
++ if (ip->ip_len < IP_MIN_FRAG_DATAGRAM_SIZE)
++ return NULL;
++
+ /* payload starts after IP header, this fragment is in there */
+ payload = (struct hole *)(pkt_buff + IP_HDR_SIZE);
+ offset8 = (ip_off & IP_OFFS);
+--
+2.33.0
+
diff --git a/meta/recipes-bsp/u-boot/u-boot.inc b/meta/recipes-bsp/u-boot/u-boot.inc
index f022aed732..54ea2e9e50 100644
--- a/meta/recipes-bsp/u-boot/u-boot.inc
+++ b/meta/recipes-bsp/u-boot/u-boot.inc
@@ -5,7 +5,7 @@ PACKAGE_ARCH = "${MACHINE_ARCH}"
DEPENDS += "${@bb.utils.contains('UBOOT_ENV_SUFFIX', 'scr', 'u-boot-mkimage-native', '', d)}"
-inherit uboot-config uboot-extlinux-config uboot-sign deploy cml1 python3native kernel-arch
+inherit uboot-config uboot-extlinux-config uboot-sign deploy python3native kernel-arch
DEPENDS += "swig-native"
@@ -24,6 +24,10 @@ PACKAGECONFIG[openssl] = ",,openssl-native"
# file already exists it will not be overwritten.
UBOOT_LOCALVERSION ?= ""
+# Default name of u-boot initial env, but enable individual recipes to change
+# this value.
+UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
+
require u-boot-configure.inc
do_compile () {
diff --git a/meta/recipes-bsp/u-boot/u-boot_2022.01.bb b/meta/recipes-bsp/u-boot/u-boot_2022.01.bb
index 0d2464d74b..c4cfcbca19 100644
--- a/meta/recipes-bsp/u-boot/u-boot_2022.01.bb
+++ b/meta/recipes-bsp/u-boot/u-boot_2022.01.bb
@@ -1,8 +1,12 @@
require u-boot-common.inc
require u-boot.inc
-SRC_URI:append = " file://0001-riscv32-Use-double-float-ABI-for-rv32.patch \
+SRC_URI += " file://0001-riscv32-Use-double-float-ABI-for-rv32.patch \
file://0001-riscv-fix-build-with-binutils-2.38.patch \
+ file://0001-i2c-fix-stack-buffer-overflow-vulnerability-in-i2c-m.patch \
+ file://0001-fs-squashfs-sqfs_read-Prevent-arbitrary-code-executi.patch \
+ file://0001-net-Check-for-the-minimum-IP-fragmented-datagram-siz.patch \
+ file://0001-fs-squashfs-Use-kcalloc-when-relevant.patch \
"
DEPENDS += "bc-native dtc-native python3-setuptools-native"
diff --git a/meta/recipes-bsp/v86d/v86d_0.1.10.bb b/meta/recipes-bsp/v86d/v86d_0.1.10.bb
index 5f342b1120..b4fe362f8e 100644
--- a/meta/recipes-bsp/v86d/v86d_0.1.10.bb
+++ b/meta/recipes-bsp/v86d/v86d_0.1.10.bb
@@ -6,7 +6,6 @@ DESCRIPTION = "v86d provides a backend for kernel drivers that need to execute x
LICENSE = "GPL-2.0-only"
LIC_FILES_CHKSUM = "file://README;md5=94ac1971e4f2309dc322d598e7b1f7dd"
-DEPENDS = "virtual/kernel"
RRECOMMENDS:${PN} = "kernel-module-uvesafb"
PR = "r2"
diff --git a/meta/recipes-connectivity/avahi/avahi_0.8.bb b/meta/recipes-connectivity/avahi/avahi_0.8.bb
index 9bb5e5861e..5d1c86978a 100644
--- a/meta/recipes-connectivity/avahi/avahi_0.8.bb
+++ b/meta/recipes-connectivity/avahi/avahi_0.8.bb
@@ -26,6 +26,15 @@ SRC_URI = "https://github.com/lathiat/avahi/releases/download/v${PV}/avahi-${PV}
file://0001-Fix-opening-etc-resolv.conf-error.patch \
file://handle-hup.patch \
file://local-ping.patch \
+ file://CVE-2023-1981.patch \
+ file://CVE-2023-38469-1.patch \
+ file://CVE-2023-38469-2.patch \
+ file://CVE-2023-38470-1.patch \
+ file://CVE-2023-38470-2.patch \
+ file://CVE-2023-38471-1.patch \
+ file://CVE-2023-38471-2.patch \
+ file://CVE-2023-38472.patch \
+ file://CVE-2023-38473.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/lathiat/avahi/releases/"
@@ -83,7 +92,6 @@ RRECOMMENDS:${PN}:append:libc-glibc = " libnss-mdns"
do_install() {
autotools_do_install
rm -rf ${D}/run
- rm -rf ${D}${datadir}/dbus-1/interfaces
test -d ${D}${datadir}/dbus-1 && rmdir --ignore-fail-on-non-empty ${D}${datadir}/dbus-1
rm -rf ${D}${libdir}/avahi
@@ -135,7 +143,7 @@ FILES:avahi-daemon = "${sbindir}/avahi-daemon \
${sysconfdir}/avahi/services \
${sysconfdir}/dbus-1 \
${sysconfdir}/init.d/avahi-daemon \
- ${datadir}/avahi/introspection/*.introspect \
+ ${datadir}/dbus-1/interfaces \
${datadir}/avahi/avahi-service.dtd \
${datadir}/avahi/service-types \
${datadir}/dbus-1/system-services"
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch
new file mode 100644
index 0000000000..4d7924d13a
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch
@@ -0,0 +1,58 @@
+From a2696da2f2c50ac43b6c4903f72290d5c3fa9f6f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Thu, 17 Nov 2022 01:51:53 +0100
+Subject: [PATCH] Emit error if requested service is not found
+
+It currently just crashes instead of replying with error. Check return
+value and emit error instead of passing NULL pointer to reply.
+
+Fixes #375
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-1981.patch?h=ubuntu/jammy-security
+Upstream commit https://github.com/lathiat/avahi/commit/a2696da2f2c50ac43b6c4903f72290d5c3fa9f6f]
+CVE: CVE-2023-1981
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-daemon/dbus-protocol.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/avahi-daemon/dbus-protocol.c b/avahi-daemon/dbus-protocol.c
+index 70d7687bc..406d0b441 100644
+--- a/avahi-daemon/dbus-protocol.c
++++ b/avahi-daemon/dbus-protocol.c
+@@ -375,10 +375,14 @@ static DBusHandlerResult dbus_get_alternative_host_name(DBusConnection *c, DBusM
+ }
+
+ t = avahi_alternative_host_name(n);
+- avahi_dbus_respond_string(c, m, t);
+- avahi_free(t);
++ if (t) {
++ avahi_dbus_respond_string(c, m, t);
++ avahi_free(t);
+
+- return DBUS_HANDLER_RESULT_HANDLED;
++ return DBUS_HANDLER_RESULT_HANDLED;
++ } else {
++ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NOT_FOUND, "Hostname not found");
++ }
+ }
+
+ static DBusHandlerResult dbus_get_alternative_service_name(DBusConnection *c, DBusMessage *m, DBusError *error) {
+@@ -389,10 +393,14 @@ static DBusHandlerResult dbus_get_alternative_service_name(DBusConnection *c, DB
+ }
+
+ t = avahi_alternative_service_name(n);
+- avahi_dbus_respond_string(c, m, t);
+- avahi_free(t);
++ if (t) {
++ avahi_dbus_respond_string(c, m, t);
++ avahi_free(t);
+
+- return DBUS_HANDLER_RESULT_HANDLED;
++ return DBUS_HANDLER_RESULT_HANDLED;
++ } else {
++ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NOT_FOUND, "Service not found");
++ }
+ }
+
+ static DBusHandlerResult dbus_create_new_entry_group(DBusConnection *c, DBusMessage *m, DBusError *error) {
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch
new file mode 100644
index 0000000000..f0f6c4bf7b
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch
@@ -0,0 +1,47 @@
+From a337a1ba7d15853fb56deef1f464529af6e3a1cf Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Mon, 23 Oct 2023 20:29:31 +0000
+Subject: [PATCH]core: reject overly long TXT resource records
+Closes https://github.com/lathiat/avahi/issues/455
+
+Upstream-Status: Backport [https://github.com/lathiat/avahi/pull/500/commits/a337a1ba7d15853fb56deef1f464529af6e3a1cf]
+CVE: CVE-2023-38469
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ avahi-core/rr.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/avahi-core/rr.c b/avahi-core/rr.c
+index 7fa0bee..b03a24c 100644
+--- a/avahi-core/rr.c
++++ b/avahi-core/rr.c
+@@ -32,6 +32,7 @@
+ #include <avahi-common/malloc.h>
+ #include <avahi-common/defs.h>
+
++#include "dns.h"
+ #include "rr.h"
+ #include "log.h"
+ #include "util.h"
+@@ -688,11 +689,17 @@ int avahi_record_is_valid(AvahiRecord *r) {
+ case AVAHI_DNS_TYPE_TXT: {
+
+ AvahiStringList *strlst;
++ size_t used = 0;
+
+- for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next)
++ for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next) {
+ if (strlst->size > 255 || strlst->size <= 0)
+ return 0;
+
++ used += 1+strlst->size;
++ if (used > AVAHI_DNS_RDATA_MAX)
++ return 0;
++ }
++
+ return 1;
+ }
+ }
+--
+2.40.0
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch
new file mode 100644
index 0000000000..f8f60ddca1
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch
@@ -0,0 +1,65 @@
+From c6cab87df290448a63323c8ca759baa516166237 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Wed, 25 Oct 2023 18:15:42 +0000
+Subject: [PATCH] tests: pass overly long TXT resource records
+
+to make sure they don't crash avahi any more.
+It reproduces https://github.com/lathiat/avahi/issues/455
+
+Canonical notes:
+nickgalanis> removed first hunk since there is no .github dir in this release
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38469-2.patch?h=ubuntu/jammy-security
+Upstream commit https://github.com/lathiat/avahi/commit/c6cab87df290448a63323c8ca759baa516166237]
+CVE: CVE-2023-38469
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-client/client-test.c | 14 ++++++++++++++
+ 1 files changed, 14 insertions(+)
+
+Index: avahi-0.8/avahi-client/client-test.c
+===================================================================
+--- avahi-0.8.orig/avahi-client/client-test.c
++++ avahi-0.8/avahi-client/client-test.c
+@@ -22,6 +22,7 @@
+ #endif
+
+ #include <stdio.h>
++#include <string.h>
+ #include <assert.h>
+
+ #include <avahi-client/client.h>
+@@ -33,6 +34,8 @@
+ #include <avahi-common/malloc.h>
+ #include <avahi-common/timeval.h>
+
++#include <avahi-core/dns.h>
++
+ static const AvahiPoll *poll_api = NULL;
+ static AvahiSimplePoll *simple_poll = NULL;
+
+@@ -222,6 +225,9 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ uint32_t cookie;
+ struct timeval tv;
+ AvahiAddress a;
++ uint8_t rdata[AVAHI_DNS_RDATA_MAX+1];
++ AvahiStringList *txt = NULL;
++ int r;
+
+ simple_poll = avahi_simple_poll_new();
+ poll_api = avahi_simple_poll_get(simple_poll);
+@@ -258,6 +264,14 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ printf("%s\n", avahi_strerror(avahi_entry_group_add_service (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "Lathiat's Site", "_http._tcp", NULL, NULL, 80, "foo=bar", NULL)));
+ printf("add_record: %d\n", avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "\5booya", 6));
+
++ memset(rdata, 1, sizeof(rdata));
++ r = avahi_string_list_parse(rdata, sizeof(rdata), &txt);
++ assert(r >= 0);
++ assert(avahi_string_list_serialize(txt, NULL, 0) == sizeof(rdata));
++ error = avahi_entry_group_add_service_strlst(group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", "_qotd._tcp", NULL, NULL, 123, txt);
++ assert(error == AVAHI_ERR_INVALID_RECORD);
++ avahi_string_list_free(txt);
++
+ avahi_entry_group_commit (group);
+
+ domain = avahi_domain_browser_new (avahi, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, NULL, AVAHI_DOMAIN_BROWSER_BROWSE, 0, avahi_domain_browser_callback, (char*) "omghai3u");
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch
new file mode 100644
index 0000000000..5cf9af6fd6
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch
@@ -0,0 +1,59 @@
+From 26806dbde54c5b40a2bf108d334ba59ec9d242d6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Tue, 11 Apr 2023 15:29:59 +0200
+Subject: [PATCH]Ensure each label is at least one byte long
+
+The only allowed exception is single dot, where it should return empty
+string.
+
+Fixes #454.
+
+Upstream-Status: Backport [https://github.com/lathiat/avahi/commit/94cb6489114636940ac683515417990b55b5d66c]
+CVE: CVE-2023-38470
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ avahi-common/domain-test.c | 14 ++++++++++++++
+ avahi-common/domain.c | 2 +-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/avahi-common/domain-test.c b/avahi-common/domain-test.c
+index cf763ec..3acc1c1 100644
+--- a/avahi-common/domain-test.c
++++ b/avahi-common/domain-test.c
+@@ -45,6 +45,20 @@ int main(AVAHI_GCC_UNUSED int argc, AVAHI_GCC_UNUSED char *argv[]) {
+ printf("%s\n", s = avahi_normalize_name_strdup("fo\\\\o\\..f oo."));
+ avahi_free(s);
+
++ printf("%s\n", s = avahi_normalize_name_strdup("."));
++ avahi_free(s);
++
++ s = avahi_normalize_name_strdup(",.=.}.=.?-.}.=.?.?.}.}.?.?.?.z.?.?.}.}."
++ "}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.}.}.}"
++ ".?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.?.zM.?`"
++ "?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}??.}.}.?.?."
++ "?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.?`?.}.}.}."
++ "??.?.zM.?`?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}?"
++ "?.}.}.?.?.?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM."
++ "?`?.}.}.}.?.?.?.r.=.=.?.?`.?.?}.}.}.?.?.?.r.=.?.}.=.?.?."
++ "}.?.?.?.}.=.?.?.}");
++ assert(s == NULL);
++
+ printf("%i\n", avahi_domain_equal("\\065aa bbb\\.\\046cc.cc\\\\.dee.fff.", "Aaa BBB\\.\\.cc.cc\\\\.dee.fff"));
+ printf("%i\n", avahi_domain_equal("A", "a"));
+
+diff --git a/avahi-common/domain.c b/avahi-common/domain.c
+index 3b1ab68..e66d241 100644
+--- a/avahi-common/domain.c
++++ b/avahi-common/domain.c
+@@ -201,7 +201,7 @@ char *avahi_normalize_name(const char *s, char *ret_s, size_t size) {
+ }
+
+ if (!empty) {
+- if (size < 1)
++ if (size < 2)
+ return NULL;
+
+ *(r++) = '.';
+--
+2.40.0
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch
new file mode 100644
index 0000000000..e0736bf210
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch
@@ -0,0 +1,52 @@
+From 20dec84b2480821704258bc908e7b2bd2e883b24 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Tue, 19 Sep 2023 03:21:25 +0000
+Subject: [PATCH] [common] bail out when escaped labels can't fit into ret
+
+Fixes:
+```
+==93410==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7f9e76f14c16 at pc 0x00000047208d bp 0x7ffee90a6a00 sp 0x7ffee90a61c8
+READ of size 1110 at 0x7f9e76f14c16 thread T0
+ #0 0x47208c in __interceptor_strlen (out/fuzz-domain+0x47208c) (BuildId: 731b20c1eef22c2104e75a6496a399b10cfc7cba)
+ #1 0x534eb0 in avahi_strdup avahi/avahi-common/malloc.c:167:12
+ #2 0x53862c in avahi_normalize_name_strdup avahi/avahi-common/domain.c:226:12
+```
+and
+```
+fuzz-domain: fuzz/fuzz-domain.c:38: int LLVMFuzzerTestOneInput(const uint8_t *, size_t): Assertion `avahi_domain_equal(s, t)' failed.
+==101571== ERROR: libFuzzer: deadly signal
+ #0 0x501175 in __sanitizer_print_stack_trace (/home/vagrant/avahi/out/fuzz-domain+0x501175) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #1 0x45ad2c in fuzzer::PrintStackTrace() (/home/vagrant/avahi/out/fuzz-domain+0x45ad2c) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #2 0x43fc07 in fuzzer::Fuzzer::CrashCallback() (/home/vagrant/avahi/out/fuzz-domain+0x43fc07) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #3 0x7f1581d7ebaf (/lib64/libc.so.6+0x3dbaf) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #4 0x7f1581dcf883 in __pthread_kill_implementation (/lib64/libc.so.6+0x8e883) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #5 0x7f1581d7eafd in gsignal (/lib64/libc.so.6+0x3dafd) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #6 0x7f1581d6787e in abort (/lib64/libc.so.6+0x2687e) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #7 0x7f1581d6779a in __assert_fail_base.cold (/lib64/libc.so.6+0x2679a) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #8 0x7f1581d77186 in __assert_fail (/lib64/libc.so.6+0x36186) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #9 0x5344a4 in LLVMFuzzerTestOneInput /home/vagrant/avahi/fuzz/fuzz-domain.c:38:9
+```
+
+It's a follow-up to 94cb6489114636940ac683515417990b55b5d66c
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38470-2.patch?h=ubuntu/jammy-security
+CVE: CVE-2023-38470 #Follow-up patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-common/domain.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: avahi-0.8/avahi-common/domain.c
+===================================================================
+--- avahi-0.8.orig/avahi-common/domain.c
++++ avahi-0.8/avahi-common/domain.c
+@@ -210,7 +210,8 @@ char *avahi_normalize_name(const char *s
+ } else
+ empty = 0;
+
+- avahi_escape_label(label, strlen(label), &r, &size);
++ if (!(avahi_escape_label(label, strlen(label), &r, &size)))
++ return NULL;
+ }
+
+ return ret_s;
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch
new file mode 100644
index 0000000000..40b61b71dd
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch
@@ -0,0 +1,73 @@
+From 9cd4ea89b3ac89b7bb0196fda1aa88cd51b106b6 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Mon, 23 Oct 2023 13:38:35 +0200
+Subject: [PATCH] core: extract host name using avahi_unescape_label()
+
+Previously we could create invalid escape sequence when we split the
+string on dot. For example, from valid host name "foo\\.bar" we have
+created invalid name "foo\\" and tried to set that as the host name
+which crashed the daemon.
+
+Fixes #453
+
+Upstream-Status: Backport [https://github.com/lathiat/avahi/commit/894f085f402e023a98cbb6f5a3d117bd88d93b09]
+CVE: CVE-2023-38471
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ avahi-core/server.c | 27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+diff --git a/avahi-core/server.c b/avahi-core/server.c
+index e507750..40f1d68 100644
+--- a/avahi-core/server.c
++++ b/avahi-core/server.c
+@@ -1295,7 +1295,11 @@ static void update_fqdn(AvahiServer *s) {
+ }
+
+ int avahi_server_set_host_name(AvahiServer *s, const char *host_name) {
+- char *hn = NULL;
++ char label_escaped[AVAHI_LABEL_MAX*4+1];
++ char label[AVAHI_LABEL_MAX];
++ char *hn = NULL, *h;
++ size_t len;
++
+ assert(s);
+
+ AVAHI_CHECK_VALIDITY(s, !host_name || avahi_is_valid_host_name(host_name), AVAHI_ERR_INVALID_HOST_NAME);
+@@ -1305,17 +1309,28 @@ int avahi_server_set_host_name(AvahiServer *s, const char *host_name) {
+ else
+ hn = avahi_normalize_name_strdup(host_name);
+
+- hn[strcspn(hn, ".")] = 0;
++ h = hn;
++ if (!avahi_unescape_label((const char **)&hn, label, sizeof(label))) {
++ avahi_free(h);
++ return AVAHI_ERR_INVALID_HOST_NAME;
++ }
++
++ avahi_free(h);
++
++ h = label_escaped;
++ len = sizeof(label_escaped);
++ if (!avahi_escape_label(label, strlen(label), &h, &len))
++ return AVAHI_ERR_INVALID_HOST_NAME;
+
+- if (avahi_domain_equal(s->host_name, hn) && s->state != AVAHI_SERVER_COLLISION) {
+- avahi_free(hn);
++ if (avahi_domain_equal(s->host_name, label_escaped) && s->state != AVAHI_SERVER_COLLISION)
+ return avahi_server_set_errno(s, AVAHI_ERR_NO_CHANGE);
+- }
+
+ withdraw_host_rrs(s);
+
+ avahi_free(s->host_name);
+- s->host_name = hn;
++ s->host_name = avahi_strdup(label_escaped);
++ if (!s->host_name)
++ return AVAHI_ERR_NO_MEMORY;
+
+ update_fqdn(s);
+
+--
+2.40.0
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch
new file mode 100644
index 0000000000..44737bfc2e
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch
@@ -0,0 +1,52 @@
+From b675f70739f404342f7f78635d6e2dcd85a13460 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Tue, 24 Oct 2023 22:04:51 +0000
+Subject: [PATCH] core: return errors from avahi_server_set_host_name properly
+
+It's a follow-up to 894f085f402e023a98cbb6f5a3d117bd88d93b09
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38471-2.patch?h=ubuntu/jammy-security
+Upstream commit https://github.com/lathiat/avahi/commit/b675f70739f404342f7f78635d6e2dcd85a13460]
+CVE: CVE-2023-38471 #Follow-up Patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-core/server.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+Index: avahi-0.8/avahi-core/server.c
+===================================================================
+--- avahi-0.8.orig/avahi-core/server.c
++++ avahi-0.8/avahi-core/server.c
+@@ -1309,10 +1309,13 @@ int avahi_server_set_host_name(AvahiServ
+ else
+ hn = avahi_normalize_name_strdup(host_name);
+
++ if (!hn)
++ return avahi_server_set_errno(s, AVAHI_ERR_NO_MEMORY);
++
+ h = hn;
+ if (!avahi_unescape_label((const char **)&hn, label, sizeof(label))) {
+ avahi_free(h);
+- return AVAHI_ERR_INVALID_HOST_NAME;
++ return avahi_server_set_errno(s, AVAHI_ERR_INVALID_HOST_NAME);
+ }
+
+ avahi_free(h);
+@@ -1320,7 +1323,7 @@ int avahi_server_set_host_name(AvahiServ
+ h = label_escaped;
+ len = sizeof(label_escaped);
+ if (!avahi_escape_label(label, strlen(label), &h, &len))
+- return AVAHI_ERR_INVALID_HOST_NAME;
++ return avahi_server_set_errno(s, AVAHI_ERR_INVALID_HOST_NAME);
+
+ if (avahi_domain_equal(s->host_name, label_escaped) && s->state != AVAHI_SERVER_COLLISION)
+ return avahi_server_set_errno(s, AVAHI_ERR_NO_CHANGE);
+@@ -1330,7 +1333,7 @@ int avahi_server_set_host_name(AvahiServ
+ avahi_free(s->host_name);
+ s->host_name = avahi_strdup(label_escaped);
+ if (!s->host_name)
+- return AVAHI_ERR_NO_MEMORY;
++ return avahi_server_set_errno(s, AVAHI_ERR_NO_MEMORY);
+
+ update_fqdn(s);
+
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch
new file mode 100644
index 0000000000..85dbded73b
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch
@@ -0,0 +1,46 @@
+From b024ae5749f4aeba03478e6391687c3c9c8dee40 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Thu, 19 Oct 2023 17:36:44 +0200
+Subject: [PATCH] core: make sure there is rdata to process before parsing it
+
+Fixes #452
+
+CVE-2023-38472
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38472.patch?h=ubuntu/jammy-security
+Upstream commit https://github.com/lathiat/avahi/commit/b024ae5749f4aeba03478e6391687c3c9c8dee40]
+CVE: CVE-2023-38472
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-client/client-test.c | 3 +++
+ avahi-daemon/dbus-entry-group.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+Index: avahi-0.8/avahi-client/client-test.c
+===================================================================
+--- avahi-0.8.orig/avahi-client/client-test.c
++++ avahi-0.8/avahi-client/client-test.c
+@@ -272,6 +272,9 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ assert(error == AVAHI_ERR_INVALID_RECORD);
+ avahi_string_list_free(txt);
+
++ error = avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "", 0);
++ assert(error != AVAHI_OK);
++
+ avahi_entry_group_commit (group);
+
+ domain = avahi_domain_browser_new (avahi, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, NULL, AVAHI_DOMAIN_BROWSER_BROWSE, 0, avahi_domain_browser_callback, (char*) "omghai3u");
+Index: avahi-0.8/avahi-daemon/dbus-entry-group.c
+===================================================================
+--- avahi-0.8.orig/avahi-daemon/dbus-entry-group.c
++++ avahi-0.8/avahi-daemon/dbus-entry-group.c
+@@ -340,7 +340,7 @@ DBusHandlerResult avahi_dbus_msg_entry_g
+ if (!(r = avahi_record_new_full (name, clazz, type, ttl)))
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NO_MEMORY, NULL);
+
+- if (avahi_rdata_parse (r, rdata, size) < 0) {
++ if (!rdata || avahi_rdata_parse (r, rdata, size) < 0) {
+ avahi_record_unref (r);
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_INVALID_RDATA, NULL);
+ }
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch
new file mode 100644
index 0000000000..8a372a072a
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch
@@ -0,0 +1,108 @@
+From b448c9f771bada14ae8de175695a9729f8646797 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Wed, 11 Oct 2023 17:45:44 +0200
+Subject: [PATCH] common: derive alternative host name from its
+ unescaped version
+
+Normalization of the input makes sure we don't have to deal with special
+cases like an unescaped dot at the end of a label.
+
+Upstream-Status: Backport [https://github.com/lathiat/avahi/commit/b448c9f771bada14ae8de175695a9729f8646797]
+CVE: CVE-2023-38473
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ avahi-common/alternative-test.c | 3 +++
+ avahi-common/alternative.c | 27 +++++++++++++++++++--------
+ 2 files changed, 22 insertions(+), 8 deletions(-)
+
+diff --git a/avahi-common/alternative-test.c b/avahi-common/alternative-test.c
+index 9255435..681fc15 100644
+--- a/avahi-common/alternative-test.c
++++ b/avahi-common/alternative-test.c
+@@ -31,6 +31,9 @@ int main(AVAHI_GCC_UNUSED int argc, AVAHI_GCC_UNUSED char *argv[]) {
+ const char* const test_strings[] = {
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXüüüüüüü",
++ ").",
++ "\\.",
++ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\\\\",
+ "gurke",
+ "-",
+ " #",
+diff --git a/avahi-common/alternative.c b/avahi-common/alternative.c
+index b3d39f0..a094e6d 100644
+--- a/avahi-common/alternative.c
++++ b/avahi-common/alternative.c
+@@ -49,15 +49,20 @@ static void drop_incomplete_utf8(char *c) {
+ }
+
+ char *avahi_alternative_host_name(const char *s) {
++ char label[AVAHI_LABEL_MAX], alternative[AVAHI_LABEL_MAX*4+1];
++ char *alt, *r, *ret;
+ const char *e;
+- char *r;
++ size_t len;
+
+ assert(s);
+
+ if (!avahi_is_valid_host_name(s))
+ return NULL;
+
+- if ((e = strrchr(s, '-'))) {
++ if (!avahi_unescape_label(&s, label, sizeof(label)))
++ return NULL;
++
++ if ((e = strrchr(label, '-'))) {
+ const char *p;
+
+ e++;
+@@ -74,19 +79,18 @@ char *avahi_alternative_host_name(const char *s) {
+
+ if (e) {
+ char *c, *m;
+- size_t l;
+ int n;
+
+ n = atoi(e)+1;
+ if (!(m = avahi_strdup_printf("%i", n)))
+ return NULL;
+
+- l = e-s-1;
++ len = e-label-1;
+
+- if (l >= AVAHI_LABEL_MAX-1-strlen(m)-1)
+- l = AVAHI_LABEL_MAX-1-strlen(m)-1;
++ if (len >= AVAHI_LABEL_MAX-1-strlen(m)-1)
++ len = AVAHI_LABEL_MAX-1-strlen(m)-1;
+
+- if (!(c = avahi_strndup(s, l))) {
++ if (!(c = avahi_strndup(label, len))) {
+ avahi_free(m);
+ return NULL;
+ }
+@@ -100,7 +104,7 @@ char *avahi_alternative_host_name(const char *s) {
+ } else {
+ char *c;
+
+- if (!(c = avahi_strndup(s, AVAHI_LABEL_MAX-1-2)))
++ if (!(c = avahi_strndup(label, AVAHI_LABEL_MAX-1-2)))
+ return NULL;
+
+ drop_incomplete_utf8(c);
+@@ -109,6 +113,13 @@ char *avahi_alternative_host_name(const char *s) {
+ avahi_free(c);
+ }
+
++ alt = alternative;
++ len = sizeof(alternative);
++ ret = avahi_escape_label(r, strlen(r), &alt, &len);
++
++ avahi_free(r);
++ r = avahi_strdup(ret);
++
+ assert(avahi_is_valid_host_name(r));
+
+ return r;
+--
+2.40.0
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/0001-avoid-start-failure-with-bind-user.patch b/meta/recipes-connectivity/bind/bind/0001-avoid-start-failure-with-bind-user.patch
index ec1bc7b567..ec1bc7b567 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/0001-avoid-start-failure-with-bind-user.patch
+++ b/meta/recipes-connectivity/bind/bind/0001-avoid-start-failure-with-bind-user.patch
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/0001-named-lwresd-V-and-start-log-hide-build-options.patch b/meta/recipes-connectivity/bind/bind/0001-named-lwresd-V-and-start-log-hide-build-options.patch
index 4c10f33f04..4c10f33f04 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/0001-named-lwresd-V-and-start-log-hide-build-options.patch
+++ b/meta/recipes-connectivity/bind/bind/0001-named-lwresd-V-and-start-log-hide-build-options.patch
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/bind-ensure-searching-for-json-headers-searches-sysr.patch b/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch
index f1abd179e8..f1abd179e8 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/bind-ensure-searching-for-json-headers-searches-sysr.patch
+++ b/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/bind9 b/meta/recipes-connectivity/bind/bind/bind9
index 968679ff7f..968679ff7f 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/bind9
+++ b/meta/recipes-connectivity/bind/bind/bind9
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/conf.patch b/meta/recipes-connectivity/bind/bind/conf.patch
index aa3642acec..aa3642acec 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/conf.patch
+++ b/meta/recipes-connectivity/bind/bind/conf.patch
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/generate-rndc-key.sh b/meta/recipes-connectivity/bind/bind/generate-rndc-key.sh
index 633e29c0e6..633e29c0e6 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/generate-rndc-key.sh
+++ b/meta/recipes-connectivity/bind/bind/generate-rndc-key.sh
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/init.d-add-support-for-read-only-rootfs.patch b/meta/recipes-connectivity/bind/bind/init.d-add-support-for-read-only-rootfs.patch
index 11db95ede1..11db95ede1 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/init.d-add-support-for-read-only-rootfs.patch
+++ b/meta/recipes-connectivity/bind/bind/init.d-add-support-for-read-only-rootfs.patch
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/make-etc-initd-bind-stop-work.patch b/meta/recipes-connectivity/bind/bind/make-etc-initd-bind-stop-work.patch
index 146f3e35db..146f3e35db 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/make-etc-initd-bind-stop-work.patch
+++ b/meta/recipes-connectivity/bind/bind/make-etc-initd-bind-stop-work.patch
diff --git a/meta/recipes-connectivity/bind/bind-9.18.2/named.service b/meta/recipes-connectivity/bind/bind/named.service
index cda56ef015..cda56ef015 100644
--- a/meta/recipes-connectivity/bind/bind-9.18.2/named.service
+++ b/meta/recipes-connectivity/bind/bind/named.service
diff --git a/meta/recipes-connectivity/bind/bind_9.18.2.bb b/meta/recipes-connectivity/bind/bind_9.18.24.bb
index 1c77aceb9f..fbbebe89ad 100644
--- a/meta/recipes-connectivity/bind/bind_9.18.2.bb
+++ b/meta/recipes-connectivity/bind/bind_9.18.24.bb
@@ -4,7 +4,7 @@ DESCRIPTION = "BIND 9 provides a full-featured Domain Name Server system"
SECTION = "console/network"
LICENSE = "MPL-2.0"
-LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=9a4a897f202c0710e07f2f2836bc2b62"
+LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=c7a0b6d9a1b692a5da9af9d503671f43"
DEPENDS = "openssl libcap zlib libuv"
@@ -20,7 +20,7 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.xz \
file://0001-avoid-start-failure-with-bind-user.patch \
"
-SRC_URI[sha256sum] = "2e4b38779bba0a23ee634fdf7c525fd9794c41d692bfd83cda25823a2a3ed969"
+SRC_URI[sha256sum] = "709d73023c9115ddad3bab65b6c8c79a590196d0d114f5d0ca2533dbd52ddf66"
UPSTREAM_CHECK_URI = "https://ftp.isc.org/isc/bind9/"
# follow the ESV versions divisible by 2
@@ -39,15 +39,13 @@ PACKAGECONFIG[readline] = "--with-readline=readline,,readline"
PACKAGECONFIG[libedit] = "--with-readline=libedit,,libedit"
PACKAGECONFIG[dns-over-http] = "--enable-doh,--disable-doh,nghttp2"
-EXTRA_OECONF = " --disable-devpoll --disable-auto-validation --enable-epoll \
+EXTRA_OECONF = " --disable-auto-validation \
--with-gssapi=no --with-lmdb=no --with-zlib \
--sysconfdir=${sysconfdir}/bind \
--with-openssl=${STAGING_DIR_HOST}${prefix} \
"
LDFLAGS:append = " -lz"
-inherit ${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3native setuptools3-base', '', d)}
-
# dhcp needs .la so keep them
REMOVE_LIBTOOL_LA = "0"
@@ -67,12 +65,6 @@ do_install:append() {
install -d "${D}${sysconfdir}/init.d"
install -m 644 ${S}/conf/* "${D}${sysconfdir}/bind/"
install -m 755 "${S}/init.d" "${D}${sysconfdir}/init.d/bind"
- if ${@bb.utils.contains('PACKAGECONFIG', 'python3', 'true', 'false', d)}; then
- sed -i -e '1s,#!.*python3,#! /usr/bin/python3,' \
- ${D}${sbindir}/dnssec-coverage \
- ${D}${sbindir}/dnssec-checkds \
- ${D}${sbindir}/dnssec-keymgr
- fi
# Install systemd related files
install -d ${D}${sbindir}
@@ -119,9 +111,4 @@ FILES_SOLIBSDEV = "${libdir}/*[!0-9].so ${libdir}/libbind9.so"
FILES:${PN}-libs = "${libdir}/named/*.so* ${libdir}/*-${PV}.so"
FILES:${PN}-staticdev += "${libdir}/*.la"
-PACKAGE_BEFORE_PN += "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3-bind', '', d)}"
-FILES:python3-bind = "${sbindir}/dnssec-coverage ${sbindir}/dnssec-checkds \
- ${sbindir}/dnssec-keymgr ${PYTHON_SITEPACKAGES_DIR}"
-
RDEPENDS:${PN}-dev = ""
-RDEPENDS:python3-bind = "python3-core python3-ply"
diff --git a/meta/recipes-connectivity/bluez5/bluez5.inc b/meta/recipes-connectivity/bluez5/bluez5.inc
index 22dd07b348..7786b65670 100644
--- a/meta/recipes-connectivity/bluez5/bluez5.inc
+++ b/meta/recipes-connectivity/bluez5/bluez5.inc
@@ -7,6 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e \
file://COPYING.LIB;md5=fb504b67c50331fc78734fed90fb0e09 \
file://src/main.c;beginline=1;endline=24;md5=0ad83ca0dc37ab08af448777c581e7ac"
DEPENDS = "dbus glib-2.0"
+RDEPENDS:${PN} += "dbus"
PROVIDES += "bluez-hcidump"
RPROVIDES:${PN} += "bluez-hcidump"
@@ -53,7 +54,7 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/bluetooth/bluez-${PV}.tar.xz \
${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '', 'file://0001-Allow-using-obexd-without-systemd-in-the-user-sessio.patch', d)} \
file://0001-tests-add-a-target-for-building-tests-without-runnin.patch \
file://0001-test-gatt-Fix-hung-issue.patch \
- file://fix_service.patch \
+ file://CVE-2023-45866.patch \
"
S = "${WORKDIR}/bluez-${PV}"
@@ -68,6 +69,8 @@ EXTRA_OECONF = "\
--without-zsh-completion-dir \
"
+CFLAGS += "-DFIRMWARE_DIR=\\"${nonarch_base_libdir}/firmware\\""
+
# bluez5 builds a large number of useful utilities but does not
# install them. Specify which ones we want put into ${PN}-noinst-tools.
NOINST_TOOLS_READLINE ??= ""
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch
new file mode 100644
index 0000000000..5bb31d866a
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch
@@ -0,0 +1,56 @@
+From 25a471a83e02e1effb15d5a488b3f0085eaeb675 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Tue, 10 Oct 2023 13:03:12 -0700
+Subject: [PATCH] input.conf: Change default of ClassicBondedOnly
+
+This changes the default of ClassicBondedOnly since defaulting to false
+is not in line with the HID specification, which mandates the use of
+Security Mode 4:
+
+BLUETOOTH SPECIFICATION Page 84 of 123
+Human Interface Device (HID) Profile:
+
+ 5.4.3.4.2 Security Modes
+ Bluetooth HID Hosts shall use Security Mode 4 when interoperating with
+ Bluetooth HID devices that are compliant to the Bluetooth Core
+ Specification v2.1+EDR[6].
+
+Upstream-Status: Backport
+[https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/profiles/input?id=25a471a83e02e1effb15d5a488b3f0085eaeb675]
+
+CVE: CVE-2023-45866
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ profiles/input/device.c | 2 +-
+ profiles/input/input.conf | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/profiles/input/device.c b/profiles/input/device.c
+index 4a50ea9..4310dd1 100644
+--- a/profiles/input/device.c
++++ b/profiles/input/device.c
+@@ -81,7 +81,7 @@ struct input_device {
+
+ static int idle_timeout = 0;
+ static bool uhid_enabled = false;
+-static bool classic_bonded_only = false;
++static bool classic_bonded_only = true;
+
+ void input_set_idle_timeout(int timeout)
+ {
+diff --git a/profiles/input/input.conf b/profiles/input/input.conf
+index 4c70bc5..d8645f3 100644
+--- a/profiles/input/input.conf
++++ b/profiles/input/input.conf
+@@ -17,7 +17,7 @@
+ # platforms may want to make sure that input connections only come from bonded
+ # device connections. Several older mice have been known for not supporting
+ # pairing/encryption.
+-# Defaults to false to maximize device compatibility.
++# Defaults to true for security.
+ #ClassicBondedOnly=true
+
+ # LE upgrade security
+--
+2.40.0
diff --git a/meta/recipes-connectivity/bluez5/bluez5/fix_service.patch b/meta/recipes-connectivity/bluez5/bluez5/fix_service.patch
deleted file mode 100644
index 96fdf6b299..0000000000
--- a/meta/recipes-connectivity/bluez5/bluez5/fix_service.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-The systemd bluetooth service failed to start because the /var/lib/bluetooth
-path of ReadWritePaths= is created by the bluetooth daemon itself.
-
-The commit systemd: Add more filesystem lockdown (442d211) add ReadWritePaths=/etc/bluetooth
-and ReadOnlyPaths=/var/lib/bluetooth options to the bluetooth systemd service.
-The existing ProtectSystem=full option mounts the /usr, the boot loader
-directories and /etc read-only. This means the two option are useless and could be removed.
-
-Upstream-Status: Submitted [https://github.com/bluez/bluez/issues/329]
-
-Index: bluez-5.64/src/bluetooth.service.in
-===================================================================
---- bluez-5.64.orig/src/bluetooth.service.in
-+++ bluez-5.64/src/bluetooth.service.in
-@@ -15,12 +15,12 @@ LimitNPROC=1
-
- # Filesystem lockdown
- ProtectHome=true
--ProtectSystem=full
-+ProtectSystem=strict
- PrivateTmp=true
- ProtectKernelTunables=true
- ProtectControlGroups=true
--ReadWritePaths=@statedir@
--ReadOnlyPaths=@confdir@
-+ConfigurationDirectory=bluetooth
-+StateDirectory=bluetooth
-
- # Execute Mappings
- MemoryDenyWriteExecute=true
diff --git a/meta/recipes-connectivity/bluez5/bluez5_5.64.bb b/meta/recipes-connectivity/bluez5/bluez5_5.65.bb
index 4319f9aae8..4c15aeb46d 100644
--- a/meta/recipes-connectivity/bluez5/bluez5_5.64.bb
+++ b/meta/recipes-connectivity/bluez5/bluez5_5.65.bb
@@ -1,6 +1,6 @@
require bluez5.inc
-SRC_URI[sha256sum] = "ae437e65b6b3070c198bc5b0109fe9cdeb9eaa387380e2072f9de65fe8a1de34"
+SRC_URI[sha256sum] = "2565a4d48354b576e6ad92e25b54ed66808296581c8abb80587051f9993d96d4"
# These issues have kernel fixes rather than bluez fixes so exclude here
CVE_CHECK_IGNORE += "CVE-2020-12352 CVE-2020-24490"
diff --git a/meta/recipes-connectivity/connman/connman.inc b/meta/recipes-connectivity/connman/connman.inc
index 5880ecd5d4..0c1dc7e5dd 100644
--- a/meta/recipes-connectivity/connman/connman.inc
+++ b/meta/recipes-connectivity/connman/connman.inc
@@ -27,6 +27,7 @@ EXTRA_OECONF += "\
--enable-ethernet \
--enable-tools \
--disable-polkit \
+ --runstatedir=/run \
"
PACKAGECONFIG ??= "wispr iptables client\
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch
new file mode 100644
index 0000000000..182c5ca29c
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch
@@ -0,0 +1,37 @@
+From d1a5ede5d255bde8ef707f8441b997563b9312bd Mon Sep 17 00:00:00 2001
+From: Nathan Crandall <ncrandall@tesla.com>
+Date: Tue, 12 Jul 2022 08:56:34 +0200
+Subject: gweb: Fix OOB write in received_data()
+
+There is a mismatch in the handling of binary vs. C-string data with memchr
+and strlen, causing pos, count, and bytes_read to become out of
+sync and resulting in a heap overflow. Instead, do not treat the buffer
+as an ASCII C-string. We calculate the count based on the return value
+of memchr, instead of strlen.
+
+Fixes: CVE-2022-32292
+
+CVE: CVE-2022-32292
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=d1a5ede5d255bde8ef707f8441b997563b9312bd]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gweb/gweb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gweb/gweb.c b/gweb/gweb.c
+index 12fcb1d8..13c6c5f2 100644
+--- a/gweb/gweb.c
++++ b/gweb/gweb.c
+@@ -918,7 +918,7 @@ static gboolean received_data(GIOChannel *channel, GIOCondition cond,
+ }
+
+ *pos = '\0';
+- count = strlen((char *) ptr);
++ count = pos - ptr;
+ if (count > 0 && ptr[count - 1] == '\r') {
+ ptr[--count] = '\0';
+ bytes_read--;
+--
+cgit
+
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-32293_p1.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-32293_p1.patch
new file mode 100644
index 0000000000..b280203594
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-32293_p1.patch
@@ -0,0 +1,141 @@
+From 72343929836de80727a27d6744c869dff045757c Mon Sep 17 00:00:00 2001
+From: Daniel Wagner <wagi@monom.org>
+Date: Tue, 5 Jul 2022 08:32:12 +0200
+Subject: wispr: Add reference counter to portal context
+
+Track the connman_wispr_portal_context lifetime via a
+refcounter. This only adds the infrastructure to do proper reference
+counting.
+
+Fixes: CVE-2022-32293
+CVE: CVE-2022-32293
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=416bfaff988882c553c672e5bfc2d4f648d29e8a]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ src/wispr.c | 52 ++++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 42 insertions(+), 10 deletions(-)
+
+diff --git a/src/wispr.c b/src/wispr.c
+index a07896ca..bde7e63b 100644
+--- a/src/wispr.c
++++ b/src/wispr.c
+@@ -56,6 +56,7 @@ struct wispr_route {
+ };
+
+ struct connman_wispr_portal_context {
++ int refcount;
+ struct connman_service *service;
+ enum connman_ipconfig_type type;
+ struct connman_wispr_portal *wispr_portal;
+@@ -97,6 +98,11 @@ static char *online_check_ipv4_url = NULL;
+ static char *online_check_ipv6_url = NULL;
+ static bool enable_online_to_ready_transition = false;
+
++#define wispr_portal_context_ref(wp_context) \
++ wispr_portal_context_ref_debug(wp_context, __FILE__, __LINE__, __func__)
++#define wispr_portal_context_unref(wp_context) \
++ wispr_portal_context_unref_debug(wp_context, __FILE__, __LINE__, __func__)
++
+ static void connman_wispr_message_init(struct connman_wispr_message *msg)
+ {
+ DBG("");
+@@ -162,9 +168,6 @@ static void free_connman_wispr_portal_context(
+ {
+ DBG("context %p", wp_context);
+
+- if (!wp_context)
+- return;
+-
+ if (wp_context->wispr_portal) {
+ if (wp_context->wispr_portal->ipv4_context == wp_context)
+ wp_context->wispr_portal->ipv4_context = NULL;
+@@ -201,9 +204,38 @@ static void free_connman_wispr_portal_context(
+ g_free(wp_context);
+ }
+
++static struct connman_wispr_portal_context *
++wispr_portal_context_ref_debug(struct connman_wispr_portal_context *wp_context,
++ const char *file, int line, const char *caller)
++{
++ DBG("%p ref %d by %s:%d:%s()", wp_context,
++ wp_context->refcount + 1, file, line, caller);
++
++ __sync_fetch_and_add(&wp_context->refcount, 1);
++
++ return wp_context;
++}
++
++static void wispr_portal_context_unref_debug(
++ struct connman_wispr_portal_context *wp_context,
++ const char *file, int line, const char *caller)
++{
++ if (!wp_context)
++ return;
++
++ DBG("%p ref %d by %s:%d:%s()", wp_context,
++ wp_context->refcount - 1, file, line, caller);
++
++ if (__sync_fetch_and_sub(&wp_context->refcount, 1) != 1)
++ return;
++
++ free_connman_wispr_portal_context(wp_context);
++}
++
+ static struct connman_wispr_portal_context *create_wispr_portal_context(void)
+ {
+- return g_try_new0(struct connman_wispr_portal_context, 1);
++ return wispr_portal_context_ref(
++ g_new0(struct connman_wispr_portal_context, 1));
+ }
+
+ static void free_connman_wispr_portal(gpointer data)
+@@ -215,8 +247,8 @@ static void free_connman_wispr_portal(gpointer data)
+ if (!wispr_portal)
+ return;
+
+- free_connman_wispr_portal_context(wispr_portal->ipv4_context);
+- free_connman_wispr_portal_context(wispr_portal->ipv6_context);
++ wispr_portal_context_unref(wispr_portal->ipv4_context);
++ wispr_portal_context_unref(wispr_portal->ipv6_context);
+
+ g_free(wispr_portal);
+ }
+@@ -452,7 +484,7 @@ static void portal_manage_status(GWebResult *result,
+ connman_info("Client-Timezone: %s", str);
+
+ if (!enable_online_to_ready_transition)
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+
+ __connman_service_ipconfig_indicate_state(service,
+ CONNMAN_SERVICE_STATE_ONLINE, type);
+@@ -616,7 +648,7 @@ static void wispr_portal_request_wispr_login(struct connman_service *service,
+ return;
+ }
+
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+ return;
+ }
+
+@@ -952,7 +984,7 @@ static int wispr_portal_detect(struct connman_wispr_portal_context *wp_context)
+
+ if (wp_context->token == 0) {
+ err = -EINVAL;
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+ }
+ } else if (wp_context->timeout == 0) {
+ wp_context->timeout = g_idle_add(no_proxy_callback, wp_context);
+@@ -1001,7 +1033,7 @@ int __connman_wispr_start(struct connman_service *service,
+
+ /* If there is already an existing context, we wipe it */
+ if (wp_context)
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+
+ wp_context = create_wispr_portal_context();
+ if (!wp_context)
+--
+cgit
+
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-32293_p2.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-32293_p2.patch
new file mode 100644
index 0000000000..56f8fc82de
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-32293_p2.patch
@@ -0,0 +1,174 @@
+From 416bfaff988882c553c672e5bfc2d4f648d29e8a Mon Sep 17 00:00:00 2001
+From: Daniel Wagner <wagi@monom.org>
+Date: Tue, 5 Jul 2022 09:11:09 +0200
+Subject: wispr: Update portal context references
+
+Maintain proper portal context references to avoid UAF.
+
+Fixes: CVE-2022-32293
+CVE: CVE-2022-32293
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=72343929836de80727a27d6744c869dff045757c]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ src/wispr.c | 34 ++++++++++++++++++++++------------
+ 1 file changed, 22 insertions(+), 12 deletions(-)
+
+diff --git a/src/wispr.c b/src/wispr.c
+index bde7e63b..84bed33f 100644
+--- a/src/wispr.c
++++ b/src/wispr.c
+@@ -105,8 +105,6 @@ static bool enable_online_to_ready_transition = false;
+
+ static void connman_wispr_message_init(struct connman_wispr_message *msg)
+ {
+- DBG("");
+-
+ msg->has_error = false;
+ msg->current_element = NULL;
+
+@@ -166,8 +164,6 @@ static void free_wispr_routes(struct connman_wispr_portal_context *wp_context)
+ static void free_connman_wispr_portal_context(
+ struct connman_wispr_portal_context *wp_context)
+ {
+- DBG("context %p", wp_context);
+-
+ if (wp_context->wispr_portal) {
+ if (wp_context->wispr_portal->ipv4_context == wp_context)
+ wp_context->wispr_portal->ipv4_context = NULL;
+@@ -483,9 +479,6 @@ static void portal_manage_status(GWebResult *result,
+ &str))
+ connman_info("Client-Timezone: %s", str);
+
+- if (!enable_online_to_ready_transition)
+- wispr_portal_context_unref(wp_context);
+-
+ __connman_service_ipconfig_indicate_state(service,
+ CONNMAN_SERVICE_STATE_ONLINE, type);
+
+@@ -546,14 +539,17 @@ static void wispr_portal_request_portal(
+ {
+ DBG("");
+
++ wispr_portal_context_ref(wp_context);
+ wp_context->request_id = g_web_request_get(wp_context->web,
+ wp_context->status_url,
+ wispr_portal_web_result,
+ wispr_route_request,
+ wp_context);
+
+- if (wp_context->request_id == 0)
++ if (wp_context->request_id == 0) {
+ wispr_portal_error(wp_context);
++ wispr_portal_context_unref(wp_context);
++ }
+ }
+
+ static bool wispr_input(const guint8 **data, gsize *length,
+@@ -618,13 +614,15 @@ static void wispr_portal_browser_reply_cb(struct connman_service *service,
+ return;
+
+ if (!authentication_done) {
+- wispr_portal_error(wp_context);
+ free_wispr_routes(wp_context);
++ wispr_portal_error(wp_context);
++ wispr_portal_context_unref(wp_context);
+ return;
+ }
+
+ /* Restarting the test */
+ __connman_service_wispr_start(service, wp_context->type);
++ wispr_portal_context_unref(wp_context);
+ }
+
+ static void wispr_portal_request_wispr_login(struct connman_service *service,
+@@ -700,11 +698,13 @@ static bool wispr_manage_message(GWebResult *result,
+
+ wp_context->wispr_result = CONNMAN_WISPR_RESULT_LOGIN;
+
++ wispr_portal_context_ref(wp_context);
+ if (__connman_agent_request_login_input(wp_context->service,
+ wispr_portal_request_wispr_login,
+- wp_context) != -EINPROGRESS)
++ wp_context) != -EINPROGRESS) {
+ wispr_portal_error(wp_context);
+- else
++ wispr_portal_context_unref(wp_context);
++ } else
+ return true;
+
+ break;
+@@ -753,6 +753,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ if (length > 0) {
+ g_web_parser_feed_data(wp_context->wispr_parser,
+ chunk, length);
++ wispr_portal_context_unref(wp_context);
+ return true;
+ }
+
+@@ -770,6 +771,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ switch (status) {
+ case 000:
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -781,11 +783,14 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ if (g_web_result_get_header(result, "X-ConnMan-Status",
+ &str)) {
+ portal_manage_status(result, wp_context);
++ wispr_portal_context_unref(wp_context);
+ return false;
+- } else
++ } else {
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->redirect_url, wp_context);
++ }
+
+ break;
+ case 300:
+@@ -798,6 +803,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ !g_web_result_get_header(result, "Location",
+ &redirect)) {
+
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -808,6 +814,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ wp_context->redirect_url = g_strdup(redirect);
+
++ wispr_portal_context_ref(wp_context);
+ wp_context->request_id = g_web_request_get(wp_context->web,
+ redirect, wispr_portal_web_result,
+ wispr_route_request, wp_context);
+@@ -820,6 +827,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ break;
+ case 505:
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -832,6 +840,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ wp_context->request_id = 0;
+ done:
+ wp_context->wispr_msg.message_type = -1;
++ wispr_portal_context_unref(wp_context);
+ return false;
+ }
+
+@@ -890,6 +899,7 @@ static void proxy_callback(const char *proxy, void *user_data)
+ xml_wispr_parser_callback, wp_context);
+
+ wispr_portal_request_portal(wp_context);
++ wispr_portal_context_unref(wp_context);
+ }
+
+ static gboolean no_proxy_callback(gpointer user_data)
+--
+cgit
+
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch b/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch
new file mode 100644
index 0000000000..a6cabdfb20
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch
@@ -0,0 +1,60 @@
+From 99e2c16ea1cced34a5dc450d76287a1c3e762138 Mon Sep 17 00:00:00 2001
+From: Daniel Wagner <wagi@monom.org>
+Date: Tue, 11 Apr 2023 08:12:56 +0200
+Subject: gdhcp: Verify and sanitize packet length first
+
+Avoid overwriting the read packet length after the initial test. Thus
+move all the length checks which depend on the total length first
+and do not use the total length from the IP packet afterwards.
+
+Reported by Polina Smirnova <moe.hwr@gmail.com>
+
+CVE: CVE-2023-28488
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=99e2c16ea1cced34a5dc450d76287a1c3e762138]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ gdhcp/client.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/gdhcp/client.c b/gdhcp/client.c
+index 3016dfc..28fa606 100644
+--- a/gdhcp/client.c
++++ b/gdhcp/client.c
+@@ -1319,9 +1319,9 @@ static bool sanity_check(struct ip_udp_dhcp_packet *packet, int bytes)
+ static int dhcp_recv_l2_packet(struct dhcp_packet *dhcp_pkt, int fd,
+ struct sockaddr_in *dst_addr)
+ {
+- int bytes;
+ struct ip_udp_dhcp_packet packet;
+ uint16_t check;
++ int bytes, tot_len;
+
+ memset(&packet, 0, sizeof(packet));
+
+@@ -1329,15 +1329,17 @@ static int dhcp_recv_l2_packet(struct dhcp_packet *dhcp_pkt, int fd,
+ if (bytes < 0)
+ return -1;
+
+- if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp)))
+- return -1;
+-
+- if (bytes < ntohs(packet.ip.tot_len))
++ tot_len = ntohs(packet.ip.tot_len);
++ if (bytes > tot_len) {
++ /* ignore any extra garbage bytes */
++ bytes = tot_len;
++ } else if (bytes < tot_len) {
+ /* packet is bigger than sizeof(packet), we did partial read */
+ return -1;
++ }
+
+- /* ignore any extra garbage bytes */
+- bytes = ntohs(packet.ip.tot_len);
++ if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp)))
++ return -1;
+
+ if (!sanity_check(&packet, bytes))
+ return -1;
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/connman/connman_1.41.bb b/meta/recipes-connectivity/connman/connman_1.41.bb
index 736b78eaeb..27b28be41c 100644
--- a/meta/recipes-connectivity/connman/connman_1.41.bb
+++ b/meta/recipes-connectivity/connman/connman_1.41.bb
@@ -5,6 +5,10 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/network/${BPN}/${BP}.tar.xz \
file://0001-connman.service-stop-systemd-resolved-when-we-use-co.patch \
file://connman \
file://no-version-scripts.patch \
+ file://CVE-2022-32293_p1.patch \
+ file://CVE-2022-32293_p2.patch \
+ file://CVE-2022-32292.patch \
+ file://CVE-2023-28488.patch \
"
SRC_URI:append:libc-musl = " file://0002-resolve-musl-does-not-implement-res_ninit.patch"
diff --git a/meta/recipes-connectivity/dhcpcd/dhcpcd_9.4.1.bb b/meta/recipes-connectivity/dhcpcd/dhcpcd_9.4.1.bb
index ab6ffe986c..21b2eebbd8 100644
--- a/meta/recipes-connectivity/dhcpcd/dhcpcd_9.4.1.bb
+++ b/meta/recipes-connectivity/dhcpcd/dhcpcd_9.4.1.bb
@@ -9,15 +9,19 @@ HOMEPAGE = "http://roy.marples.name/projects/dhcpcd/"
LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=d148485768fe85b9f1072b186a7e9b4d"
-UPSTREAM_CHECK_URI = "https://roy.marples.name/downloads/dhcpcd/"
-
-SRC_URI = "https://roy.marples.name/downloads/${BPN}/${BPN}-${PV}.tar.xz \
+SRC_URI = "git://github.com/NetworkConfiguration/dhcpcd;protocol=https;branch=dhcpcd-9 \
file://0001-remove-INCLUDEDIR-to-prevent-build-issues.patch \
+ file://0001-20-resolv.conf-improve-the-sitation-of-working-with-.patch \
+ file://0001-privsep-Allow-getrandom-sysctl-for-newer-glibc.patch \
+ file://0002-privsep-Allow-newfstatat-syscall-as-well.patch \
+ file://0001-privsep-linux-fix-SECCOMP_AUDIT_ARCH-missing-ppc64le.patch \
file://dhcpcd.service \
file://dhcpcd@.service \
+ file://0001-dhcpcd.8-Fix-conflict-error-when-enable-multilib.patch \
"
-SRC_URI[sha256sum] = "819357634efed1ea5cf44ec01b24d3d3f8852fec8b4249925dcc5667c54e376c"
+SRCREV = "3c458fc7fa4146029a1e4f9e98cd7e7adf03081a"
+S = "${WORKDIR}/git"
inherit pkgconfig autotools-brokensep systemd useradd
diff --git a/meta/recipes-connectivity/dhcpcd/files/0001-20-resolv.conf-improve-the-sitation-of-working-with-.patch b/meta/recipes-connectivity/dhcpcd/files/0001-20-resolv.conf-improve-the-sitation-of-working-with-.patch
new file mode 100644
index 0000000000..6f90c88249
--- /dev/null
+++ b/meta/recipes-connectivity/dhcpcd/files/0001-20-resolv.conf-improve-the-sitation-of-working-with-.patch
@@ -0,0 +1,82 @@
+From 02acc4d875ee81e6fd19ef66d69c9f55b4b4a7e7 Mon Sep 17 00:00:00 2001
+From: Chen Qi <Qi.Chen@windriver.com>
+Date: Wed, 9 Nov 2022 16:33:18 +0800
+Subject: [PATCH] 20-resolv.conf: improve the situation of working with systemd
+
+systemd's resolvconf implementation ignores the protocol part.
+See https://github.com/systemd/systemd/issues/25032.
+
+When using 'dhcp server + dns server + dhcpcd + systemd', we
+get an integration issue, that is dhcpcd runs 'resolvconf -d eth0.ra',
+yet systemd's resolvconf treats it as eth0. This will delete the
+DNS information set by 'resolvconf -a eth0.dhcp'.
+
+Fortunately, 20-resolv.conf has the ability to build the resolv.conf
+file contents itself. We can just pass the generated contents to
+systemd's resolvconf. This way, the DNS information is not incorrectly
+deleted. Also, it does not cause behavior regression for dhcpcd
+in other cases.
+
+Upstream-Status: Inappropriate [OE Specific]
+This patch has been rejected by dhcpcd upstream.
+See details in https://github.com/NetworkConfiguration/dhcpcd/pull/152
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ hooks/20-resolv.conf | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/hooks/20-resolv.conf b/hooks/20-resolv.conf
+index 504a6c53..eb6e5845 100644
+--- a/hooks/20-resolv.conf
++++ b/hooks/20-resolv.conf
+@@ -11,8 +11,12 @@ nocarrier_roaming_dir="$state_dir/roaming"
+ NL="
+ "
+ : ${resolvconf:=resolvconf}
++resolvconf_from_systemd=false
+ if type "$resolvconf" >/dev/null 2>&1; then
+ have_resolvconf=true
++ if [ $(basename $(readlink -f $(which $resolvconf))) = resolvectl ]; then
++ resolvconf_from_systemd=true
++ fi
+ else
+ have_resolvconf=false
+ fi
+@@ -69,8 +73,13 @@ build_resolv_conf()
+ else
+ echo "# /etc/resolv.conf.tail can replace this line" >> "$cf"
+ fi
+- if change_file /etc/resolv.conf "$cf"; then
+- chmod 644 /etc/resolv.conf
++ if $resolvconf_from_systemd; then
++ [ -n "$ifmetric" ] && export IF_METRIC="$ifmetric"
++ "$resolvconf" -a "$ifname" <"$cf"
++ else
++ if change_file /etc/resolv.conf "$cf"; then
++ chmod 644 /etc/resolv.conf
++ fi
+ fi
+ rm -f "$cf"
+ }
+@@ -170,7 +179,7 @@ add_resolv_conf()
+ for x in ${new_domain_name_servers}; do
+ conf="${conf}nameserver $x$NL"
+ done
+- if $have_resolvconf; then
++ if $have_resolvconf && ! $resolvconf_from_systemd; then
+ [ -n "$ifmetric" ] && export IF_METRIC="$ifmetric"
+ printf %s "$conf" | "$resolvconf" -a "$ifname"
+ return $?
+@@ -186,7 +195,7 @@ add_resolv_conf()
+
+ remove_resolv_conf()
+ {
+- if $have_resolvconf; then
++ if $have_resolvconf && ($if_down || ! $resolvconf_from_systemd); then
+ "$resolvconf" -d "$ifname" -f
+ else
+ if [ -e "$resolv_conf_dir/$ifname" ]; then
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/dhcpcd/files/0001-dhcpcd.8-Fix-conflict-error-when-enable-multilib.patch b/meta/recipes-connectivity/dhcpcd/files/0001-dhcpcd.8-Fix-conflict-error-when-enable-multilib.patch
new file mode 100644
index 0000000000..12998aada4
--- /dev/null
+++ b/meta/recipes-connectivity/dhcpcd/files/0001-dhcpcd.8-Fix-conflict-error-when-enable-multilib.patch
@@ -0,0 +1,46 @@
+From 4915a7e52fcea8fe283a842890a1e726b1e26b10 Mon Sep 17 00:00:00 2001
+From: Lei Maohui <leimaohui@fujitsu.com>
+Date: Fri, 10 Mar 2023 03:48:46 +0000
+Subject: [PATCH] dhcpcd.8: Fix conflict error when enabling multilib.
+
+Error: Transaction test error:
+ file /usr/share/man/man8/dhcpcd.8 conflicts between attempted
+ installs of dhcpcd-doc-9.4.1-r0.cortexa57 and
+ lib32-dhcpcd-doc-9.4.1-r0.armv7ahf_neon
+
+The differences between the two files are as follows:
+@@ -821,7 +821,7 @@
+ If you always use the same options, put them here.
+ .It Pa /usr/libexec/dhcpcd-run-hooks
+ Bourne shell script that is run to configure or de-configure an interface.
+-.It Pa /usr/lib64/dhcpcd/dev
++.It Pa /usr/lib/dhcpcd/dev
+ Linux
+ .Pa /dev
+ management modules.
+
+It is just a man file; there is no need to manage multiple
+versions.
+
+Upstream-Status: Inappropriate [oe specific]
+Signed-off-by: Lei Maohui <leimaohui@fujitsu.com>
+---
+ src/dhcpcd.8.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/dhcpcd.8.in b/src/dhcpcd.8.in
+index bc6b3b5..791f2ba 100644
+--- a/src/dhcpcd.8.in
++++ b/src/dhcpcd.8.in
+@@ -821,7 +821,7 @@ Configuration file for dhcpcd.
+ If you always use the same options, put them here.
+ .It Pa @SCRIPT@
+ Bourne shell script that is run to configure or de-configure an interface.
+-.It Pa @LIBDIR@/dhcpcd/dev
++.It Pa /usr/<libdir>/dhcpcd/dev
+ Linux
+ .Pa /dev
+ management modules.
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/dhcpcd/files/0001-privsep-Allow-getrandom-sysctl-for-newer-glibc.patch b/meta/recipes-connectivity/dhcpcd/files/0001-privsep-Allow-getrandom-sysctl-for-newer-glibc.patch
new file mode 100644
index 0000000000..68ab93416a
--- /dev/null
+++ b/meta/recipes-connectivity/dhcpcd/files/0001-privsep-Allow-getrandom-sysctl-for-newer-glibc.patch
@@ -0,0 +1,30 @@
+From c6cdf0aee71ab4126d36b045f02428ee3c6ec50b Mon Sep 17 00:00:00 2001
+From: Roy Marples <roy@marples.name>
+Date: Fri, 26 Aug 2022 09:08:36 +0100
+Subject: [PATCH 1/2] privsep: Allow getrandom sysctl for newer glibc
+
+Fixes #120
+
+Upstream-Status: Backport [c6cdf0aee71ab4126d36b045f02428ee3c6ec50b]
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ src/privsep-linux.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/src/privsep-linux.c b/src/privsep-linux.c
+index b238644b..479a1d82 100644
+--- a/src/privsep-linux.c
++++ b/src/privsep-linux.c
+@@ -300,6 +300,9 @@ static struct sock_filter ps_seccomp_filter[] = {
+ #ifdef __NR_getpid
+ SECCOMP_ALLOW(__NR_getpid),
+ #endif
++#ifdef __NR_getrandom
++ SECCOMP_ALLOW(__NR_getrandom),
++#endif
+ #ifdef __NR_getsockopt
+ /* For route socket overflow */
+ SECCOMP_ALLOW_ARG(__NR_getsockopt, 1, SOL_SOCKET),
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/dhcpcd/files/0001-privsep-linux-fix-SECCOMP_AUDIT_ARCH-missing-ppc64le.patch b/meta/recipes-connectivity/dhcpcd/files/0001-privsep-linux-fix-SECCOMP_AUDIT_ARCH-missing-ppc64le.patch
new file mode 100644
index 0000000000..1c514f9b8c
--- /dev/null
+++ b/meta/recipes-connectivity/dhcpcd/files/0001-privsep-linux-fix-SECCOMP_AUDIT_ARCH-missing-ppc64le.patch
@@ -0,0 +1,34 @@
+From 7a2d9767585ed2c407d4985bd2d81552034fb90a Mon Sep 17 00:00:00 2001
+From: CHEN Xiangyu <xiangyu.chen@aol.com>
+Date: Thu, 9 Feb 2023 18:41:52 +0800
+Subject: [PATCH] privsep-linux: fix SECCOMP_AUDIT_ARCH missing ppc64le (#181)
+
+When dhcpcd is running on the ppc64le platform, it would be killed by SIGSYS.
+
+Upstream-Status: Backport [7a2d9767585ed2c407d4985bd2d81552034fb90a]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ src/privsep-linux.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/src/privsep-linux.c b/src/privsep-linux.c
+index 7372d26b..6a301950 100644
+--- a/src/privsep-linux.c
++++ b/src/privsep-linux.c
+@@ -232,7 +232,11 @@ ps_root_sendnetlink(struct dhcpcd_ctx *ctx, int protocol, struct msghdr *msg)
+ #elif defined(__or1k__)
+ # define SECCOMP_AUDIT_ARCH AUDIT_ARCH_OPENRISC
+ #elif defined(__powerpc64__)
+-# define SECCOMP_AUDIT_ARCH AUDIT_ARCH_PPC64
++# if (BYTE_ORDER == LITTLE_ENDIAN)
++# define SECCOMP_AUDIT_ARCH AUDIT_ARCH_PPC64LE
++# else
++# define SECCOMP_AUDIT_ARCH AUDIT_ARCH_PPC64
++# endif
+ #elif defined(__powerpc__)
+ # define SECCOMP_AUDIT_ARCH AUDIT_ARCH_PPC
+ #elif defined(__riscv)
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/dhcpcd/files/0002-privsep-Allow-newfstatat-syscall-as-well.patch b/meta/recipes-connectivity/dhcpcd/files/0002-privsep-Allow-newfstatat-syscall-as-well.patch
new file mode 100644
index 0000000000..c5d2cba305
--- /dev/null
+++ b/meta/recipes-connectivity/dhcpcd/files/0002-privsep-Allow-newfstatat-syscall-as-well.patch
@@ -0,0 +1,31 @@
+From 7625a555797f587a89dc2447fd9d621024d5165c Mon Sep 17 00:00:00 2001
+From: Roy Marples <roy@marples.name>
+Date: Fri, 26 Aug 2022 09:24:50 +0100
+Subject: [PATCH 2/2] privsep: Allow newfstatat syscall as well
+
+Allows newer glibc variants to work apparently.
+As reported in #84 and #89.
+
+Upstream-Status: Backport [7625a555797f587a89dc2447fd9d621024d5165c]
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ src/privsep-linux.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/src/privsep-linux.c b/src/privsep-linux.c
+index 479a1d82..6327b1bc 100644
+--- a/src/privsep-linux.c
++++ b/src/privsep-linux.c
+@@ -328,6 +328,9 @@ static struct sock_filter ps_seccomp_filter[] = {
+ #ifdef __NR_nanosleep
+ SECCOMP_ALLOW(__NR_nanosleep), /* XXX should use ppoll instead */
+ #endif
++#ifdef __NR_newfstatat
++ SECCOMP_ALLOW(__NR_newfstatat),
++#endif
+ #ifdef __NR_ppoll
+ SECCOMP_ALLOW(__NR_ppoll),
+ #endif
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch b/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch
new file mode 100644
index 0000000000..7f5baf3637
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch
@@ -0,0 +1,280 @@
+From 703418fe9d2e3b1e8d594df5788d8001a8116265 Mon Sep 17 00:00:00 2001
+From: Jeffrey Bencteux <jeffbencteux@gmail.com>
+Date: Fri, 30 Jun 2023 19:02:45 +0200
+Subject: [PATCH] CVE-2023-40303: ftpd,rcp,rlogin,rsh,rshd,uucpd: fix: check
+ set*id() return values
+
+Several setuid(), setgid(), seteuid() and setegid() return values
+were not checked in ftpd/rcp/rlogin/rsh/rshd/uucpd code, potentially
+leading to security issues.
+
+CVE: CVE-2023-40303
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=e4e65c03f4c11292a3e40ef72ca3f194c8bffdd6]
+Signed-off-by: Jeffrey Bencteux <jeffbencteux@gmail.com>
+Signed-off-by: Simon Josefsson <simon@josefsson.org>
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ftpd/ftpd.c | 10 +++++++---
+ src/rcp.c | 39 +++++++++++++++++++++++++++++++++------
+ src/rlogin.c | 11 +++++++++--
+ src/rsh.c | 25 +++++++++++++++++++++----
+ src/rshd.c | 20 +++++++++++++++++---
+ src/uucpd.c | 15 +++++++++++++--
+ 6 files changed, 100 insertions(+), 20 deletions(-)
+
+diff --git a/ftpd/ftpd.c b/ftpd/ftpd.c
+index 92b2cca5..28dd523f 100644
+--- a/ftpd/ftpd.c
++++ b/ftpd/ftpd.c
+@@ -862,7 +862,9 @@ end_login (struct credentials *pcred)
+ char *remotehost = pcred->remotehost;
+ int atype = pcred->auth_type;
+
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
++
+ if (pcred->logged_in)
+ {
+ logwtmp_keep_open (ttyline, "", "");
+@@ -1151,7 +1153,8 @@ getdatasock (const char *mode)
+
+ if (data >= 0)
+ return fdopen (data, mode);
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
+ s = socket (ctrl_addr.ss_family, SOCK_STREAM, 0);
+ if (s < 0)
+ goto bad;
+@@ -1978,7 +1981,8 @@ passive (int epsv, int af)
+ else /* !AF_INET6 */
+ ((struct sockaddr_in *) &pasv_addr)->sin_port = 0;
+
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
+ if (bind (pdata, (struct sockaddr *) &pasv_addr, pasv_addrlen) < 0)
+ {
+ if (seteuid ((uid_t) cred.uid))
+diff --git a/src/rcp.c b/src/rcp.c
+index 75adb253..cdcf8500 100644
+--- a/src/rcp.c
++++ b/src/rcp.c
+@@ -345,14 +345,23 @@ main (int argc, char *argv[])
+ if (from_option)
+ { /* Follow "protocol", send data. */
+ response ();
+- setuid (userid);
++
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ source (argc, argv);
+ exit (errs);
+ }
+
+ if (to_option)
+ { /* Receive data. */
+- setuid (userid);
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ sink (argc, argv);
+ exit (errs);
+ }
+@@ -537,7 +546,11 @@ toremote (char *targ, int argc, char *argv[])
+ if (response () < 0)
+ exit (EXIT_FAILURE);
+ free (bp);
+- setuid (userid);
++
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+ }
+ source (1, argv + i);
+ close (rem);
+@@ -630,7 +643,12 @@ tolocal (int argc, char *argv[])
+ ++errs;
+ continue;
+ }
+- seteuid (userid);
++
++ if (seteuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
+ #if defined IP_TOS && defined IPPROTO_IP && defined IPTOS_THROUGHPUT
+ sslen = sizeof (ss);
+ (void) getpeername (rem, (struct sockaddr *) &ss, &sslen);
+@@ -643,7 +661,12 @@ tolocal (int argc, char *argv[])
+ #endif
+ vect[0] = target;
+ sink (1, vect);
+- seteuid (effuid);
++
++ if (seteuid (effuid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
+ close (rem);
+ rem = -1;
+ #ifdef SHISHI
+@@ -1441,7 +1464,11 @@ susystem (char *s, int userid)
+ return (127);
+
+ case 0:
+- setuid (userid);
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ execl (PATH_BSHELL, "sh", "-c", s, NULL);
+ _exit (127);
+ }
+diff --git a/src/rlogin.c b/src/rlogin.c
+index aa6426fb..c543de0c 100644
+--- a/src/rlogin.c
++++ b/src/rlogin.c
+@@ -647,8 +647,15 @@ try_connect:
+ /* Now change to the real user ID. We have to be set-user-ID root
+ to get the privileged port that rcmd () uses. We now want, however,
+ to run as the real user who invoked us. */
+- seteuid (uid);
+- setuid (uid);
++ if (seteuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
++ if (setuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+
+ doit (&osmask); /* The old mask will activate SIGURG and SIGUSR1! */
+
+diff --git a/src/rsh.c b/src/rsh.c
+index 2d622ca4..6f60667d 100644
+--- a/src/rsh.c
++++ b/src/rsh.c
+@@ -276,8 +276,17 @@ main (int argc, char **argv)
+ {
+ if (asrsh)
+ *argv = (char *) "rlogin";
+- seteuid (getuid ());
+- setuid (getuid ());
++
++ if (seteuid (getuid ()) == -1)
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
++
++ if (setuid (getuid ()) == -1)
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
++
+ execv (PATH_RLOGIN, argv);
+ error (EXIT_FAILURE, errno, "cannot execute %s", PATH_RLOGIN);
+ }
+@@ -541,8 +550,16 @@ try_connect:
+ error (0, errno, "setsockopt DEBUG (ignored)");
+ }
+
+- seteuid (uid);
+- setuid (uid);
++ if (seteuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
++
++ if (setuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
++
+ #ifdef HAVE_SIGACTION
+ sigemptyset (&sigs);
+ sigaddset (&sigs, SIGINT);
+diff --git a/src/rshd.c b/src/rshd.c
+index d1c0d0cd..707790e7 100644
+--- a/src/rshd.c
++++ b/src/rshd.c
+@@ -1847,8 +1847,18 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ pwd->pw_shell = PATH_BSHELL;
+
+ /* Set the gid, then uid to become the user specified by "locuser" */
+- setegid ((gid_t) pwd->pw_gid);
+- setgid ((gid_t) pwd->pw_gid);
++ if (setegid ((gid_t) pwd->pw_gid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setegid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
++
++ if (setgid ((gid_t) pwd->pw_gid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setgid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
++
+ #ifdef HAVE_INITGROUPS
+ initgroups (pwd->pw_name, pwd->pw_gid); /* BSD groups */
+ #endif
+@@ -1870,7 +1880,11 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ }
+ #endif /* WITH_PAM */
+
+- setuid ((uid_t) pwd->pw_uid);
++ if (setuid ((uid_t) pwd->pw_uid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setuid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ /* We'll execute the client's command in the home directory
+ * of locuser. Note, that the chdir must be executed after
+diff --git a/src/uucpd.c b/src/uucpd.c
+index 107589e1..29cfce35 100644
+--- a/src/uucpd.c
++++ b/src/uucpd.c
+@@ -252,7 +252,12 @@ doit (struct sockaddr *sap, socklen_t salen)
+ snprintf (Username, sizeof (Username), "USER=%s", user);
+ snprintf (Logname, sizeof (Logname), "LOGNAME=%s", user);
+ dologin (pw, sap, salen);
+- setgid (pw->pw_gid);
++
++ if (setgid (pw->pw_gid) == -1)
++ {
++ fprintf (stderr, "setgid() failed");
++ return;
++ }
+ #ifdef HAVE_INITGROUPS
+ initgroups (pw->pw_name, pw->pw_gid);
+ #endif
+@@ -261,7 +266,13 @@ doit (struct sockaddr *sap, socklen_t salen)
+ fprintf (stderr, "Login incorrect.");
+ return;
+ }
+- setuid (pw->pw_uid);
++
++ if (setuid (pw->pw_uid) == -1)
++ {
++ fprintf (stderr, "setuid() failed");
++ return;
++ }
++
+ execl (uucico_location, "uucico", NULL);
+ perror ("uucico server: execl");
+ }
diff --git a/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch b/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch
new file mode 100644
index 0000000000..4bc354d256
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch
@@ -0,0 +1,254 @@
+From 70fe022f9dac760eaece0228cad17e3d29a57fb8 Mon Sep 17 00:00:00 2001
+From: Simon Josefsson <simon@josefsson.org>
+Date: Mon, 31 Jul 2023 13:59:05 +0200
+Subject: [PATCH] CVE-2023-40303: Indent changes in previous commit.
+
+CVE: CVE-2023-40303
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=9122999252c7e21eb7774de11d539748e7bdf46d]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/rcp.c | 42 ++++++++++++++++++++++++------------------
+ src/rlogin.c | 12 ++++++------
+ src/rsh.c | 24 ++++++++++++------------
+ src/rshd.c | 24 ++++++++++++------------
+ src/uucpd.c | 16 ++++++++--------
+ 5 files changed, 62 insertions(+), 56 deletions(-)
+
+diff --git a/src/rcp.c b/src/rcp.c
+index cdcf8500..652f22e6 100644
+--- a/src/rcp.c
++++ b/src/rcp.c
+@@ -347,9 +347,10 @@ main (int argc, char *argv[])
+ response ();
+
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ source (argc, argv);
+ exit (errs);
+@@ -358,9 +359,10 @@ main (int argc, char *argv[])
+ if (to_option)
+ { /* Receive data. */
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ sink (argc, argv);
+ exit (errs);
+@@ -548,9 +550,10 @@ toremote (char *targ, int argc, char *argv[])
+ free (bp);
+
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+ }
+ source (1, argv + i);
+ close (rem);
+@@ -645,9 +648,10 @@ tolocal (int argc, char *argv[])
+ }
+
+ if (seteuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (seteuid() failed)");
++ }
+
+ #if defined IP_TOS && defined IPPROTO_IP && defined IPTOS_THROUGHPUT
+ sslen = sizeof (ss);
+@@ -663,9 +667,10 @@ tolocal (int argc, char *argv[])
+ sink (1, vect);
+
+ if (seteuid (effuid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (seteuid() failed)");
++ }
+
+ close (rem);
+ rem = -1;
+@@ -1465,9 +1470,10 @@ susystem (char *s, int userid)
+
+ case 0:
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ execl (PATH_BSHELL, "sh", "-c", s, NULL);
+ _exit (127);
+diff --git a/src/rlogin.c b/src/rlogin.c
+index c543de0c..4360202f 100644
+--- a/src/rlogin.c
++++ b/src/rlogin.c
+@@ -648,14 +648,14 @@ try_connect:
+ to get the privileged port that rcmd () uses. We now want, however,
+ to run as the real user who invoked us. */
+ if (seteuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
+
+ if (setuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+
+ doit (&osmask); /* The old mask will activate SIGURG and SIGUSR1! */
+
+diff --git a/src/rsh.c b/src/rsh.c
+index 6f60667d..179b47cd 100644
+--- a/src/rsh.c
++++ b/src/rsh.c
+@@ -278,14 +278,14 @@ main (int argc, char **argv)
+ *argv = (char *) "rlogin";
+
+ if (seteuid (getuid ()) == -1)
+- {
+- error (EXIT_FAILURE, errno, "seteuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
+
+ if (setuid (getuid ()) == -1)
+- {
+- error (EXIT_FAILURE, errno, "setuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
+
+ execv (PATH_RLOGIN, argv);
+ error (EXIT_FAILURE, errno, "cannot execute %s", PATH_RLOGIN);
+@@ -551,14 +551,14 @@ try_connect:
+ }
+
+ if (seteuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, errno, "seteuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
+
+ if (setuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, errno, "setuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
+
+ #ifdef HAVE_SIGACTION
+ sigemptyset (&sigs);
+diff --git a/src/rshd.c b/src/rshd.c
+index 707790e7..3a153a18 100644
+--- a/src/rshd.c
++++ b/src/rshd.c
+@@ -1848,16 +1848,16 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+
+ /* Set the gid, then uid to become the user specified by "locuser" */
+ if (setegid ((gid_t) pwd->pw_gid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setegid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setegid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ if (setgid ((gid_t) pwd->pw_gid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setgid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setgid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ #ifdef HAVE_INITGROUPS
+ initgroups (pwd->pw_name, pwd->pw_gid); /* BSD groups */
+@@ -1881,10 +1881,10 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ #endif /* WITH_PAM */
+
+ if (setuid ((uid_t) pwd->pw_uid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setuid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setuid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ /* We'll execute the client's command in the home directory
+ * of locuser. Note, that the chdir must be executed after
+diff --git a/src/uucpd.c b/src/uucpd.c
+index 29cfce35..fde7b9c9 100644
+--- a/src/uucpd.c
++++ b/src/uucpd.c
+@@ -254,10 +254,10 @@ doit (struct sockaddr *sap, socklen_t salen)
+ dologin (pw, sap, salen);
+
+ if (setgid (pw->pw_gid) == -1)
+- {
+- fprintf (stderr, "setgid() failed");
+- return;
+- }
++ {
++ fprintf (stderr, "setgid() failed");
++ return;
++ }
+ #ifdef HAVE_INITGROUPS
+ initgroups (pw->pw_name, pw->pw_gid);
+ #endif
+@@ -268,10 +268,10 @@ doit (struct sockaddr *sap, socklen_t salen)
+ }
+
+ if (setuid (pw->pw_uid) == -1)
+- {
+- fprintf (stderr, "setuid() failed");
+- return;
+- }
++ {
++ fprintf (stderr, "setuid() failed");
++ return;
++ }
+
+ execl (uucico_location, "uucico", NULL);
+ perror ("uucico server: execl");
diff --git a/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch b/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch
new file mode 100644
index 0000000000..54040ad74c
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch
@@ -0,0 +1,54 @@
+From d52349fa1b6baac77ffa2c74769636aa2ece2ec5 Mon Sep 17 00:00:00 2001
+From: Erik Auerswald <auerswal@unix-ag.uni-kl.de>
+Date: Sat, 3 Sep 2022 16:58:16 +0200
+Subject: [PATCH] telnetd: Handle early IAC EC or IAC EL receipt
+
+Fix telnetd crash if the first two bytes of a new connection
+are 0xff 0xf7 (IAC EC) or 0xff 0xf8 (IAC EL).
+
+The problem was reported in:
+<https://pierrekim.github.io/blog/2022-08-24-2-byte-dos-freebsd-netbsd-telnetd-netkit-telnetd-inetutils-telnetd-kerberos-telnetd.html>.
+
+* NEWS: Mention fix.
+* telnetd/state.c (telrcv): Handle zero slctab[SLC_EC].sptr and
+zero slctab[SLC_EL].sptr.
+
+CVE: CVE-2022-39028
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=fae8263e467380483c28513c0e5fac143e46f94f]
+Signed-off-by: Teoh Jay Shen <jay.shen.teoh@intel.com>
+---
+ telnetd/state.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/telnetd/state.c b/telnetd/state.c
+index ffc6cba..c2d760f 100644
+--- a/telnetd/state.c
++++ b/telnetd/state.c
+@@ -312,15 +312,21 @@ telrcv (void)
+ case EC:
+ case EL:
+ {
+- cc_t ch;
++ cc_t ch = (cc_t) (_POSIX_VDISABLE);
+
+ DEBUG (debug_options, 1, printoption ("td: recv IAC", c));
+ ptyflush (); /* half-hearted */
+ init_termbuf ();
+ if (c == EC)
+- ch = *slctab[SLC_EC].sptr;
++ {
++ if (slctab[SLC_EC].sptr)
++ ch = *slctab[SLC_EC].sptr;
++ }
+ else
+- ch = *slctab[SLC_EL].sptr;
++ {
++ if (slctab[SLC_EL].sptr)
++ ch = *slctab[SLC_EL].sptr;
++ }
+ if (ch != (cc_t) (_POSIX_VDISABLE))
+ pty_output_byte ((unsigned char) ch);
+ break;
+--
+2.37.3
+
diff --git a/meta/recipes-connectivity/inetutils/inetutils_2.2.bb b/meta/recipes-connectivity/inetutils/inetutils_2.2.bb
index 6c9a299b71..6f9173dbc1 100644
--- a/meta/recipes-connectivity/inetutils/inetutils_2.2.bb
+++ b/meta/recipes-connectivity/inetutils/inetutils_2.2.bb
@@ -21,6 +21,9 @@ SRC_URI = "${GNU_MIRROR}/inetutils/inetutils-${PV}.tar.xz \
file://tftpd.xinetd.inetutils \
file://inetutils-1.9-PATH_PROCNET_DEV.patch \
file://inetutils-only-check-pam_appl.h-when-pam-enabled.patch \
+ file://CVE-2022-39028.patch \
+ file://0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch \
+ file://0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch \
"
inherit autotools gettext update-alternatives texinfo
diff --git a/meta/recipes-connectivity/kea/files/fix-multilib-conflict.patch b/meta/recipes-connectivity/kea/files/fix-multilib-conflict.patch
index 78f475a495..451b409c88 100644
--- a/meta/recipes-connectivity/kea/files/fix-multilib-conflict.patch
+++ b/meta/recipes-connectivity/kea/files/fix-multilib-conflict.patch
@@ -12,7 +12,7 @@ Subject: [PATCH] There are conflict of config files between kea and lib32-kea:
Because they are all commented out, replace the expanded libdir path with
'$libdir' in the config files to avoid conflict.
-Upstream-Status: Pending
+Upstream-Status: Submitted [https://gitlab.isc.org/isc-projects/kea/-/issues/2602]
Signed-off-by: Kai Kang <kai.kang@windriver.com>
---
diff --git a/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-1.patch b/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-1.patch
new file mode 100644
index 0000000000..d263cced8d
--- /dev/null
+++ b/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-1.patch
@@ -0,0 +1,56 @@
+From b8ee33667d265b936d60ee7f0ba0b22463ccb019 Mon Sep 17 00:00:00 2001
+From: Ben Noordhuis <info@bnoordhuis.nl>
+Date: Thu, 18 Jan 2024 14:51:40 +0100
+Subject: [PATCH] fix: always zero-terminate idna output
+
+Upstream-Status: Backport [https://github.com/libuv/libuv/commit/0f2d7e784a256b54b2385043438848047bc2a629]
+CVE: CVE-2024-24806
+
+Fixes: https://github.com/libuv/libuv/security/advisories/GHSA-f74f-cvh7-c6q6
+Signed-off-by: Hugo SIMELIERE <hsimeliere.opensource@witekio.com>
+---
+ src/idna.c | 5 +++--
+ test/test-idna.c | 4 ++++
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/src/idna.c b/src/idna.c
+index 93d982ca..ce7f2746 100644
+--- a/src/idna.c
++++ b/src/idna.c
+@@ -308,8 +308,9 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
+ return rc;
+ }
+
+- if (d < de)
+- *d++ = '\0';
++ if (d >= de)
++ return UV_EINVAL;
+
++ *d++ = '\0';
+ return d - ds; /* Number of bytes written. */
+ }
+diff --git a/test/test-idna.c b/test/test-idna.c
+index f4fad965..d079be55 100644
+--- a/test/test-idna.c
++++ b/test/test-idna.c
+@@ -99,6 +99,7 @@ TEST_IMPL(utf8_decode1) {
+ TEST_IMPL(utf8_decode1_overrun) {
+ const char* p;
+ char b[1];
++ char c[1];
+
+ /* Single byte. */
+ p = b;
+@@ -112,6 +113,9 @@ TEST_IMPL(utf8_decode1_overrun) {
+ ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + 1));
+ ASSERT_EQ(p, b + 1);
+
++ b[0] = 0x7F;
++ ASSERT_EQ(UV_EINVAL, uv__idna_toascii(b, b + 1, c, c + 1));
++
+ return 0;
+ }
+
+--
+2.43.0
+
diff --git a/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-2.patch b/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-2.patch
new file mode 100644
index 0000000000..b0ed5f0ea2
--- /dev/null
+++ b/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-2.patch
@@ -0,0 +1,44 @@
+From 96f881c8f600da33ec4ecec450ec491990ce613b Mon Sep 17 00:00:00 2001
+From: Ben Noordhuis <info@bnoordhuis.nl>
+Date: Thu, 18 Jan 2024 14:52:38 +0100
+Subject: [PATCH] fix: reject zero-length idna inputs
+
+Upstream-Status: Backport [https://github.com/libuv/libuv/commit/3530bcc30350d4a6ccf35d2f7b33e23292b9de70]
+CVE: CVE-2024-24806
+
+Fixes: https://github.com/libuv/libuv/security/advisories/GHSA-f74f-cvh7-c6q6
+Signed-off-by: Hugo SIMELIERE <hsimeliere.opensource@witekio.com>
+---
+ src/idna.c | 3 +++
+ test/test-idna.c | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/src/idna.c b/src/idna.c
+index ce7f2746..858b19d0 100644
+--- a/src/idna.c
++++ b/src/idna.c
+@@ -274,6 +274,9 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
+ char* ds;
+ int rc;
+
++ if (s == se)
++ return UV_EINVAL;
++
+ ds = d;
+
+ si = s;
+diff --git a/test/test-idna.c b/test/test-idna.c
+index d079be55..d59b521e 100644
+--- a/test/test-idna.c
++++ b/test/test-idna.c
+@@ -114,6 +114,7 @@ TEST_IMPL(utf8_decode1_overrun) {
+ ASSERT_EQ(p, b + 1);
+
+ b[0] = 0x7F;
++ ASSERT_EQ(UV_EINVAL, uv__idna_toascii(b, b + 0, c, c + 1));
+ ASSERT_EQ(UV_EINVAL, uv__idna_toascii(b, b + 1, c, c + 1));
+
+ return 0;
+--
+2.43.0
+
diff --git a/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-3.patch b/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-3.patch
new file mode 100644
index 0000000000..733660cf05
--- /dev/null
+++ b/meta/recipes-connectivity/libuv/libuv/CVE-2024-24806-3.patch
@@ -0,0 +1,31 @@
+From a7443ee6b3b3c6a12708148aa9bb001b7782905c Mon Sep 17 00:00:00 2001
+From: Santiago Gimeno <santiago.gimeno@gmail.com>
+Date: Wed, 7 Feb 2024 20:27:58 +0100
+Subject: [PATCH] test: empty strings are not valid IDNA
+
+Upstream-Status: Backport [https://github.com/libuv/libuv/commit/e0327e1d508b8207c9150b6e582f0adf26213c39]
+CVE: CVE-2024-24806
+
+Fixes: https://github.com/libuv/libuv/security/advisories/GHSA-f74f-cvh7-c6q6
+Signed-off-by: Hugo SIMELIERE <hsimeliere.opensource@witekio.com>
+---
+ test/test-idna.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/test/test-idna.c b/test/test-idna.c
+index d59b521e..37da38de 100644
+--- a/test/test-idna.c
++++ b/test/test-idna.c
+@@ -150,8 +150,8 @@ TEST_IMPL(idna_toascii) {
+ /* Illegal inputs. */
+ F("\xC0\x80\xC1\x80", UV_EINVAL); /* Overlong UTF-8 sequence. */
+ F("\xC0\x80\xC1\x80.com", UV_EINVAL); /* Overlong UTF-8 sequence. */
++ F("", UV_EINVAL);
+ /* No conversion. */
+- T("", "");
+ T(".", ".");
+ T(".com", ".com");
+ T("example", "example");
+--
+2.43.0
+
diff --git a/meta/recipes-connectivity/libuv/libuv_1.44.1.bb b/meta/recipes-connectivity/libuv/libuv_1.44.2.bb
index 4c96d80a65..e2cd3c3247 100644
--- a/meta/recipes-connectivity/libuv/libuv_1.44.1.bb
+++ b/meta/recipes-connectivity/libuv/libuv_1.44.2.bb
@@ -5,8 +5,12 @@ BUGTRACKER = "https://github.com/libuv/libuv/issues"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENSE;md5=ad93ca1fffe931537fcf64f6fcce084d"
-SRCREV = "e8b7eb6908a847ffbe6ab2eec7428e43a0aa53a2"
-SRC_URI = "git://github.com/libuv/libuv;branch=v1.x;protocol=https"
+SRCREV = "0c1fa696aa502eb749c2c4735005f41ba00a27b8"
+SRC_URI = "git://github.com/libuv/libuv.git;branch=v1.x;protocol=https \
+ file://CVE-2024-24806-1.patch \
+ file://CVE-2024-24806-2.patch \
+ file://CVE-2024-24806-3.patch \
+ "
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>\d+(\.\d+)+)"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb b/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
index e6f216e5cb..a4030b7b32 100644
--- a/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
+++ b/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
@@ -5,8 +5,8 @@ SECTION = "network"
LICENSE = "PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=87964579b2a8ece4bc6744d2dc9a8b04"
-SRCREV = "3d5c8d0f7e0264768a2c000d0fd4b4d4a991e041"
-PV = "20220511"
+SRCREV = "aae7c68671d225e6d35224613d5b98192b9b2ffe"
+PV = "20230416"
PE = "1"
SRC_URI = "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=main"
diff --git a/meta/recipes-connectivity/openssh/openssh/0001-upstream-include-destination-constraints-for-smartca.patch b/meta/recipes-connectivity/openssh/openssh/0001-upstream-include-destination-constraints-for-smartca.patch
new file mode 100644
index 0000000000..b4e7ce7ef6
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/0001-upstream-include-destination-constraints-for-smartca.patch
@@ -0,0 +1,35 @@
+From 91889b5a3e7554af474a21ce8e1ffd3eb1542f06 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Thu, 9 Mar 2023 06:58:26 +0000
+Subject: [PATCH] upstream: include destination constraints for smartcard keys
+ too.
+
+Spotted by Luci Stanescu; ok deraadt@ markus@
+
+OpenBSD-Commit-ID: add879fac6903a1cb1d1e42c4309e5359c3d870f
+
+CVE: CVE-2023-28531
+
+Upstream-Status: Backport [54ac4ab2b53ce9fcb66b8250dee91c070e4167ed]
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ authfd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/authfd.c b/authfd.c
+index 76e48aab..dca8e55b 100644
+--- a/authfd.c
++++ b/authfd.c
+@@ -665,7 +665,7 @@ ssh_update_card(int sock, int add, const char *reader_id, const char *pin,
+ struct dest_constraint **dest_constraints, size_t ndest_constraints)
+ {
+ struct sshbuf *msg;
+- int r, constrained = (life || confirm);
++ int r, constrained = (life || confirm || dest_constraints);
+ u_char type;
+
+ if (add) {
+--
+2.37.1
+
diff --git a/meta/recipes-connectivity/openssh/openssh/7280401bdd77ca54be6867a154cc01e0d72612e0.patch b/meta/recipes-connectivity/openssh/openssh/7280401bdd77ca54be6867a154cc01e0d72612e0.patch
new file mode 100644
index 0000000000..ebdff1ffe4
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/7280401bdd77ca54be6867a154cc01e0d72612e0.patch
@@ -0,0 +1,984 @@
+From 7280401bdd77ca54be6867a154cc01e0d72612e0 Mon Sep 17 00:00:00 2001
+From: Damien Miller <djm@mindrot.org>
+Date: Fri, 24 Mar 2023 13:56:25 +1100
+Subject: [PATCH] remove support for old libcrypto
+
+OpenSSH now requires LibreSSL 3.1.0 or greater or
+OpenSSL 1.1.1 or greater
+
+with/ok dtucker@
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/7280401bdd77ca54be6867a154cc01e0d72612e0]
+Comment: Hunks are refreshed; a couple of hunks were removed from configure.ac as the code they
+patch is not present, and the change was backported to the existing code.
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+---
+ .github/workflows/c-cpp.yml | 7 -
+ INSTALL | 8 +-
+ cipher-aes.c | 2 +-
+ configure.ac | 96 ++---
+ openbsd-compat/libressl-api-compat.c | 556 +--------------------------
+ openbsd-compat/openssl-compat.h | 151 +-------
+ 6 files changed, 40 insertions(+), 780 deletions(-)
+
+diff --git a/.github/workflows/c-cpp.yml b/.github/workflows/c-cpp.yml
+index 3d9aa22dba5..d299a32468d 100644
+--- a/.github/workflows/c-cpp.yml
++++ b/.github/workflows/c-cpp.yml
+@@ -40,18 +40,11 @@
+ - { os: ubuntu-20.04, configs: tcmalloc }
+ - { os: ubuntu-20.04, configs: musl }
+ - { os: ubuntu-latest, configs: libressl-master }
+- - { os: ubuntu-latest, configs: libressl-2.2.9 }
+- - { os: ubuntu-latest, configs: libressl-2.8.3 }
+- - { os: ubuntu-latest, configs: libressl-3.0.2 }
+ - { os: ubuntu-latest, configs: libressl-3.2.6 }
+ - { os: ubuntu-latest, configs: libressl-3.3.4 }
+ - { os: ubuntu-latest, configs: libressl-3.4.1 }
+ - { os: ubuntu-latest, configs: openssl-master }
+ - { os: ubuntu-latest, configs: openssl-noec }
+- - { os: ubuntu-latest, configs: openssl-1.0.1 }
+- - { os: ubuntu-latest, configs: openssl-1.0.1u }
+- - { os: ubuntu-latest, configs: openssl-1.0.2u }
+- - { os: ubuntu-latest, configs: openssl-1.1.0h }
+ - { os: ubuntu-latest, configs: openssl-1.1.1 }
+ - { os: ubuntu-latest, configs: openssl-1.1.1k }
+ - { os: ubuntu-latest, configs: openssl-3.0.0 }
+diff --git a/INSTALL b/INSTALL
+index 68b15e13190..f99d1e2a809 100644
+--- a/INSTALL
++++ b/INSTALL
+@@ -21,12 +21,8 @@ https://zlib.net/
+
+ libcrypto from either of LibreSSL or OpenSSL. Building without libcrypto
+ is supported but severely restricts the available ciphers and algorithms.
+- - LibreSSL (https://www.libressl.org/)
+- - OpenSSL (https://www.openssl.org) with any of the following versions:
+- - 1.0.x >= 1.0.1 or 1.1.0 >= 1.1.0g or any 1.1.1
+-
+-Note that due to a bug in EVP_CipherInit OpenSSL 1.1 versions prior to
+-1.1.0g can't be used.
++ - LibreSSL (https://www.libressl.org/) 3.1.0 or greater
++ - OpenSSL (https://www.openssl.org) 1.1.1 or greater
+
+ LibreSSL/OpenSSL should be compiled as a position-independent library
+ (i.e. -fPIC, eg by configuring OpenSSL as "./config [options] -fPIC"
+diff --git a/cipher-aes.c b/cipher-aes.c
+index 8b101727284..87c763353d8 100644
+--- a/cipher-aes.c
++++ b/cipher-aes.c
+@@ -69,7 +69,7 @@ ssh_rijndael_init(EVP_CIPHER_CTX *ctx, const u_char *key, const u_char *iv,
+
+ static int
+ ssh_rijndael_cbc(EVP_CIPHER_CTX *ctx, u_char *dest, const u_char *src,
+- LIBCRYPTO_EVP_INL_TYPE len)
++ size_t len)
+ {
+ struct ssh_rijndael_ctx *c;
+ u_char buf[RIJNDAEL_BLOCKSIZE];
+diff --git a/configure.ac b/configure.ac
+index 22fee70f604..1c0ccdf19c5 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -2744,42 +2744,40 @@
+ #include <openssl/crypto.h>
+ #define DATA "conftest.ssllibver"
+ ]], [[
+- FILE *fd;
+- int rc;
++ FILE *f;
+
+- fd = fopen(DATA,"w");
+- if(fd == NULL)
++ if ((f = fopen(DATA, "w")) == NULL)
+ exit(1);
+-#ifndef OPENSSL_VERSION
+-# define OPENSSL_VERSION SSLEAY_VERSION
+-#endif
+-#ifndef HAVE_OPENSSL_VERSION
+-# define OpenSSL_version SSLeay_version
+-#endif
+-#ifndef HAVE_OPENSSL_VERSION_NUM
+-# define OpenSSL_version_num SSLeay
+-#endif
+- if ((rc = fprintf(fd, "%08lx (%s)\n",
++ if (fprintf(f, "%08lx (%s)",
+ (unsigned long)OpenSSL_version_num(),
+- OpenSSL_version(OPENSSL_VERSION))) < 0)
++ OpenSSL_version(OPENSSL_VERSION)) < 0)
++ exit(1);
++#ifdef LIBRESSL_VERSION_NUMBER
++ if (fprintf(f, " libressl-%08lx", LIBRESSL_VERSION_NUMBER) < 0)
++ exit(1);
++#endif
++ if (fputc('\n', f) == EOF || fclose(f) == EOF)
+ exit(1);
+-
+ exit(0);
+ ]])],
+ [
+- ssl_library_ver=`cat conftest.ssllibver`
++ sslver=`cat conftest.ssllibver`
++ ssl_showver=`echo "$sslver" | sed 's/ libressl-.*//'`
+ # Check version is supported.
+- case "$ssl_library_ver" in
+- 10000*|0*)
+- AC_MSG_ERROR([OpenSSL >= 1.0.1 required (have "$ssl_library_ver")])
+- ;;
+- 100*) ;; # 1.0.x
+- 101000[[0123456]]*)
+- # https://github.com/openssl/openssl/pull/4613
+- AC_MSG_ERROR([OpenSSL 1.1.x versions prior to 1.1.0g have a bug that breaks their use with OpenSSH (have "$ssl_library_ver")])
++ case "$sslver" in
++ 100*|10100*) # 1.0.x, 1.1.0x
++ AC_MSG_ERROR([OpenSSL >= 1.1.1 required (have "$ssl_showver")])
+ ;;
+ 101*) ;; # 1.1.x
+- 200*) ;; # LibreSSL
++ 200*) # LibreSSL
++ lver=`echo "$sslver" | sed 's/.*libressl-//'`
++ case "$lver" in
++ 2*|300*) # 2.x, 3.0.0
++ AC_MSG_ERROR([LibreSSL >= 3.1.0 required (have "$ssl_showver")])
++ ;;
++ *) ;; # Assume all other versions are good.
++ esac
++ ;;
+ 300*) ;; # OpenSSL 3
+ 301*) ;; # OpenSSL development branch.
+ *)
+@@ -2781,10 +2781,10 @@
+ 300*) ;; # OpenSSL 3
+ 301*) ;; # OpenSSL development branch.
+ *)
+- AC_MSG_ERROR([Unknown/unsupported OpenSSL version ("$ssl_library_ver")])
++ AC_MSG_ERROR([Unknown/unsupported OpenSSL version ("$ssl_showver")])
+ ;;
+ esac
+- AC_MSG_RESULT([$ssl_library_ver])
++ AC_MSG_RESULT([$ssl_showver])
+ ],
+ [
+ AC_MSG_RESULT([not found])
+@@ -2804,9 +2804,6 @@
+ #include <openssl/opensslv.h>
+ #include <openssl/crypto.h>
+ ]], [[
+-#ifndef HAVE_OPENSSL_VERSION_NUM
+-# define OpenSSL_version_num SSLeay
+-#endif
+ exit(OpenSSL_version_num() == OPENSSL_VERSION_NUMBER ? 0 : 1);
+ ]])],
+ [
+@@ -2881,44 +2878,13 @@
+ )
+ )
+
+- # LibreSSL/OpenSSL 1.1x API
++ # LibreSSL/OpenSSL API differences
+ AC_CHECK_FUNCS([ \
+- OPENSSL_init_crypto \
+- DH_get0_key \
+- DH_get0_pqg \
+- DH_set0_key \
+- DH_set_length \
+- DH_set0_pqg \
+- DSA_get0_key \
+- DSA_get0_pqg \
+- DSA_set0_key \
+- DSA_set0_pqg \
+- DSA_SIG_get0 \
+- DSA_SIG_set0 \
+- ECDSA_SIG_get0 \
+- ECDSA_SIG_set0 \
+ EVP_CIPHER_CTX_iv \
+ EVP_CIPHER_CTX_iv_noconst \
+ EVP_CIPHER_CTX_get_iv \
+ EVP_CIPHER_CTX_get_updated_iv \
+ EVP_CIPHER_CTX_set_iv \
+- RSA_get0_crt_params \
+- RSA_get0_factors \
+- RSA_get0_key \
+- RSA_set0_crt_params \
+- RSA_set0_factors \
+- RSA_set0_key \
+- RSA_meth_free \
+- RSA_meth_dup \
+- RSA_meth_set1_name \
+- RSA_meth_get_finish \
+- RSA_meth_set_priv_enc \
+- RSA_meth_set_priv_dec \
+- RSA_meth_set_finish \
+- EVP_PKEY_get0_RSA \
+- EVP_MD_CTX_new \
+- EVP_MD_CTX_free \
+- EVP_chacha20 \
+ ])
+
+ if test "x$openssl_engine" = "xyes" ; then
+@@ -3040,8 +3006,8 @@
+ fi
+ AC_CHECK_FUNCS([crypt DES_crypt])
+
+- # Check for SHA256, SHA384 and SHA512 support in OpenSSL
+- AC_CHECK_FUNCS([EVP_sha256 EVP_sha384 EVP_sha512])
++ # Check for various EVP support in OpenSSL
++ AC_CHECK_FUNCS([EVP_sha256 EVP_sha384 EVP_sha512 EVP_chacha20])
+
+ # Check complete ECC support in OpenSSL
+ AC_MSG_CHECKING([whether OpenSSL has NID_X9_62_prime256v1])
+diff --git a/openbsd-compat/libressl-api-compat.c b/openbsd-compat/libressl-api-compat.c
+index 498180dc894..59be17397c5 100644
+--- a/openbsd-compat/libressl-api-compat.c
++++ b/openbsd-compat/libressl-api-compat.c
+@@ -1,129 +1,5 @@
+-/* $OpenBSD: dsa_lib.c,v 1.29 2018/04/14 07:09:21 tb Exp $ */
+-/* $OpenBSD: rsa_lib.c,v 1.37 2018/04/14 07:09:21 tb Exp $ */
+-/* $OpenBSD: evp_lib.c,v 1.17 2018/09/12 06:35:38 djm Exp $ */
+-/* $OpenBSD: dh_lib.c,v 1.32 2018/05/02 15:48:38 tb Exp $ */
+-/* $OpenBSD: p_lib.c,v 1.24 2018/05/30 15:40:50 tb Exp $ */
+-/* $OpenBSD: digest.c,v 1.30 2018/04/14 07:09:21 tb Exp $ */
+-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+- * All rights reserved.
+- *
+- * This package is an SSL implementation written
+- * by Eric Young (eay@cryptsoft.com).
+- * The implementation was written so as to conform with Netscapes SSL.
+- *
+- * This library is free for commercial and non-commercial use as long as
+- * the following conditions are aheared to. The following conditions
+- * apply to all code found in this distribution, be it the RC4, RSA,
+- * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+- * included with this distribution is covered by the same copyright terms
+- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+- *
+- * Copyright remains Eric Young's, and as such any Copyright notices in
+- * the code are not to be removed.
+- * If this package is used in a product, Eric Young should be given attribution
+- * as the author of the parts of the library used.
+- * This can be in the form of a textual message at program startup or
+- * in documentation (online or textual) provided with the package.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- * 1. Redistributions of source code must retain the copyright
+- * notice, this list of conditions and the following disclaimer.
+- * 2. Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * 3. All advertising materials mentioning features or use of this software
+- * must display the following acknowledgement:
+- * "This product includes cryptographic software written by
+- * Eric Young (eay@cryptsoft.com)"
+- * The word 'cryptographic' can be left out if the rouines from the library
+- * being used are not cryptographic related :-).
+- * 4. If you include any Windows specific code (or a derivative thereof) from
+- * the apps directory (application code) you must include an acknowledgement:
+- * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+- *
+- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+- * SUCH DAMAGE.
+- *
+- * The licence and distribution terms for any publically available version or
+- * derivative of this code cannot be changed. i.e. this code cannot simply be
+- * copied and put under another distribution licence
+- * [including the GNU Public Licence.]
+- */
+-
+-/* $OpenBSD: dsa_asn1.c,v 1.22 2018/06/14 17:03:19 jsing Exp $ */
+-/* $OpenBSD: ecs_asn1.c,v 1.9 2018/03/17 15:24:44 tb Exp $ */
+-/* $OpenBSD: digest.c,v 1.30 2018/04/14 07:09:21 tb Exp $ */
+-/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
+- * project 2000.
+- */
+-/* ====================================================================
+- * Copyright (c) 2000-2005 The OpenSSL Project. All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * 1. Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- *
+- * 2. Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in
+- * the documentation and/or other materials provided with the
+- * distribution.
+- *
+- * 3. All advertising materials mentioning features or use of this
+- * software must display the following acknowledgment:
+- * "This product includes software developed by the OpenSSL Project
+- * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+- *
+- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+- * endorse or promote products derived from this software without
+- * prior written permission. For written permission, please contact
+- * licensing@OpenSSL.org.
+- *
+- * 5. Products derived from this software may not be called "OpenSSL"
+- * nor may "OpenSSL" appear in their names without prior written
+- * permission of the OpenSSL Project.
+- *
+- * 6. Redistributions of any form whatsoever must retain the following
+- * acknowledgment:
+- * "This product includes software developed by the OpenSSL Project
+- * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+- * OF THE POSSIBILITY OF SUCH DAMAGE.
+- * ====================================================================
+- *
+- * This product includes cryptographic software written by Eric Young
+- * (eay@cryptsoft.com). This product includes software written by Tim
+- * Hudson (tjh@cryptsoft.com).
+- *
+- */
+-
+-/* $OpenBSD: rsa_meth.c,v 1.2 2018/09/12 06:35:38 djm Exp $ */
+ /*
+- * Copyright (c) 2018 Theo Buehler <tb@openbsd.org>
++ * Copyright (c) 2018 Damien Miller <djm@mindrot.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+@@ -147,192 +23,7 @@
+ #include <stdlib.h>
+ #include <string.h>
+
+-#include <openssl/err.h>
+-#include <openssl/bn.h>
+-#include <openssl/dsa.h>
+-#include <openssl/rsa.h>
+ #include <openssl/evp.h>
+-#ifdef OPENSSL_HAS_ECC
+-#include <openssl/ecdsa.h>
+-#endif
+-#include <openssl/dh.h>
+-
+-#ifndef HAVE_DSA_GET0_PQG
+-void
+-DSA_get0_pqg(const DSA *d, const BIGNUM **p, const BIGNUM **q, const BIGNUM **g)
+-{
+- if (p != NULL)
+- *p = d->p;
+- if (q != NULL)
+- *q = d->q;
+- if (g != NULL)
+- *g = d->g;
+-}
+-#endif /* HAVE_DSA_GET0_PQG */
+-
+-#ifndef HAVE_DSA_SET0_PQG
+-int
+-DSA_set0_pqg(DSA *d, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+-{
+- if ((d->p == NULL && p == NULL) || (d->q == NULL && q == NULL) ||
+- (d->g == NULL && g == NULL))
+- return 0;
+-
+- if (p != NULL) {
+- BN_free(d->p);
+- d->p = p;
+- }
+- if (q != NULL) {
+- BN_free(d->q);
+- d->q = q;
+- }
+- if (g != NULL) {
+- BN_free(d->g);
+- d->g = g;
+- }
+-
+- return 1;
+-}
+-#endif /* HAVE_DSA_SET0_PQG */
+-
+-#ifndef HAVE_DSA_GET0_KEY
+-void
+-DSA_get0_key(const DSA *d, const BIGNUM **pub_key, const BIGNUM **priv_key)
+-{
+- if (pub_key != NULL)
+- *pub_key = d->pub_key;
+- if (priv_key != NULL)
+- *priv_key = d->priv_key;
+-}
+-#endif /* HAVE_DSA_GET0_KEY */
+-
+-#ifndef HAVE_DSA_SET0_KEY
+-int
+-DSA_set0_key(DSA *d, BIGNUM *pub_key, BIGNUM *priv_key)
+-{
+- if (d->pub_key == NULL && pub_key == NULL)
+- return 0;
+-
+- if (pub_key != NULL) {
+- BN_free(d->pub_key);
+- d->pub_key = pub_key;
+- }
+- if (priv_key != NULL) {
+- BN_free(d->priv_key);
+- d->priv_key = priv_key;
+- }
+-
+- return 1;
+-}
+-#endif /* HAVE_DSA_SET0_KEY */
+-
+-#ifndef HAVE_RSA_GET0_KEY
+-void
+-RSA_get0_key(const RSA *r, const BIGNUM **n, const BIGNUM **e, const BIGNUM **d)
+-{
+- if (n != NULL)
+- *n = r->n;
+- if (e != NULL)
+- *e = r->e;
+- if (d != NULL)
+- *d = r->d;
+-}
+-#endif /* HAVE_RSA_GET0_KEY */
+-
+-#ifndef HAVE_RSA_SET0_KEY
+-int
+-RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d)
+-{
+- if ((r->n == NULL && n == NULL) || (r->e == NULL && e == NULL))
+- return 0;
+-
+- if (n != NULL) {
+- BN_free(r->n);
+- r->n = n;
+- }
+- if (e != NULL) {
+- BN_free(r->e);
+- r->e = e;
+- }
+- if (d != NULL) {
+- BN_free(r->d);
+- r->d = d;
+- }
+-
+- return 1;
+-}
+-#endif /* HAVE_RSA_SET0_KEY */
+-
+-#ifndef HAVE_RSA_GET0_CRT_PARAMS
+-void
+-RSA_get0_crt_params(const RSA *r, const BIGNUM **dmp1, const BIGNUM **dmq1,
+- const BIGNUM **iqmp)
+-{
+- if (dmp1 != NULL)
+- *dmp1 = r->dmp1;
+- if (dmq1 != NULL)
+- *dmq1 = r->dmq1;
+- if (iqmp != NULL)
+- *iqmp = r->iqmp;
+-}
+-#endif /* HAVE_RSA_GET0_CRT_PARAMS */
+-
+-#ifndef HAVE_RSA_SET0_CRT_PARAMS
+-int
+-RSA_set0_crt_params(RSA *r, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp)
+-{
+- if ((r->dmp1 == NULL && dmp1 == NULL) ||
+- (r->dmq1 == NULL && dmq1 == NULL) ||
+- (r->iqmp == NULL && iqmp == NULL))
+- return 0;
+-
+- if (dmp1 != NULL) {
+- BN_free(r->dmp1);
+- r->dmp1 = dmp1;
+- }
+- if (dmq1 != NULL) {
+- BN_free(r->dmq1);
+- r->dmq1 = dmq1;
+- }
+- if (iqmp != NULL) {
+- BN_free(r->iqmp);
+- r->iqmp = iqmp;
+- }
+-
+- return 1;
+-}
+-#endif /* HAVE_RSA_SET0_CRT_PARAMS */
+-
+-#ifndef HAVE_RSA_GET0_FACTORS
+-void
+-RSA_get0_factors(const RSA *r, const BIGNUM **p, const BIGNUM **q)
+-{
+- if (p != NULL)
+- *p = r->p;
+- if (q != NULL)
+- *q = r->q;
+-}
+-#endif /* HAVE_RSA_GET0_FACTORS */
+-
+-#ifndef HAVE_RSA_SET0_FACTORS
+-int
+-RSA_set0_factors(RSA *r, BIGNUM *p, BIGNUM *q)
+-{
+- if ((r->p == NULL && p == NULL) || (r->q == NULL && q == NULL))
+- return 0;
+-
+- if (p != NULL) {
+- BN_free(r->p);
+- r->p = p;
+- }
+- if (q != NULL) {
+- BN_free(r->q);
+- r->q = q;
+- }
+-
+- return 1;
+-}
+-#endif /* HAVE_RSA_SET0_FACTORS */
+
+ #ifndef HAVE_EVP_CIPHER_CTX_GET_IV
+ int
+@@ -392,249 +83,4 @@ EVP_CIPHER_CTX_set_iv(EVP_CIPHER_CTX *ctx, const unsigned char *iv, size_t len)
+ }
+ #endif /* HAVE_EVP_CIPHER_CTX_SET_IV */
+
+-#ifndef HAVE_DSA_SIG_GET0
+-void
+-DSA_SIG_get0(const DSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps)
+-{
+- if (pr != NULL)
+- *pr = sig->r;
+- if (ps != NULL)
+- *ps = sig->s;
+-}
+-#endif /* HAVE_DSA_SIG_GET0 */
+-
+-#ifndef HAVE_DSA_SIG_SET0
+-int
+-DSA_SIG_set0(DSA_SIG *sig, BIGNUM *r, BIGNUM *s)
+-{
+- if (r == NULL || s == NULL)
+- return 0;
+-
+- BN_clear_free(sig->r);
+- sig->r = r;
+- BN_clear_free(sig->s);
+- sig->s = s;
+-
+- return 1;
+-}
+-#endif /* HAVE_DSA_SIG_SET0 */
+-
+-#ifdef OPENSSL_HAS_ECC
+-#ifndef HAVE_ECDSA_SIG_GET0
+-void
+-ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps)
+-{
+- if (pr != NULL)
+- *pr = sig->r;
+- if (ps != NULL)
+- *ps = sig->s;
+-}
+-#endif /* HAVE_ECDSA_SIG_GET0 */
+-
+-#ifndef HAVE_ECDSA_SIG_SET0
+-int
+-ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s)
+-{
+- if (r == NULL || s == NULL)
+- return 0;
+-
+- BN_clear_free(sig->r);
+- BN_clear_free(sig->s);
+- sig->r = r;
+- sig->s = s;
+- return 1;
+-}
+-#endif /* HAVE_ECDSA_SIG_SET0 */
+-#endif /* OPENSSL_HAS_ECC */
+-
+-#ifndef HAVE_DH_GET0_PQG
+-void
+-DH_get0_pqg(const DH *dh, const BIGNUM **p, const BIGNUM **q, const BIGNUM **g)
+-{
+- if (p != NULL)
+- *p = dh->p;
+- if (q != NULL)
+- *q = dh->q;
+- if (g != NULL)
+- *g = dh->g;
+-}
+-#endif /* HAVE_DH_GET0_PQG */
+-
+-#ifndef HAVE_DH_SET0_PQG
+-int
+-DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+-{
+- if ((dh->p == NULL && p == NULL) || (dh->g == NULL && g == NULL))
+- return 0;
+-
+- if (p != NULL) {
+- BN_free(dh->p);
+- dh->p = p;
+- }
+- if (q != NULL) {
+- BN_free(dh->q);
+- dh->q = q;
+- }
+- if (g != NULL) {
+- BN_free(dh->g);
+- dh->g = g;
+- }
+-
+- return 1;
+-}
+-#endif /* HAVE_DH_SET0_PQG */
+-
+-#ifndef HAVE_DH_GET0_KEY
+-void
+-DH_get0_key(const DH *dh, const BIGNUM **pub_key, const BIGNUM **priv_key)
+-{
+- if (pub_key != NULL)
+- *pub_key = dh->pub_key;
+- if (priv_key != NULL)
+- *priv_key = dh->priv_key;
+-}
+-#endif /* HAVE_DH_GET0_KEY */
+-
+-#ifndef HAVE_DH_SET0_KEY
+-int
+-DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key)
+-{
+- if (pub_key != NULL) {
+- BN_free(dh->pub_key);
+- dh->pub_key = pub_key;
+- }
+- if (priv_key != NULL) {
+- BN_free(dh->priv_key);
+- dh->priv_key = priv_key;
+- }
+-
+- return 1;
+-}
+-#endif /* HAVE_DH_SET0_KEY */
+-
+-#ifndef HAVE_DH_SET_LENGTH
+-int
+-DH_set_length(DH *dh, long length)
+-{
+- if (length < 0 || length > INT_MAX)
+- return 0;
+-
+- dh->length = length;
+- return 1;
+-}
+-#endif /* HAVE_DH_SET_LENGTH */
+-
+-#ifndef HAVE_RSA_METH_FREE
+-void
+-RSA_meth_free(RSA_METHOD *meth)
+-{
+- if (meth != NULL) {
+- free((char *)meth->name);
+- free(meth);
+- }
+-}
+-#endif /* HAVE_RSA_METH_FREE */
+-
+-#ifndef HAVE_RSA_METH_DUP
+-RSA_METHOD *
+-RSA_meth_dup(const RSA_METHOD *meth)
+-{
+- RSA_METHOD *copy;
+-
+- if ((copy = calloc(1, sizeof(*copy))) == NULL)
+- return NULL;
+- memcpy(copy, meth, sizeof(*copy));
+- if ((copy->name = strdup(meth->name)) == NULL) {
+- free(copy);
+- return NULL;
+- }
+-
+- return copy;
+-}
+-#endif /* HAVE_RSA_METH_DUP */
+-
+-#ifndef HAVE_RSA_METH_SET1_NAME
+-int
+-RSA_meth_set1_name(RSA_METHOD *meth, const char *name)
+-{
+- char *copy;
+-
+- if ((copy = strdup(name)) == NULL)
+- return 0;
+- free((char *)meth->name);
+- meth->name = copy;
+- return 1;
+-}
+-#endif /* HAVE_RSA_METH_SET1_NAME */
+-
+-#ifndef HAVE_RSA_METH_GET_FINISH
+-int
+-(*RSA_meth_get_finish(const RSA_METHOD *meth))(RSA *rsa)
+-{
+- return meth->finish;
+-}
+-#endif /* HAVE_RSA_METH_GET_FINISH */
+-
+-#ifndef HAVE_RSA_METH_SET_PRIV_ENC
+-int
+-RSA_meth_set_priv_enc(RSA_METHOD *meth, int (*priv_enc)(int flen,
+- const unsigned char *from, unsigned char *to, RSA *rsa, int padding))
+-{
+- meth->rsa_priv_enc = priv_enc;
+- return 1;
+-}
+-#endif /* HAVE_RSA_METH_SET_PRIV_ENC */
+-
+-#ifndef HAVE_RSA_METH_SET_PRIV_DEC
+-int
+-RSA_meth_set_priv_dec(RSA_METHOD *meth, int (*priv_dec)(int flen,
+- const unsigned char *from, unsigned char *to, RSA *rsa, int padding))
+-{
+- meth->rsa_priv_dec = priv_dec;
+- return 1;
+-}
+-#endif /* HAVE_RSA_METH_SET_PRIV_DEC */
+-
+-#ifndef HAVE_RSA_METH_SET_FINISH
+-int
+-RSA_meth_set_finish(RSA_METHOD *meth, int (*finish)(RSA *rsa))
+-{
+- meth->finish = finish;
+- return 1;
+-}
+-#endif /* HAVE_RSA_METH_SET_FINISH */
+-
+-#ifndef HAVE_EVP_PKEY_GET0_RSA
+-RSA *
+-EVP_PKEY_get0_RSA(EVP_PKEY *pkey)
+-{
+- if (pkey->type != EVP_PKEY_RSA) {
+- /* EVPerror(EVP_R_EXPECTING_AN_RSA_KEY); */
+- return NULL;
+- }
+- return pkey->pkey.rsa;
+-}
+-#endif /* HAVE_EVP_PKEY_GET0_RSA */
+-
+-#ifndef HAVE_EVP_MD_CTX_NEW
+-EVP_MD_CTX *
+-EVP_MD_CTX_new(void)
+-{
+- return calloc(1, sizeof(EVP_MD_CTX));
+-}
+-#endif /* HAVE_EVP_MD_CTX_NEW */
+-
+-#ifndef HAVE_EVP_MD_CTX_FREE
+-void
+-EVP_MD_CTX_free(EVP_MD_CTX *ctx)
+-{
+- if (ctx == NULL)
+- return;
+-
+- EVP_MD_CTX_cleanup(ctx);
+-
+- free(ctx);
+-}
+-#endif /* HAVE_EVP_MD_CTX_FREE */
+-
+ #endif /* WITH_OPENSSL */
+diff --git a/openbsd-compat/openssl-compat.h b/openbsd-compat/openssl-compat.h
+index 61a69dd56eb..d0dd2c3450d 100644
+--- a/openbsd-compat/openssl-compat.h
++++ b/openbsd-compat/openssl-compat.h
+@@ -33,26 +33,13 @@
+ int ssh_compatible_openssl(long, long);
+ void ssh_libcrypto_init(void);
+
+-#if (OPENSSL_VERSION_NUMBER < 0x1000100fL)
+-# error OpenSSL 1.0.1 or greater is required
++#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
++# error OpenSSL 1.1.0 or greater is required
+ #endif
+-
+-#ifndef OPENSSL_VERSION
+-# define OPENSSL_VERSION SSLEAY_VERSION
+-#endif
+-
+-#ifndef HAVE_OPENSSL_VERSION
+-# define OpenSSL_version(x) SSLeay_version(x)
+-#endif
+-
+-#ifndef HAVE_OPENSSL_VERSION_NUM
+-# define OpenSSL_version_num SSLeay
+-#endif
+-
+-#if OPENSSL_VERSION_NUMBER < 0x10000001L
+-# define LIBCRYPTO_EVP_INL_TYPE unsigned int
+-#else
+-# define LIBCRYPTO_EVP_INL_TYPE size_t
++#ifdef LIBRESSL_VERSION_NUMBER
++# if LIBRESSL_VERSION_NUMBER < 0x3010000fL
++# error LibreSSL 3.1.0 or greater is required
++# endif
+ #endif
+
+ #ifndef OPENSSL_RSA_MAX_MODULUS_BITS
+@@ -68,25 +55,6 @@ void ssh_libcrypto_init(void);
+ # endif
+ #endif
+
+-/* LibreSSL/OpenSSL 1.1x API compat */
+-#ifndef HAVE_DSA_GET0_PQG
+-void DSA_get0_pqg(const DSA *d, const BIGNUM **p, const BIGNUM **q,
+- const BIGNUM **g);
+-#endif /* HAVE_DSA_GET0_PQG */
+-
+-#ifndef HAVE_DSA_SET0_PQG
+-int DSA_set0_pqg(DSA *d, BIGNUM *p, BIGNUM *q, BIGNUM *g);
+-#endif /* HAVE_DSA_SET0_PQG */
+-
+-#ifndef HAVE_DSA_GET0_KEY
+-void DSA_get0_key(const DSA *d, const BIGNUM **pub_key,
+- const BIGNUM **priv_key);
+-#endif /* HAVE_DSA_GET0_KEY */
+-
+-#ifndef HAVE_DSA_SET0_KEY
+-int DSA_set0_key(DSA *d, BIGNUM *pub_key, BIGNUM *priv_key);
+-#endif /* HAVE_DSA_SET0_KEY */
+-
+ #ifndef HAVE_EVP_CIPHER_CTX_GET_IV
+ # ifdef HAVE_EVP_CIPHER_CTX_GET_UPDATED_IV
+ # define EVP_CIPHER_CTX_get_iv EVP_CIPHER_CTX_get_updated_iv
+@@ -101,112 +69,5 @@ int EVP_CIPHER_CTX_set_iv(EVP_CIPHER_CTX *ctx,
+ const unsigned char *iv, size_t len);
+ #endif /* HAVE_EVP_CIPHER_CTX_SET_IV */
+
+-#ifndef HAVE_RSA_GET0_KEY
+-void RSA_get0_key(const RSA *r, const BIGNUM **n, const BIGNUM **e,
+- const BIGNUM **d);
+-#endif /* HAVE_RSA_GET0_KEY */
+-
+-#ifndef HAVE_RSA_SET0_KEY
+-int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d);
+-#endif /* HAVE_RSA_SET0_KEY */
+-
+-#ifndef HAVE_RSA_GET0_CRT_PARAMS
+-void RSA_get0_crt_params(const RSA *r, const BIGNUM **dmp1, const BIGNUM **dmq1,
+- const BIGNUM **iqmp);
+-#endif /* HAVE_RSA_GET0_CRT_PARAMS */
+-
+-#ifndef HAVE_RSA_SET0_CRT_PARAMS
+-int RSA_set0_crt_params(RSA *r, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp);
+-#endif /* HAVE_RSA_SET0_CRT_PARAMS */
+-
+-#ifndef HAVE_RSA_GET0_FACTORS
+-void RSA_get0_factors(const RSA *r, const BIGNUM **p, const BIGNUM **q);
+-#endif /* HAVE_RSA_GET0_FACTORS */
+-
+-#ifndef HAVE_RSA_SET0_FACTORS
+-int RSA_set0_factors(RSA *r, BIGNUM *p, BIGNUM *q);
+-#endif /* HAVE_RSA_SET0_FACTORS */
+-
+-#ifndef DSA_SIG_GET0
+-void DSA_SIG_get0(const DSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps);
+-#endif /* DSA_SIG_GET0 */
+-
+-#ifndef DSA_SIG_SET0
+-int DSA_SIG_set0(DSA_SIG *sig, BIGNUM *r, BIGNUM *s);
+-#endif /* DSA_SIG_SET0 */
+-
+-#ifdef OPENSSL_HAS_ECC
+-#ifndef HAVE_ECDSA_SIG_GET0
+-void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps);
+-#endif /* HAVE_ECDSA_SIG_GET0 */
+-
+-#ifndef HAVE_ECDSA_SIG_SET0
+-int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s);
+-#endif /* HAVE_ECDSA_SIG_SET0 */
+-#endif /* OPENSSL_HAS_ECC */
+-
+-#ifndef HAVE_DH_GET0_PQG
+-void DH_get0_pqg(const DH *dh, const BIGNUM **p, const BIGNUM **q,
+- const BIGNUM **g);
+-#endif /* HAVE_DH_GET0_PQG */
+-
+-#ifndef HAVE_DH_SET0_PQG
+-int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g);
+-#endif /* HAVE_DH_SET0_PQG */
+-
+-#ifndef HAVE_DH_GET0_KEY
+-void DH_get0_key(const DH *dh, const BIGNUM **pub_key, const BIGNUM **priv_key);
+-#endif /* HAVE_DH_GET0_KEY */
+-
+-#ifndef HAVE_DH_SET0_KEY
+-int DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key);
+-#endif /* HAVE_DH_SET0_KEY */
+-
+-#ifndef HAVE_DH_SET_LENGTH
+-int DH_set_length(DH *dh, long length);
+-#endif /* HAVE_DH_SET_LENGTH */
+-
+-#ifndef HAVE_RSA_METH_FREE
+-void RSA_meth_free(RSA_METHOD *meth);
+-#endif /* HAVE_RSA_METH_FREE */
+-
+-#ifndef HAVE_RSA_METH_DUP
+-RSA_METHOD *RSA_meth_dup(const RSA_METHOD *meth);
+-#endif /* HAVE_RSA_METH_DUP */
+-
+-#ifndef HAVE_RSA_METH_SET1_NAME
+-int RSA_meth_set1_name(RSA_METHOD *meth, const char *name);
+-#endif /* HAVE_RSA_METH_SET1_NAME */
+-
+-#ifndef HAVE_RSA_METH_GET_FINISH
+-int (*RSA_meth_get_finish(const RSA_METHOD *meth))(RSA *rsa);
+-#endif /* HAVE_RSA_METH_GET_FINISH */
+-
+-#ifndef HAVE_RSA_METH_SET_PRIV_ENC
+-int RSA_meth_set_priv_enc(RSA_METHOD *meth, int (*priv_enc)(int flen,
+- const unsigned char *from, unsigned char *to, RSA *rsa, int padding));
+-#endif /* HAVE_RSA_METH_SET_PRIV_ENC */
+-
+-#ifndef HAVE_RSA_METH_SET_PRIV_DEC
+-int RSA_meth_set_priv_dec(RSA_METHOD *meth, int (*priv_dec)(int flen,
+- const unsigned char *from, unsigned char *to, RSA *rsa, int padding));
+-#endif /* HAVE_RSA_METH_SET_PRIV_DEC */
+-
+-#ifndef HAVE_RSA_METH_SET_FINISH
+-int RSA_meth_set_finish(RSA_METHOD *meth, int (*finish)(RSA *rsa));
+-#endif /* HAVE_RSA_METH_SET_FINISH */
+-
+-#ifndef HAVE_EVP_PKEY_GET0_RSA
+-RSA *EVP_PKEY_get0_RSA(EVP_PKEY *pkey);
+-#endif /* HAVE_EVP_PKEY_GET0_RSA */
+-
+-#ifndef HAVE_EVP_MD_CTX_new
+-EVP_MD_CTX *EVP_MD_CTX_new(void);
+-#endif /* HAVE_EVP_MD_CTX_new */
+-
+-#ifndef HAVE_EVP_MD_CTX_free
+-void EVP_MD_CTX_free(EVP_MD_CTX *ctx);
+-#endif /* HAVE_EVP_MD_CTX_free */
+-
+ #endif /* WITH_OPENSSL */
+ #endif /* _OPENSSL_COMPAT_H */
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0001.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0001.patch
new file mode 100644
index 0000000000..2ee344cb27
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0001.patch
@@ -0,0 +1,585 @@
+From 099cdf59ce1e72f55d421c8445bf6321b3004755 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 14:03:45 +0000
+Subject: [PATCH 1/4] upstream: Separate ssh-pkcs11-helpers for each p11 module
+
+Make ssh-pkcs11-client start an independent helper for each provider,
+providing better isolation between modules and reliability if a single
+module misbehaves.
+
+This also implements reference counting of PKCS#11-hosted keys,
+allowing ssh-pkcs11-helper subprocesses to be automatically reaped
+when no remaining keys reference them. This fixes some bugs we have
+that make PKCS11 keys unusable after they have been deleted, e.g.
+https://bugzilla.mindrot.org/show_bug.cgi?id=3125
+
+ok markus@
+
+OpenBSD-Commit-ID: 0ce188b14fe271ab0568f4500070d96c5657244e
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/099cdf59ce1e72f55d421c8445bf6321b3004755]
+
+CVE: CVE-2023-38408
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ ssh-pkcs11-client.c | 378 +++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 285 insertions(+), 93 deletions(-)
+
+diff --git a/ssh-pkcs11-client.c b/ssh-pkcs11-client.c
+index cfd833d..7db6c6c 100644
+--- a/ssh-pkcs11-client.c
++++ b/ssh-pkcs11-client.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-pkcs11-client.c,v 1.17 2020/10/18 11:32:02 djm Exp $ */
++/* $OpenBSD: ssh-pkcs11-client.c,v 1.18 2023/07/19 14:03:45 djm Exp $ */
+ /*
+ * Copyright (c) 2010 Markus Friedl. All rights reserved.
+ * Copyright (c) 2014 Pedro Martelletto. All rights reserved.
+@@ -30,12 +30,11 @@
+ #include <string.h>
+ #include <unistd.h>
+ #include <errno.h>
++#include <limits.h>
+
+ #include <openssl/ecdsa.h>
+ #include <openssl/rsa.h>
+
+-#include "openbsd-compat/openssl-compat.h"
+-
+ #include "pathnames.h"
+ #include "xmalloc.h"
+ #include "sshbuf.h"
+@@ -47,18 +46,140 @@
+ #include "ssh-pkcs11.h"
+ #include "ssherr.h"
+
++#include "openbsd-compat/openssl-compat.h"
++
+ /* borrows code from sftp-server and ssh-agent */
+
+-static int fd = -1;
+-static pid_t pid = -1;
++/*
++ * Maintain a list of ssh-pkcs11-helper subprocesses. These may be looked up
++ * by provider path or their unique EC/RSA METHOD pointers.
++ */
++struct helper {
++ char *path;
++ pid_t pid;
++ int fd;
++ RSA_METHOD *rsa_meth;
++ EC_KEY_METHOD *ec_meth;
++ int (*rsa_finish)(RSA *rsa);
++ void (*ec_finish)(EC_KEY *key);
++ size_t nrsa, nec; /* number of active keys of each type */
++};
++static struct helper **helpers;
++static size_t nhelpers;
++
++static struct helper *
++helper_by_provider(const char *path)
++{
++ size_t i;
++
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] == NULL || helpers[i]->path == NULL ||
++ helpers[i]->fd == -1)
++ continue;
++ if (strcmp(helpers[i]->path, path) == 0)
++ return helpers[i];
++ }
++ return NULL;
++}
++
++static struct helper *
++helper_by_rsa(const RSA *rsa)
++{
++ size_t i;
++ const RSA_METHOD *meth;
++
++ if ((meth = RSA_get_method(rsa)) == NULL)
++ return NULL;
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] != NULL && helpers[i]->rsa_meth == meth)
++ return helpers[i];
++ }
++ return NULL;
++
++}
++
++static struct helper *
++helper_by_ec(const EC_KEY *ec)
++{
++ size_t i;
++ const EC_KEY_METHOD *meth;
++
++ if ((meth = EC_KEY_get_method(ec)) == NULL)
++ return NULL;
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] != NULL && helpers[i]->ec_meth == meth)
++ return helpers[i];
++ }
++ return NULL;
++
++}
++
++static void
++helper_free(struct helper *helper)
++{
++ size_t i;
++ int found = 0;
++
++ if (helper == NULL)
++ return;
++ if (helper->path == NULL || helper->ec_meth == NULL ||
++ helper->rsa_meth == NULL)
++ fatal_f("inconsistent helper");
++ debug3_f("free helper for provider %s", helper->path);
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] == helper) {
++ if (found)
++ fatal_f("helper recorded more than once");
++ found = 1;
++ }
++ else if (found)
++ helpers[i - 1] = helpers[i];
++ }
++ if (found) {
++ helpers = xrecallocarray(helpers, nhelpers,
++ nhelpers - 1, sizeof(*helpers));
++ nhelpers--;
++ }
++ free(helper->path);
++ EC_KEY_METHOD_free(helper->ec_meth);
++ RSA_meth_free(helper->rsa_meth);
++ free(helper);
++}
++
++static void
++helper_terminate(struct helper *helper)
++{
++ if (helper == NULL) {
++ return;
++ } else if (helper->fd == -1) {
++ debug3_f("already terminated");
++ } else {
++ debug3_f("terminating helper for %s; "
++ "remaining %zu RSA %zu ECDSA",
++ helper->path, helper->nrsa, helper->nec);
++ close(helper->fd);
++ /* XXX waitpid() */
++ helper->fd = -1;
++ helper->pid = -1;
++ }
++ /*
++ * Don't delete the helper entry until there are no remaining keys
++ * that reference it. Otherwise, any signing operation would call
++ * a free'd METHOD pointer and that would be bad.
++ */
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_free(helper);
++}
+
+ static void
+-send_msg(struct sshbuf *m)
++send_msg(int fd, struct sshbuf *m)
+ {
+ u_char buf[4];
+ size_t mlen = sshbuf_len(m);
+ int r;
+
++ if (fd == -1)
++ return;
+ POKE_U32(buf, mlen);
+ if (atomicio(vwrite, fd, buf, 4) != 4 ||
+ atomicio(vwrite, fd, sshbuf_mutable_ptr(m),
+@@ -69,12 +190,15 @@ send_msg(struct sshbuf *m)
+ }
+
+ static int
+-recv_msg(struct sshbuf *m)
++recv_msg(int fd, struct sshbuf *m)
+ {
+ u_int l, len;
+ u_char c, buf[1024];
+ int r;
+
++ sshbuf_reset(m);
++ if (fd == -1)
++ return 0; /* XXX */
+ if ((len = atomicio(read, fd, buf, 4)) != 4) {
+ error("read from helper failed: %u", len);
+ return (0); /* XXX */
+@@ -83,7 +207,6 @@ recv_msg(struct sshbuf *m)
+ if (len > 256 * 1024)
+ fatal("response too long: %u", len);
+ /* read len bytes into m */
+- sshbuf_reset(m);
+ while (len > 0) {
+ l = len;
+ if (l > sizeof(buf))
+@@ -104,14 +227,17 @@ recv_msg(struct sshbuf *m)
+ int
+ pkcs11_init(int interactive)
+ {
+- return (0);
++ return 0;
+ }
+
+ void
+ pkcs11_terminate(void)
+ {
+- if (fd >= 0)
+- close(fd);
++ size_t i;
++
++ debug3_f("terminating %zu helpers", nhelpers);
++ for (i = 0; i < nhelpers; i++)
++ helper_terminate(helpers[i]);
+ }
+
+ static int
+@@ -122,7 +248,11 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ u_char *blob = NULL, *signature = NULL;
+ size_t blen, slen = 0;
+ int r, ret = -1;
++ struct helper *helper;
+
++ if ((helper = helper_by_rsa(rsa)) == NULL || helper->fd == -1)
++ fatal_f("no helper for PKCS11 key");
++ debug3_f("signing with PKCS11 provider %s", helper->path);
+ if (padding != RSA_PKCS1_PADDING)
+ goto fail;
+ key = sshkey_new(KEY_UNSPEC);
+@@ -144,10 +274,10 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ (r = sshbuf_put_string(msg, from, flen)) != 0 ||
+ (r = sshbuf_put_u32(msg, 0)) != 0)
+ fatal_fr(r, "compose");
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- if (recv_msg(msg) == SSH2_AGENT_SIGN_RESPONSE) {
++ if (recv_msg(helper->fd, msg) == SSH2_AGENT_SIGN_RESPONSE) {
+ if ((r = sshbuf_get_string(msg, &signature, &slen)) != 0)
+ fatal_fr(r, "parse");
+ if (slen <= (size_t)RSA_size(rsa)) {
+@@ -163,7 +293,26 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ return (ret);
+ }
+
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
++static int
++rsa_finish(RSA *rsa)
++{
++ struct helper *helper;
++
++ if ((helper = helper_by_rsa(rsa)) == NULL)
++ fatal_f("no helper for PKCS11 key");
++ debug3_f("free PKCS11 RSA key for provider %s", helper->path);
++ if (helper->rsa_finish != NULL)
++ helper->rsa_finish(rsa);
++ if (helper->nrsa == 0)
++ fatal_f("RSA refcount error");
++ helper->nrsa--;
++ debug3_f("provider %s remaining keys: %zu RSA %zu ECDSA",
++ helper->path, helper->nrsa, helper->nec);
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_terminate(helper);
++ return 1;
++}
++
+ static ECDSA_SIG *
+ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ const BIGNUM *rp, EC_KEY *ec)
+@@ -175,7 +324,11 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ u_char *blob = NULL, *signature = NULL;
+ size_t blen, slen = 0;
+ int r, nid;
++ struct helper *helper;
+
++ if ((helper = helper_by_ec(ec)) == NULL || helper->fd == -1)
++ fatal_f("no helper for PKCS11 key");
++ debug3_f("signing with PKCS11 provider %s", helper->path);
+ nid = sshkey_ecdsa_key_to_nid(ec);
+ if (nid < 0) {
+ error_f("couldn't get curve nid");
+@@ -203,10 +356,10 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ (r = sshbuf_put_string(msg, dgst, dgst_len)) != 0 ||
+ (r = sshbuf_put_u32(msg, 0)) != 0)
+ fatal_fr(r, "compose");
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- if (recv_msg(msg) == SSH2_AGENT_SIGN_RESPONSE) {
++ if (recv_msg(helper->fd, msg) == SSH2_AGENT_SIGN_RESPONSE) {
+ if ((r = sshbuf_get_string(msg, &signature, &slen)) != 0)
+ fatal_fr(r, "parse");
+ cp = signature;
+@@ -220,75 +373,110 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ sshbuf_free(msg);
+ return (ret);
+ }
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+-static RSA_METHOD *helper_rsa;
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+-static EC_KEY_METHOD *helper_ecdsa;
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
++static void
++ecdsa_do_finish(EC_KEY *ec)
++{
++ struct helper *helper;
++
++ if ((helper = helper_by_ec(ec)) == NULL)
++ fatal_f("no helper for PKCS11 key");
++ debug3_f("free PKCS11 ECDSA key for provider %s", helper->path);
++ if (helper->ec_finish != NULL)
++ helper->ec_finish(ec);
++ if (helper->nec == 0)
++ fatal_f("ECDSA refcount error");
++ helper->nec--;
++ debug3_f("provider %s remaining keys: %zu RSA %zu ECDSA",
++ helper->path, helper->nrsa, helper->nec);
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_terminate(helper);
++}
+
+ /* redirect private key crypto operations to the ssh-pkcs11-helper */
+ static void
+-wrap_key(struct sshkey *k)
++wrap_key(struct helper *helper, struct sshkey *k)
+ {
+- if (k->type == KEY_RSA)
+- RSA_set_method(k->rsa, helper_rsa);
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+- else if (k->type == KEY_ECDSA)
+- EC_KEY_set_method(k->ecdsa, helper_ecdsa);
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+- else
++ debug3_f("wrap %s for provider %s", sshkey_type(k), helper->path);
++ if (k->type == KEY_RSA) {
++ RSA_set_method(k->rsa, helper->rsa_meth);
++ if (helper->nrsa++ >= INT_MAX)
++ fatal_f("RSA refcount error");
++ } else if (k->type == KEY_ECDSA) {
++ EC_KEY_set_method(k->ecdsa, helper->ec_meth);
++ if (helper->nec++ >= INT_MAX)
++ fatal_f("EC refcount error");
++ } else
+ fatal_f("unknown key type");
++ k->flags |= SSHKEY_FLAG_EXT;
++ debug3_f("provider %s remaining keys: %zu RSA %zu ECDSA",
++ helper->path, helper->nrsa, helper->nec);
+ }
+
+ static int
+-pkcs11_start_helper_methods(void)
++pkcs11_start_helper_methods(struct helper *helper)
+ {
+- if (helper_rsa != NULL)
+- return (0);
+-
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+- int (*orig_sign)(int, const unsigned char *, int, unsigned char *,
++ int (*ec_init)(EC_KEY *key);
++ int (*ec_copy)(EC_KEY *dest, const EC_KEY *src);
++ int (*ec_set_group)(EC_KEY *key, const EC_GROUP *grp);
++ int (*ec_set_private)(EC_KEY *key, const BIGNUM *priv_key);
++ int (*ec_set_public)(EC_KEY *key, const EC_POINT *pub_key);
++ int (*ec_sign)(int, const unsigned char *, int, unsigned char *,
+ unsigned int *, const BIGNUM *, const BIGNUM *, EC_KEY *) = NULL;
+- if (helper_ecdsa != NULL)
+- return (0);
+- helper_ecdsa = EC_KEY_METHOD_new(EC_KEY_OpenSSL());
+- if (helper_ecdsa == NULL)
+- return (-1);
+- EC_KEY_METHOD_get_sign(helper_ecdsa, &orig_sign, NULL, NULL);
+- EC_KEY_METHOD_set_sign(helper_ecdsa, orig_sign, NULL, ecdsa_do_sign);
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+-
+- if ((helper_rsa = RSA_meth_dup(RSA_get_default_method())) == NULL)
++ RSA_METHOD *rsa_meth;
++ EC_KEY_METHOD *ec_meth;
++
++ if ((ec_meth = EC_KEY_METHOD_new(EC_KEY_OpenSSL())) == NULL)
++ return -1;
++ EC_KEY_METHOD_get_sign(ec_meth, &ec_sign, NULL, NULL);
++ EC_KEY_METHOD_set_sign(ec_meth, ec_sign, NULL, ecdsa_do_sign);
++ EC_KEY_METHOD_get_init(ec_meth, &ec_init, &helper->ec_finish,
++ &ec_copy, &ec_set_group, &ec_set_private, &ec_set_public);
++ EC_KEY_METHOD_set_init(ec_meth, ec_init, ecdsa_do_finish,
++ ec_copy, ec_set_group, ec_set_private, ec_set_public);
++
++ if ((rsa_meth = RSA_meth_dup(RSA_get_default_method())) == NULL)
+ fatal_f("RSA_meth_dup failed");
+- if (!RSA_meth_set1_name(helper_rsa, "ssh-pkcs11-helper") ||
+- !RSA_meth_set_priv_enc(helper_rsa, rsa_encrypt))
++ helper->rsa_finish = RSA_meth_get_finish(rsa_meth);
++ if (!RSA_meth_set1_name(rsa_meth, "ssh-pkcs11-helper") ||
++ !RSA_meth_set_priv_enc(rsa_meth, rsa_encrypt) ||
++ !RSA_meth_set_finish(rsa_meth, rsa_finish))
+ fatal_f("failed to prepare method");
+
+- return (0);
++ helper->ec_meth = ec_meth;
++ helper->rsa_meth = rsa_meth;
++ return 0;
+ }
+
+-static int
+-pkcs11_start_helper(void)
++static struct helper *
++pkcs11_start_helper(const char *path)
+ {
+ int pair[2];
+- char *helper, *verbosity = NULL;
+-
+- if (log_level_get() >= SYSLOG_LEVEL_DEBUG1)
+- verbosity = "-vvv";
+-
+- if (pkcs11_start_helper_methods() == -1) {
+- error("pkcs11_start_helper_methods failed");
+- return (-1);
+- }
++ char *prog, *verbosity = NULL;
++ struct helper *helper;
++ pid_t pid;
+
++ if (nhelpers >= INT_MAX)
++ fatal_f("too many helpers");
++ debug3_f("start helper for %s", path);
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+- error("socketpair: %s", strerror(errno));
+- return (-1);
++ error_f("socketpair: %s", strerror(errno));
++ return NULL;
++ }
++ helper = xcalloc(1, sizeof(*helper));
++ if (pkcs11_start_helper_methods(helper) == -1) {
++ error_f("pkcs11_start_helper_methods failed");
++ goto fail;
+ }
+ if ((pid = fork()) == -1) {
+- error("fork: %s", strerror(errno));
+- return (-1);
++ error_f("fork: %s", strerror(errno));
++ fail:
++ close(pair[0]);
++ close(pair[1]);
++ RSA_meth_free(helper->rsa_meth);
++ EC_KEY_METHOD_free(helper->ec_meth);
++ free(helper);
++ return NULL;
+ } else if (pid == 0) {
+ if ((dup2(pair[1], STDIN_FILENO) == -1) ||
+ (dup2(pair[1], STDOUT_FILENO) == -1)) {
+@@ -297,18 +485,27 @@ pkcs11_start_helper(void)
+ }
+ close(pair[0]);
+ close(pair[1]);
+- helper = getenv("SSH_PKCS11_HELPER");
+- if (helper == NULL || strlen(helper) == 0)
+- helper = _PATH_SSH_PKCS11_HELPER;
+- debug_f("starting %s %s", helper,
++ prog = getenv("SSH_PKCS11_HELPER");
++ if (prog == NULL || strlen(prog) == 0)
++ prog = _PATH_SSH_PKCS11_HELPER;
++ if (log_level_get() >= SYSLOG_LEVEL_DEBUG1)
++ verbosity = "-vvv";
++ debug_f("starting %s %s", prog,
+ verbosity == NULL ? "" : verbosity);
+- execlp(helper, helper, verbosity, (char *)NULL);
+- fprintf(stderr, "exec: %s: %s\n", helper, strerror(errno));
++ execlp(prog, prog, verbosity, (char *)NULL);
++ fprintf(stderr, "exec: %s: %s\n", prog, strerror(errno));
+ _exit(1);
+ }
+ close(pair[1]);
+- fd = pair[0];
+- return (0);
++ helper->fd = pair[0];
++ helper->path = xstrdup(path);
++ helper->pid = pid;
++ debug3_f("helper %zu for \"%s\" on fd %d pid %ld", nhelpers,
++ helper->path, helper->fd, (long)helper->pid);
++ helpers = xrecallocarray(helpers, nhelpers,
++ nhelpers + 1, sizeof(*helpers));
++ helpers[nhelpers++] = helper;
++ return helper;
+ }
+
+ int
+@@ -322,9 +519,11 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ size_t blen;
+ u_int nkeys, i;
+ struct sshbuf *msg;
++ struct helper *helper;
+
+- if (fd < 0 && pkcs11_start_helper() < 0)
+- return (-1);
++ if ((helper = helper_by_provider(name)) == NULL &&
++ (helper = pkcs11_start_helper(name)) == NULL)
++ return -1;
+
+ if ((msg = sshbuf_new()) == NULL)
+ fatal_f("sshbuf_new failed");
+@@ -332,10 +531,10 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ (r = sshbuf_put_cstring(msg, name)) != 0 ||
+ (r = sshbuf_put_cstring(msg, pin)) != 0)
+ fatal_fr(r, "compose");
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- type = recv_msg(msg);
++ type = recv_msg(helper->fd, msg);
+ if (type == SSH2_AGENT_IDENTITIES_ANSWER) {
+ if ((r = sshbuf_get_u32(msg, &nkeys)) != 0)
+ fatal_fr(r, "parse nkeys");
+@@ -349,7 +548,7 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ fatal_fr(r, "parse key");
+ if ((r = sshkey_from_blob(blob, blen, &k)) != 0)
+ fatal_fr(r, "decode key");
+- wrap_key(k);
++ wrap_key(helper, k);
+ (*keysp)[i] = k;
+ if (labelsp)
+ (*labelsp)[i] = label;
+@@ -370,22 +569,15 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ int
+ pkcs11_del_provider(char *name)
+ {
+- int r, ret = -1;
+- struct sshbuf *msg;
+-
+- if ((msg = sshbuf_new()) == NULL)
+- fatal_f("sshbuf_new failed");
+- if ((r = sshbuf_put_u8(msg, SSH_AGENTC_REMOVE_SMARTCARD_KEY)) != 0 ||
+- (r = sshbuf_put_cstring(msg, name)) != 0 ||
+- (r = sshbuf_put_cstring(msg, "")) != 0)
+- fatal_fr(r, "compose");
+- send_msg(msg);
+- sshbuf_reset(msg);
+-
+- if (recv_msg(msg) == SSH_AGENT_SUCCESS)
+- ret = 0;
+- sshbuf_free(msg);
+- return (ret);
++ struct helper *helper;
++
++ /*
++ * ssh-agent deletes keys before calling this, so the helper entry
++ * should be gone before we get here.
++ */
++ debug3_f("delete %s", name);
++ if ((helper = helper_by_provider(name)) != NULL)
++ helper_terminate(helper);
++ return 0;
+ }
+-
+ #endif /* ENABLE_PKCS11 */
+--
+2.40.0
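
The hunks above are from the first CVE-2023-38408 backport: ssh-pkcs11-client.c stops talking to a single global helper over one file descriptor and instead keeps one ssh-pkcs11-helper process per provider path, looked up before every operation and started on demand. A rough sketch of that bookkeeping, with simplified, illustrative types (helper_by_provider() and helper_register() below are stand-ins, not the exact upstream code):

#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct helper {
    char *path;   /* provider library this helper serves */
    int fd;       /* socketpair endpoint to the helper process */
    pid_t pid;    /* helper process id */
};

static struct helper **helpers;
static size_t nhelpers;

/* Return the helper already running for a provider path, or NULL. */
static struct helper *
helper_by_provider(const char *path)
{
    size_t i;

    for (i = 0; i < nhelpers; i++) {
        if (helpers[i] != NULL && helpers[i]->path != NULL &&
            strcmp(helpers[i]->path, path) == 0)
            return helpers[i];
    }
    return NULL;
}

/* Append a freshly started helper to the table. */
static int
helper_register(struct helper *h)
{
    struct helper **tmp;

    tmp = realloc(helpers, (nhelpers + 1) * sizeof(*helpers));
    if (tmp == NULL)
        return -1;
    helpers = tmp;
    helpers[nhelpers++] = h;
    return 0;
}

With that structure in place, pkcs11_add_provider() in the patch reduces to "find the helper for this provider, start one if absent, talk over helper->fd", and pkcs11_del_provider() only needs to terminate the per-provider helper.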
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0002.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0002.patch
new file mode 100644
index 0000000000..81f4cc5fba
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0002.patch
@@ -0,0 +1,173 @@
+From 29ef8a04866ca14688d5b7fed7b8b9deab851f77 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 14:02:27 +0000
+Subject: [PATCH 2/4] upstream: Ensure FIDO/PKCS11 libraries contain expected
+ symbols
+
+This checks via nlist(3) that candidate provider libraries contain one
+of the symbols that we will require prior to dlopen(), which can cause
+a number of side effects, including execution of constructors.
+
+Feedback deraadt; ok markus
+
+OpenBSD-Commit-ID: 1508a5fbd74e329e69a55b56c453c292029aefbe
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/29ef8a04866ca14688d5b7fed7b8b9deab851f77]
+
+CVE: CVE-2023-38408
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ misc.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ misc.h | 1 +
+ ssh-pkcs11.c | 4 +++
+ ssh-sk.c | 6 ++--
+ 4 files changed, 86 insertions(+), 2 deletions(-)
+
+diff --git a/misc.c b/misc.c
+index 417498d..d0270e7 100644
+--- a/misc.c
++++ b/misc.c
+@@ -22,6 +22,7 @@
+
+ #include <sys/types.h>
+ #include <sys/ioctl.h>
++#include <sys/mman.h>
+ #include <sys/socket.h>
+ #include <sys/stat.h>
+ #include <sys/time.h>
+@@ -35,6 +36,9 @@
+ #ifdef HAVE_POLL_H
+ #include <poll.h>
+ #endif
++#ifdef HAVE_NLIST_H
++#include <nlist.h>
++#endif
+ #include <signal.h>
+ #include <stdarg.h>
+ #include <stdio.h>
+@@ -2784,3 +2788,76 @@ lookup_env_in_list(const char *env, char * const *envs, size_t nenvs)
+ }
+ return NULL;
+ }
++
++
++/*
++ * Returns zero if the library at 'path' contains symbol 's', nonzero
++ * otherwise.
++ */
++int
++lib_contains_symbol(const char *path, const char *s)
++{
++#ifdef HAVE_NLIST_H
++ struct nlist nl[2];
++ int ret = -1, r;
++
++ memset(nl, 0, sizeof(nl));
++ nl[0].n_name = xstrdup(s);
++ nl[1].n_name = NULL;
++ if ((r = nlist(path, nl)) == -1) {
++ error_f("nlist failed for %s", path);
++ goto out;
++ }
++ if (r != 0 || nl[0].n_value == 0 || nl[0].n_type == 0) {
++ error_f("library %s does not contain symbol %s", path, s);
++ goto out;
++ }
++ /* success */
++ ret = 0;
++ out:
++ free(nl[0].n_name);
++ return ret;
++#else /* HAVE_NLIST_H */
++ int fd, ret = -1;
++ struct stat st;
++ void *m = NULL;
++ size_t sz = 0;
++
++ memset(&st, 0, sizeof(st));
++ if ((fd = open(path, O_RDONLY)) < 0) {
++ error_f("open %s: %s", path, strerror(errno));
++ return -1;
++ }
++ if (fstat(fd, &st) != 0) {
++ error_f("fstat %s: %s", path, strerror(errno));
++ goto out;
++ }
++ if (!S_ISREG(st.st_mode)) {
++ error_f("%s is not a regular file", path);
++ goto out;
++ }
++ if (st.st_size < 0 ||
++ (size_t)st.st_size < strlen(s) ||
++ st.st_size >= INT_MAX/2) {
++ error_f("%s bad size %lld", path, (long long)st.st_size);
++ goto out;
++ }
++ sz = (size_t)st.st_size;
++ if ((m = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, fd, 0)) == MAP_FAILED ||
++ m == NULL) {
++ error_f("mmap %s: %s", path, strerror(errno));
++ goto out;
++ }
++ if (memmem(m, sz, s, strlen(s)) == NULL) {
++ error_f("%s does not contain expected string %s", path, s);
++ goto out;
++ }
++ /* success */
++ ret = 0;
++ out:
++ if (m != NULL && m != MAP_FAILED)
++ munmap(m, sz);
++ close(fd);
++ return ret;
++#endif /* HAVE_NLIST_H */
++}
+diff --git a/misc.h b/misc.h
+index 2e1b5fe..3f48315 100644
+--- a/misc.h
++++ b/misc.h
+@@ -96,6 +96,7 @@ int parse_absolute_time(const char *, uint64_t *);
+ void format_absolute_time(uint64_t, char *, size_t);
+ int path_absolute(const char *);
+ int stdfd_devnull(int, int, int);
++int lib_contains_symbol(const char *, const char *);
+
+ void sock_set_v6only(int);
+
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index b2e2b32..5eb28e9 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -1532,6 +1532,10 @@ pkcs11_register_provider(char *provider_id, char *pin,
+ debug_f("provider already registered: %s", provider_id);
+ goto fail;
+ }
++ if (lib_contains_symbol(provider_id, "C_GetFunctionList") != 0) {
++ error("provider %s is not a PKCS11 library", provider_id);
++ goto fail;
++ }
+ /* open shared pkcs11-library */
+ if ((handle = dlopen(provider_id, RTLD_NOW)) == NULL) {
+ error("dlopen %s failed: %s", provider_id, dlerror());
+diff --git a/ssh-sk.c b/ssh-sk.c
+index a1ff5cc..1042bf6 100644
+--- a/ssh-sk.c
++++ b/ssh-sk.c
+@@ -132,10 +132,12 @@ sshsk_open(const char *path)
+ #endif
+ return ret;
+ }
+- if ((ret->dlhandle = dlopen(path, RTLD_NOW)) == NULL) {
+- error("Provider \"%s\" dlopen failed: %s", path, dlerror());
++ if (lib_contains_symbol(path, "sk_api_version") != 0) {
++ error("provider %s is not an OpenSSH FIDO library", path);
+ goto fail;
+ }
++ if ((ret->dlhandle = dlopen(path, RTLD_NOW)) == NULL)
++ fatal("Provider \"%s\" dlopen failed: %s", path, dlerror());
+ if ((ret->sk_api_version = dlsym(ret->dlhandle,
+ "sk_api_version")) == NULL) {
+ error("Provider \"%s\" dlsym(sk_api_version) failed: %s",
+--
+2.40.0
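
Patch 0002 adds lib_contains_symbol() so that a candidate library is only handed to dlopen() (which runs the library's constructors) after a cheap check that it plausibly exports the required entry point; without nlist(3) the fallback shown above simply mmap()s the file and searches for the symbol name as a byte string. A hypothetical caller-side sketch of the same idea (open_provider_checked() is illustrative, not an upstream function):

#include <dlfcn.h>
#include <stdio.h>

/* Added by the patch in misc.c: returns 0 if 'path' contains 'sym'. */
int lib_contains_symbol(const char *path, const char *sym);

static void *
open_provider_checked(const char *path, const char *required_sym)
{
    void *handle;

    if (lib_contains_symbol(path, required_sym) != 0) {
        fprintf(stderr, "%s does not look like a provider (no %s)\n",
            path, required_sym);
        return NULL;
    }
    /* Only now run the library's constructors via dlopen(). */
    if ((handle = dlopen(path, RTLD_NOW)) == NULL) {
        fprintf(stderr, "dlopen %s: %s\n", path, dlerror());
        return NULL;
    }
    return handle;
}

The patch wires the check in for PKCS#11 providers ("C_GetFunctionList") and FIDO middleware ("sk_api_version").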
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0003.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0003.patch
new file mode 100644
index 0000000000..f226f12edc
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0003.patch
@@ -0,0 +1,36 @@
+From 892506b13654301f69f9545f48213fc210e5c5cc Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 13:55:53 +0000
+Subject: [PATCH 3/4] upstream: terminate process if requested to load a
+ PKCS#11 provider that isn't a PKCS#11 provider; from / ok markus@
+
+OpenBSD-Commit-ID: 39532cf18b115881bb4cfaee32084497aadfa05c
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/892506b13654301f69f9545f48213fc210e5c5cc]
+
+CVE: CVE-2023-38408
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ ssh-pkcs11.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index 5eb28e9..0aef379 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -1541,10 +1541,8 @@ pkcs11_register_provider(char *provider_id, char *pin,
+ error("dlopen %s failed: %s", provider_id, dlerror());
+ goto fail;
+ }
+- if ((getfunctionlist = dlsym(handle, "C_GetFunctionList")) == NULL) {
+- error("dlsym(C_GetFunctionList) failed: %s", dlerror());
+- goto fail;
+- }
++ if ((getfunctionlist = dlsym(handle, "C_GetFunctionList")) == NULL)
++ fatal("dlsym(C_GetFunctionList) failed: %s", dlerror());
+ p = xcalloc(1, sizeof(*p));
+ p->name = xstrdup(provider_id);
+ p->handle = handle;
+--
+2.40.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0004.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0004.patch
new file mode 100644
index 0000000000..1ff8505938
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-0004.patch
@@ -0,0 +1,114 @@
+From 1f2731f5d7a8f8a8385c6031667ed29072c0d92a Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 13:56:33 +0000
+Subject: [PATCH 4/4] upstream: Disallow remote addition of FIDO/PKCS11
+ provider libraries to ssh-agent by default.
+
+The old behaviour of allowing remote clients from loading providers
+can be restored using `ssh-agent -O allow-remote-pkcs11`.
+
+Detection of local/remote clients requires a ssh(1) that supports
+the `session-bind@openssh.com` extension. Forwarding access to a
+ssh-agent socket using non-OpenSSH tools may circumvent this control.
+
+ok markus@
+
+OpenBSD-Commit-ID: 4c2bdf79b214ae7e60cc8c39a45501344fa7bd7c
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/1f2731f5d7a8f8a8385c6031667ed29072c0d92a]
+
+CVE: CVE-2023-38408
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ ssh-agent.1 | 21 +++++++++++++++++++++
+ ssh-agent.c | 21 ++++++++++++++++++++-
+ 2 files changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/ssh-agent.1 b/ssh-agent.1
+index ed8c870..15d0a47 100644
+--- a/ssh-agent.1
++++ b/ssh-agent.1
+@@ -102,6 +102,27 @@ The default is
+ Kill the current agent (given by the
+ .Ev SSH_AGENT_PID
+ environment variable).
++Currently two options are supported:
++.Cm allow-remote-pkcs11
++and
++.Cm no-restrict-websafe .
++.Pp
++The
++.Cm allow-remote-pkcs11
++option allows clients of a forwarded
++.Nm
++to load PKCS#11 or FIDO provider libraries.
++By default only local clients may perform this operation.
++Note that signalling that a
++.Nm
++client is remote is performed by
++.Xr ssh 1 ,
++and use of other tools to forward access to the agent socket may circumvent
++this restriction.
++.Pp
++The
++.Cm no-restrict-websafe ,
++instructs
+ .It Fl P Ar allowed_providers
+ Specify a pattern-list of acceptable paths for PKCS#11 provider and FIDO
+ authenticator middleware shared libraries that may be used with the
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 03ae2b0..19eeaae 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -171,6 +171,12 @@ char socket_dir[PATH_MAX];
+ /* Pattern-list of allowed PKCS#11/Security key paths */
+ static char *allowed_providers;
+
++/*
++ * Allows PKCS11 providers or SK keys that use non-internal providers to
++ * be added over a remote connection (identified by session-bind@openssh.com).
++ */
++static int remote_add_provider;
++
+ /* locking */
+ #define LOCK_SIZE 32
+ #define LOCK_SALT_SIZE 16
+@@ -1239,6 +1245,12 @@ process_add_identity(SocketEntry *e)
+ if (strcasecmp(sk_provider, "internal") == 0) {
+ debug_f("internal provider");
+ } else {
++ if (e->nsession_ids != 0 && !remote_add_provider) {
++ verbose("failed add of SK provider \"%.100s\": "
++ "remote addition of providers is disabled",
++ sk_provider);
++ goto out;
++ }
+ if (realpath(sk_provider, canonical_provider) == NULL) {
+ verbose("failed provider \"%.100s\": "
+ "realpath: %s", sk_provider,
+@@ -1402,6 +1414,11 @@ process_add_smartcard_key(SocketEntry *e)
+ error_f("failed to parse constraints");
+ goto send;
+ }
++ if (e->nsession_ids != 0 && !remote_add_provider) {
++ verbose("failed PKCS#11 add of \"%.100s\": remote addition of "
++ "providers is disabled", provider);
++ goto send;
++ }
+ if (realpath(provider, canonical_provider) == NULL) {
+ verbose("failed PKCS#11 add of \"%.100s\": realpath: %s",
+ provider, strerror(errno));
+@@ -2061,7 +2078,9 @@ main(int ac, char **av)
+ break;
+ case 'O':
+ if (strcmp(optarg, "no-restrict-websafe") == 0)
+- restrict_websafe = 0;
++ restrict_websafe = 0;
++ else if (strcmp(optarg, "allow-remote-pkcs11") == 0)
++ remote_add_provider = 1;
+ else
+ fatal("Unknown -O option");
+ break;
+--
+2.40.0
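
Patch 0004 makes ssh-agent treat any client socket that has recorded a session-bind@openssh.com session ID as remote, and refuses PKCS#11/FIDO provider loads from it unless the agent was started with "ssh-agent -O allow-remote-pkcs11". Condensed into a single predicate (the struct and names below are illustrative, not the real SocketEntry):

#include <stdio.h>

struct socket_entry {
    unsigned int nsession_ids;  /* >0 once ssh(1) has bound a session */
};

/* Set when ssh-agent is started with -O allow-remote-pkcs11. */
static int remote_add_provider;

static int
provider_add_allowed(const struct socket_entry *e, const char *provider)
{
    if (e->nsession_ids != 0 && !remote_add_provider) {
        fprintf(stderr, "refusing provider \"%.100s\": remote "
            "addition of providers is disabled\n", provider);
        return 0;
    }
    return 1;
}

As the man page text above notes, the gate only works when the forwarding ssh(1) actually sends session-bind@openssh.com; other agent-forwarding tools can bypass it.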
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch
new file mode 100644
index 0000000000..6b2f927779
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch
@@ -0,0 +1,476 @@
+(modified to not remove ssh_packet_read_expect() and to add to
+KexAlgorithms in sshd.c and sshconnect2.c as this version pre-dates
+kex_proposal_populate_entries())
+
+Backport of:
+
+From 1edb00c58f8a6875fad6a497aa2bacf37f9e6cd5 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Mon, 18 Dec 2023 14:45:17 +0000
+Subject: [PATCH] upstream: implement "strict key exchange" in ssh and sshd
+
+This adds a protocol extension to improve the integrity of the SSH
+transport protocol, particular in and around the initial key exchange
+(KEX) phase.
+
+Full details of the extension are in the PROTOCOL file.
+
+with markus@
+
+OpenBSD-Commit-ID: 2a66ac962f0a630d7945fee54004ed9e9c439f14
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/openssh/tree/debian/patches/CVE-2023-48795.patch?h=ubuntu/jammy-security
+Upstream commit https://github.com/openssh/openssh-portable/commit/1edb00c58f8a6875fad6a497aa2bacf37f9e6cd5]
+CVE: CVE-2023-48795
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ PROTOCOL | 26 +++++++++++++++++
+ kex.c | 72 +++++++++++++++++++++++++++++++----------------
+ kex.h | 1 +
+ packet.c | 78 ++++++++++++++++++++++++++++++++++++++-------------
+ sshconnect2.c | 14 +++------
+ sshd.c | 7 +++--
+ 6 files changed, 142 insertions(+), 56 deletions(-)
+
+diff --git a/PROTOCOL b/PROTOCOL
+index e6a7d60..971f01e 100644
+--- a/PROTOCOL
++++ b/PROTOCOL
+@@ -102,6 +102,32 @@ OpenSSH supports the use of ECDH in Curve25519 for key exchange as
+ described at:
+ http://git.libssh.org/users/aris/libssh.git/plain/doc/curve25519-sha256@libssh.org.txt?h=curve25519
+
++1.9 transport: strict key exchange extension
++
++OpenSSH supports a number of transport-layer hardening measures under
++a "strict KEX" feature. This feature is signalled similarly to the
++RFC8308 ext-info feature: by including a additional algorithm in the
++initiial SSH2_MSG_KEXINIT kex_algorithms field. The client may append
++"kex-strict-c-v00@openssh.com" to its kex_algorithms and the server
++may append "kex-strict-s-v00@openssh.com". These pseudo-algorithms
++are only valid in the initial SSH2_MSG_KEXINIT and MUST be ignored
++if they are present in subsequent SSH2_MSG_KEXINIT packets.
++
++When an endpoint that supports this extension observes this algorithm
++name in a peer's KEXINIT packet, it MUST make the following changes to
++the protocol:
++
++a) During initial KEX, terminate the connection if any unexpected or
++ out-of-sequence packet is received. This includes terminating the
++ connection if the first packet received is not SSH2_MSG_KEXINIT.
++ Unexpected packets for the purpose of strict KEX include messages
++ that are otherwise valid at any time during the connection such as
++ SSH2_MSG_DEBUG and SSH2_MSG_IGNORE.
++b) After sending or receiving a SSH2_MSG_NEWKEYS message, reset the
++ packet sequence number to zero. This behaviour persists for the
++ duration of the connection (i.e. not just the first
++ SSH2_MSG_NEWKEYS).
++
+ 2. Connection protocol changes
+
+ 2.1. connection: Channel write close extension "eow@openssh.com"
+diff --git a/kex.c b/kex.c
+index 0bcd27d..e7b2d4d 100644
+--- a/kex.c
++++ b/kex.c
+@@ -63,7 +63,7 @@
+ #include "digest.h"
+
+ /* prototype */
+-static int kex_choose_conf(struct ssh *);
++static int kex_choose_conf(struct ssh *, uint32_t seq);
+ static int kex_input_newkeys(int, u_int32_t, struct ssh *);
+
+ static const char * const proposal_names[PROPOSAL_MAX] = {
+@@ -175,6 +175,18 @@ kex_names_valid(const char *names)
+ return 1;
+ }
+
++/* returns non-zero if proposal contains any algorithm from algs */
++static int
++has_any_alg(const char *proposal, const char *algs)
++{
++ char *cp;
++
++ if ((cp = match_list(proposal, algs, NULL)) == NULL)
++ return 0;
++ free(cp);
++ return 1;
++}
++
+ /*
+ * Concatenate algorithm names, avoiding duplicates in the process.
+ * Caller must free returned string.
+@@ -182,7 +194,7 @@ kex_names_valid(const char *names)
+ char *
+ kex_names_cat(const char *a, const char *b)
+ {
+- char *ret = NULL, *tmp = NULL, *cp, *p, *m;
++ char *ret = NULL, *tmp = NULL, *cp, *p;
+ size_t len;
+
+ if (a == NULL || *a == '\0')
+@@ -199,10 +211,8 @@ kex_names_cat(const char *a, const char *b)
+ }
+ strlcpy(ret, a, len);
+ for ((p = strsep(&cp, ",")); p && *p != '\0'; (p = strsep(&cp, ","))) {
+- if ((m = match_list(ret, p, NULL)) != NULL) {
+- free(m);
++ if (has_any_alg(ret, p))
+ continue; /* Algorithm already present */
+- }
+ if (strlcat(ret, ",", len) >= len ||
+ strlcat(ret, p, len) >= len) {
+ free(tmp);
+@@ -410,7 +420,12 @@ kex_protocol_error(int type, u_int32_t seq, struct ssh *ssh)
+ {
+ int r;
+
+- error("kex protocol error: type %d seq %u", type, seq);
++ /* If in strict mode, any unexpected message is an error */
++ if ((ssh->kex->flags & KEX_INITIAL) && ssh->kex->kex_strict) {
++ ssh_packet_disconnect(ssh, "strict KEX violation: "
++ "unexpected packet type %u (seqnr %u)", type, seq);
++ }
++ error_f("type %u seq %u", type, seq);
+ if ((r = sshpkt_start(ssh, SSH2_MSG_UNIMPLEMENTED)) != 0 ||
+ (r = sshpkt_put_u32(ssh, seq)) != 0 ||
+ (r = sshpkt_send(ssh)) != 0)
+@@ -485,6 +500,11 @@ kex_input_ext_info(int type, u_int32_t seq, struct ssh *ssh)
+ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &kex_protocol_error);
+ if ((r = sshpkt_get_u32(ssh, &ninfo)) != 0)
+ return r;
++ if (ninfo >= 1024) {
++ error("SSH2_MSG_EXT_INFO with too many entries, expected "
++ "<=1024, received %u", ninfo);
++ return dispatch_protocol_error(type, seq, ssh);
++ }
+ for (i = 0; i < ninfo; i++) {
+ if ((r = sshpkt_get_cstring(ssh, &name, NULL)) != 0)
+ return r;
+@@ -600,7 +620,7 @@ kex_input_kexinit(int type, u_int32_t seq, struct ssh *ssh)
+ error_f("no kex");
+ return SSH_ERR_INTERNAL_ERROR;
+ }
+- ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, NULL);
++ ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, &kex_protocol_error);
+ ptr = sshpkt_ptr(ssh, &dlen);
+ if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0)
+ return r;
+@@ -636,7 +656,7 @@ kex_input_kexinit(int type, u_int32_t seq, struct ssh *ssh)
+ if (!(kex->flags & KEX_INIT_SENT))
+ if ((r = kex_send_kexinit(ssh)) != 0)
+ return r;
+- if ((r = kex_choose_conf(ssh)) != 0)
++ if ((r = kex_choose_conf(ssh, seq)) != 0)
+ return r;
+
+ if (kex->kex_type < KEX_MAX && kex->kex[kex->kex_type] != NULL)
+@@ -900,20 +920,14 @@ proposals_match(char *my[PROPOSAL_MAX], char *peer[PROPOSAL_MAX])
+ return (1);
+ }
+
+-/* returns non-zero if proposal contains any algorithm from algs */
+ static int
+-has_any_alg(const char *proposal, const char *algs)
++kexalgs_contains(char **peer, const char *ext)
+ {
+- char *cp;
+-
+- if ((cp = match_list(proposal, algs, NULL)) == NULL)
+- return 0;
+- free(cp);
+- return 1;
++ return has_any_alg(peer[PROPOSAL_KEX_ALGS], ext);
+ }
+
+ static int
+-kex_choose_conf(struct ssh *ssh)
++kex_choose_conf(struct ssh *ssh, uint32_t seq)
+ {
+ struct kex *kex = ssh->kex;
+ struct newkeys *newkeys;
+@@ -938,13 +952,23 @@ kex_choose_conf(struct ssh *ssh)
+ sprop=peer;
+ }
+
+- /* Check whether client supports ext_info_c */
+- if (kex->server && (kex->flags & KEX_INITIAL)) {
+- char *ext;
+-
+- ext = match_list("ext-info-c", peer[PROPOSAL_KEX_ALGS], NULL);
+- kex->ext_info_c = (ext != NULL);
+- free(ext);
++ /* Check whether peer supports ext_info/kex_strict */
++ if ((kex->flags & KEX_INITIAL) != 0) {
++ if (kex->server) {
++ kex->ext_info_c = kexalgs_contains(peer, "ext-info-c");
++ kex->kex_strict = kexalgs_contains(peer,
++ "kex-strict-c-v00@openssh.com");
++ } else {
++ kex->kex_strict = kexalgs_contains(peer,
++ "kex-strict-s-v00@openssh.com");
++ }
++ if (kex->kex_strict) {
++ debug3_f("will use strict KEX ordering");
++ if (seq != 0)
++ ssh_packet_disconnect(ssh,
++ "strict KEX violation: "
++ "KEXINIT was not the first packet");
++ }
+ }
+
+ /* Check whether client supports rsa-sha2 algorithms */
+diff --git a/kex.h b/kex.h
+index c353295..d97323e 100644
+--- a/kex.h
++++ b/kex.h
+@@ -148,6 +148,7 @@ struct kex {
+ u_int kex_type;
+ char *server_sig_algs;
+ int ext_info_c;
++ int kex_strict;
+ struct sshbuf *my;
+ struct sshbuf *peer;
+ struct sshbuf *client_version;
+diff --git a/packet.c b/packet.c
+index bde6c10..28f3729 100644
+--- a/packet.c
++++ b/packet.c
+@@ -1205,8 +1205,13 @@ ssh_packet_send2_wrapped(struct ssh *ssh)
+ sshbuf_dump(state->output, stderr);
+ #endif
+ /* increment sequence number for outgoing packets */
+- if (++state->p_send.seqnr == 0)
++ if (++state->p_send.seqnr == 0) {
++ if ((ssh->kex->flags & KEX_INITIAL) != 0) {
++ ssh_packet_disconnect(ssh, "outgoing sequence number "
++ "wrapped during initial key exchange");
++ }
+ logit("outgoing seqnr wraps around");
++ }
+ if (++state->p_send.packets == 0)
+ if (!(ssh->compat & SSH_BUG_NOREKEY))
+ return SSH_ERR_NEED_REKEY;
+@@ -1214,6 +1219,11 @@ ssh_packet_send2_wrapped(struct ssh *ssh)
+ state->p_send.bytes += len;
+ sshbuf_reset(state->outgoing_packet);
+
++ if (type == SSH2_MSG_NEWKEYS && ssh->kex->kex_strict) {
++ debug_f("resetting send seqnr %u", state->p_send.seqnr);
++ state->p_send.seqnr = 0;
++ }
++
+ if (type == SSH2_MSG_NEWKEYS)
+ r = ssh_set_newkeys(ssh, MODE_OUT);
+ else if (type == SSH2_MSG_USERAUTH_SUCCESS && state->server_side)
+@@ -1342,8 +1352,7 @@ ssh_packet_read_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ /* Stay in the loop until we have received a complete packet. */
+ for (;;) {
+ /* Try to read a packet from the buffer. */
+- r = ssh_packet_read_poll_seqnr(ssh, typep, seqnr_p);
+- if (r != 0)
++ if ((r = ssh_packet_read_poll_seqnr(ssh, typep, seqnr_p)) != 0)
+ break;
+ /* If we got a packet, return it. */
+ if (*typep != SSH_MSG_NONE)
+@@ -1627,10 +1636,16 @@ ssh_packet_read_poll2(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ if ((r = sshbuf_consume(state->input, mac->mac_len)) != 0)
+ goto out;
+ }
++
+ if (seqnr_p != NULL)
+ *seqnr_p = state->p_read.seqnr;
+- if (++state->p_read.seqnr == 0)
++ if (++state->p_read.seqnr == 0) {
++ if ((ssh->kex->flags & KEX_INITIAL) != 0) {
++ ssh_packet_disconnect(ssh, "incoming sequence number "
++ "wrapped during initial key exchange");
++ }
+ logit("incoming seqnr wraps around");
++ }
+ if (++state->p_read.packets == 0)
+ if (!(ssh->compat & SSH_BUG_NOREKEY))
+ return SSH_ERR_NEED_REKEY;
+@@ -1696,6 +1711,10 @@ ssh_packet_read_poll2(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ #endif
+ /* reset for next packet */
+ state->packlen = 0;
++ if (*typep == SSH2_MSG_NEWKEYS && ssh->kex->kex_strict) {
++ debug_f("resetting read seqnr %u", state->p_read.seqnr);
++ state->p_read.seqnr = 0;
++ }
+
+ if ((r = ssh_packet_check_rekey(ssh)) != 0)
+ return r;
+@@ -1716,10 +1735,39 @@ ssh_packet_read_poll_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ r = ssh_packet_read_poll2(ssh, typep, seqnr_p);
+ if (r != 0)
+ return r;
+- if (*typep) {
+- state->keep_alive_timeouts = 0;
+- DBG(debug("received packet type %d", *typep));
++ if (*typep == 0) {
++ /* no message ready */
++ return 0;
++ }
++ state->keep_alive_timeouts = 0;
++ DBG(debug("received packet type %d", *typep));
++
++ /* Always process disconnect messages */
++ if (*typep == SSH2_MSG_DISCONNECT) {
++ if ((r = sshpkt_get_u32(ssh, &reason)) != 0 ||
++ (r = sshpkt_get_string(ssh, &msg, NULL)) != 0)
++ return r;
++ /* Ignore normal client exit notifications */
++ do_log2(ssh->state->server_side &&
++ reason == SSH2_DISCONNECT_BY_APPLICATION ?
++ SYSLOG_LEVEL_INFO : SYSLOG_LEVEL_ERROR,
++ "Received disconnect from %s port %d:"
++ "%u: %.400s", ssh_remote_ipaddr(ssh),
++ ssh_remote_port(ssh), reason, msg);
++ free(msg);
++ return SSH_ERR_DISCONNECTED;
+ }
++
++ /*
++ * Do not implicitly handle any messages here during initial
++ * KEX when in strict mode. They will need to be allowed
++ * explicitly by the KEX dispatch table or they will generate
++ * protocol errors.
++ */
++ if (ssh->kex != NULL &&
++ (ssh->kex->flags & KEX_INITIAL) && ssh->kex->kex_strict)
++ return 0;
++ /* Implicitly handle transport-level messages */
+ switch (*typep) {
+ case SSH2_MSG_IGNORE:
+ debug3("Received SSH2_MSG_IGNORE");
+@@ -1734,19 +1782,6 @@ ssh_packet_read_poll_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ debug("Remote: %.900s", msg);
+ free(msg);
+ break;
+- case SSH2_MSG_DISCONNECT:
+- if ((r = sshpkt_get_u32(ssh, &reason)) != 0 ||
+- (r = sshpkt_get_string(ssh, &msg, NULL)) != 0)
+- return r;
+- /* Ignore normal client exit notifications */
+- do_log2(ssh->state->server_side &&
+- reason == SSH2_DISCONNECT_BY_APPLICATION ?
+- SYSLOG_LEVEL_INFO : SYSLOG_LEVEL_ERROR,
+- "Received disconnect from %s port %d:"
+- "%u: %.400s", ssh_remote_ipaddr(ssh),
+- ssh_remote_port(ssh), reason, msg);
+- free(msg);
+- return SSH_ERR_DISCONNECTED;
+ case SSH2_MSG_UNIMPLEMENTED:
+ if ((r = sshpkt_get_u32(ssh, &seqnr)) != 0)
+ return r;
+@@ -2211,6 +2246,7 @@ kex_to_blob(struct sshbuf *m, struct kex *kex)
+ (r = sshbuf_put_u32(m, kex->hostkey_type)) != 0 ||
+ (r = sshbuf_put_u32(m, kex->hostkey_nid)) != 0 ||
+ (r = sshbuf_put_u32(m, kex->kex_type)) != 0 ||
++ (r = sshbuf_put_u32(m, kex->kex_strict)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->my)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->peer)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->client_version)) != 0 ||
+@@ -2373,6 +2409,7 @@ kex_from_blob(struct sshbuf *m, struct kex **kexp)
+ (r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_type)) != 0 ||
+ (r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_nid)) != 0 ||
+ (r = sshbuf_get_u32(m, &kex->kex_type)) != 0 ||
++ (r = sshbuf_get_u32(m, &kex->kex_strict)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->my)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->peer)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->client_version)) != 0 ||
+@@ -2701,6 +2738,7 @@ sshpkt_disconnect(struct ssh *ssh, const char *fmt,...)
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
++ debug2_f("sending SSH2_MSG_DISCONNECT: %s", buf);
+ if ((r = sshpkt_start(ssh, SSH2_MSG_DISCONNECT)) != 0 ||
+ (r = sshpkt_put_u32(ssh, SSH2_DISCONNECT_PROTOCOL_ERROR)) != 0 ||
+ (r = sshpkt_put_cstring(ssh, buf)) != 0 ||
+diff --git a/sshconnect2.c b/sshconnect2.c
+index b25225e..83ae4a4 100644
+--- a/sshconnect2.c
++++ b/sshconnect2.c
+@@ -241,7 +241,8 @@ ssh_kex2(struct ssh *ssh, char *host, struct sockaddr *hostaddr, u_short port,
+ fatal_fr(r, "kex_assemble_namelist");
+ free(all_key);
+
+- if ((s = kex_names_cat(options.kex_algorithms, "ext-info-c")) == NULL)
++ if ((s = kex_names_cat(options.kex_algorithms,
++ "ext-info-c,kex-strict-c-v00@openssh.com")) == NULL)
+ fatal_f("kex_names_cat");
+ myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(ssh, s);
+ myproposal[PROPOSAL_ENC_ALGS_CTOS] =
+@@ -363,7 +364,6 @@ struct cauthmethod {
+ };
+
+ static int input_userauth_service_accept(int, u_int32_t, struct ssh *);
+-static int input_userauth_ext_info(int, u_int32_t, struct ssh *);
+ static int input_userauth_success(int, u_int32_t, struct ssh *);
+ static int input_userauth_failure(int, u_int32_t, struct ssh *);
+ static int input_userauth_banner(int, u_int32_t, struct ssh *);
+@@ -477,7 +477,7 @@ ssh_userauth2(struct ssh *ssh, const char *local_user,
+
+ ssh->authctxt = &authctxt;
+ ssh_dispatch_init(ssh, &input_userauth_error);
+- ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &input_userauth_ext_info);
++ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, kex_input_ext_info);
+ ssh_dispatch_set(ssh, SSH2_MSG_SERVICE_ACCEPT, &input_userauth_service_accept);
+ ssh_dispatch_run_fatal(ssh, DISPATCH_BLOCK, &authctxt.success); /* loop until success */
+ pubkey_cleanup(ssh);
+@@ -529,13 +529,6 @@ input_userauth_service_accept(int type, u_int32_t seq, struct ssh *ssh)
+ return r;
+ }
+
+-/* ARGSUSED */
+-static int
+-input_userauth_ext_info(int type, u_int32_t seqnr, struct ssh *ssh)
+-{
+- return kex_input_ext_info(type, seqnr, ssh);
+-}
+-
+ void
+ userauth(struct ssh *ssh, char *authlist)
+ {
+@@ -617,6 +610,7 @@ input_userauth_success(int type, u_int32_t seq, struct ssh *ssh)
+ free(authctxt->methoddata);
+ authctxt->methoddata = NULL;
+ authctxt->success = 1; /* break out */
++ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, dispatch_protocol_error);
+ return 0;
+ }
+
+diff --git a/sshd.c b/sshd.c
+index ef18ba4..652bdc3 100644
+--- a/sshd.c
++++ b/sshd.c
+@@ -2354,11 +2354,13 @@ static void
+ do_ssh2_kex(struct ssh *ssh)
+ {
+ char *myproposal[PROPOSAL_MAX] = { KEX_SERVER };
++ char *s;
+ struct kex *kex;
+ int r;
+
+- myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(ssh,
+- options.kex_algorithms);
++ if ((s = kex_names_cat(options.kex_algorithms, "kex-strict-s-v00@openssh.com")) == NULL)
++ fatal_f("kex_names_cat");
++ myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(ssh, s);
+ myproposal[PROPOSAL_ENC_ALGS_CTOS] = compat_cipher_proposal(ssh,
+ options.ciphers);
+ myproposal[PROPOSAL_ENC_ALGS_STOC] = compat_cipher_proposal(ssh,
+@@ -2411,6 +2413,7 @@ do_ssh2_kex(struct ssh *ssh)
+ (r = ssh_packet_write_wait(ssh)) != 0)
+ fatal_fr(r, "send test");
+ #endif
++ free(s);
+ debug("KEX done");
+ }
+
+--
+2.25.1
+
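The strict key exchange backport boils down to three behaviours: advertise a pseudo-algorithm (kex-strict-c-v00@openssh.com or kex-strict-s-v00@openssh.com) in the first KEXINIT, disconnect if any unexpected packet arrives during the initial key exchange, and reset the packet sequence numbers to zero every time SSH2_MSG_NEWKEYS is sent or received. A toy model of the receive side, using made-up names rather than the real packet.c state:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MSG_KEXINIT 20
#define MSG_NEWKEYS 21

struct xport {
    int kex_strict;       /* peer offered kex-strict-*-v00@openssh.com */
    int in_initial_kex;   /* still inside the first key exchange */
    uint32_t seqnr;       /* receive-direction sequence number */
};

static void
on_packet(struct xport *x, int type)
{
    /* Rule (a): in strict mode the very first packet must be KEXINIT. */
    if (x->kex_strict && x->in_initial_kex &&
        x->seqnr == 0 && type != MSG_KEXINIT) {
        fprintf(stderr, "strict KEX violation: first packet type %d\n", type);
        exit(1);
    }
    x->seqnr++;
    /* Rule (b): reset the counter at every NEWKEYS, initial or re-key. */
    if (type == MSG_NEWKEYS && x->kex_strict)
        x->seqnr = 0;
}

Resetting the counters at NEWKEYS is what defeats the CVE-2023-48795 ("Terrapin") prefix truncation: packets an attacker injects or drops during the unencrypted part of the handshake leave the two sides with mismatched sequence numbers, which is then detected instead of silently losing messages such as EXT_INFO.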
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-51384.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51384.patch
new file mode 100644
index 0000000000..ead3256915
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51384.patch
@@ -0,0 +1,171 @@
+From 881d9c6af9da4257c69c327c4e2f1508b2fa754b Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Mon, 18 Dec 2023 14:46:12 +0000
+Subject: [PATCH] upstream: apply destination constraints to all p11 keys
+
+Previously applied only to the first key returned from each token.
+
+ok markus@
+
+OpenBSD-Commit-ID: 36df3afb8eb94eec6b2541f063d0d164ef8b488d
+
+CVE: CVE-2023-51384
+
+Upstream-Status: Backport
+https://github.com/openssh/openssh-portable/commit/881d9c6af9da4257c69c327c4e2f1508b2fa754b
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ ssh-agent.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 98 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 19eeaae..4dbb4f3 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -249,6 +249,90 @@ free_dest_constraints(struct dest_constraint *dcs, size_t ndcs)
+ free(dcs);
+ }
+
++static void
++dup_dest_constraint_hop(const struct dest_constraint_hop *dch,
++ struct dest_constraint_hop *out)
++{
++ u_int i;
++ int r;
++
++ out->user = dch->user == NULL ? NULL : xstrdup(dch->user);
++ out->hostname = dch->hostname == NULL ? NULL : xstrdup(dch->hostname);
++ out->is_ca = dch->is_ca;
++ out->nkeys = dch->nkeys;
++ out->keys = out->nkeys == 0 ? NULL :
++ xcalloc(out->nkeys, sizeof(*out->keys));
++ out->key_is_ca = out->nkeys == 0 ? NULL :
++ xcalloc(out->nkeys, sizeof(*out->key_is_ca));
++ for (i = 0; i < dch->nkeys; i++) {
++ if (dch->keys[i] != NULL &&
++ (r = sshkey_from_private(dch->keys[i],
++ &(out->keys[i]))) != 0)
++ fatal_fr(r, "copy key");
++ out->key_is_ca[i] = dch->key_is_ca[i];
++ }
++}
++
++static struct dest_constraint *
++dup_dest_constraints(const struct dest_constraint *dcs, size_t ndcs)
++{
++ size_t i;
++ struct dest_constraint *ret;
++
++ if (ndcs == 0)
++ return NULL;
++ ret = xcalloc(ndcs, sizeof(*ret));
++ for (i = 0; i < ndcs; i++) {
++ dup_dest_constraint_hop(&dcs[i].from, &ret[i].from);
++ dup_dest_constraint_hop(&dcs[i].to, &ret[i].to);
++ }
++ return ret;
++}
++
++#ifdef DEBUG_CONSTRAINTS
++static void
++dump_dest_constraint_hop(const struct dest_constraint_hop *dch)
++{
++ u_int i;
++ char *fp;
++
++ debug_f("user %s hostname %s is_ca %d nkeys %u",
++ dch->user == NULL ? "(null)" : dch->user,
++ dch->hostname == NULL ? "(null)" : dch->hostname,
++ dch->is_ca, dch->nkeys);
++ for (i = 0; i < dch->nkeys; i++) {
++ fp = NULL;
++ if (dch->keys[i] != NULL &&
++ (fp = sshkey_fingerprint(dch->keys[i],
++ SSH_FP_HASH_DEFAULT, SSH_FP_DEFAULT)) == NULL)
++ fatal_f("fingerprint failed");
++ debug_f("key %u/%u: %s%s%s key_is_ca %d", i, dch->nkeys,
++ dch->keys[i] == NULL ? "" : sshkey_ssh_name(dch->keys[i]),
++ dch->keys[i] == NULL ? "" : " ",
++ dch->keys[i] == NULL ? "none" : fp,
++ dch->key_is_ca[i]);
++ free(fp);
++ }
++}
++#endif /* DEBUG_CONSTRAINTS */
++
++static void
++dump_dest_constraints(const char *context,
++ const struct dest_constraint *dcs, size_t ndcs)
++{
++#ifdef DEBUG_CONSTRAINTS
++ size_t i;
++
++ debug_f("%s: %zu constraints", context, ndcs);
++ for (i = 0; i < ndcs; i++) {
++ debug_f("constraint %zu / %zu: from: ", i, ndcs);
++ dump_dest_constraint_hop(&dcs[i].from);
++ debug_f("constraint %zu / %zu: to: ", i, ndcs);
++ dump_dest_constraint_hop(&dcs[i].to);
++ }
++ debug_f("done for %s", context);
++#endif /* DEBUG_CONSTRAINTS */
++}
+ static void
+ free_identity(Identity *id)
+ {
+@@ -520,13 +604,22 @@ process_request_identities(SocketEntry *e)
+ Identity *id;
+ struct sshbuf *msg, *keys;
+ int r;
+- u_int nentries = 0;
++ u_int i = 0, nentries = 0;
++ char *fp;
+
+ debug2_f("entering");
+
+ if ((msg = sshbuf_new()) == NULL || (keys = sshbuf_new()) == NULL)
+ fatal_f("sshbuf_new failed");
+ TAILQ_FOREACH(id, &idtab->idlist, next) {
++ if ((fp = sshkey_fingerprint(id->key, SSH_FP_HASH_DEFAULT,
++ SSH_FP_DEFAULT)) == NULL)
++ fatal_f("fingerprint failed");
++ debug_f("key %u / %u: %s %s", i++, idtab->nentries,
++ sshkey_ssh_name(id->key), fp);
++ dump_dest_constraints(__func__,
++ id->dest_constraints, id->ndest_constraints);
++ free(fp);
+ /* identity not visible, don't include in response */
+ if (identity_permitted(id, e, NULL, NULL, NULL) != 0)
+ continue;
+@@ -1235,6 +1328,7 @@ process_add_identity(SocketEntry *e)
+ sshbuf_reset(e->request);
+ goto out;
+ }
++ dump_dest_constraints(__func__, dest_constraints, ndest_constraints);
+
+ if (sk_provider != NULL) {
+ if (!sshkey_is_sk(k)) {
+@@ -1414,6 +1508,7 @@ process_add_smartcard_key(SocketEntry *e)
+ error_f("failed to parse constraints");
+ goto send;
+ }
++ dump_dest_constraints(__func__, dest_constraints, ndest_constraints);
+ if (e->nsession_ids != 0 && !remote_add_provider) {
+ verbose("failed PKCS#11 add of \"%.100s\": remote addition of "
+ "providers is disabled", provider);
+@@ -1449,10 +1544,9 @@ process_add_smartcard_key(SocketEntry *e)
+ }
+ id->death = death;
+ id->confirm = confirm;
+- id->dest_constraints = dest_constraints;
++ id->dest_constraints = dup_dest_constraints(
++ dest_constraints, ndest_constraints);
+ id->ndest_constraints = ndest_constraints;
+- dest_constraints = NULL; /* transferred */
+- ndest_constraints = 0;
+ TAILQ_INSERT_TAIL(&idtab->idlist, id, next);
+ idtab->nentries++;
+ success = 1;
+--
+2.40.0
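
Previously only the first key returned by a PKCS#11 token received the parsed destination constraints; the array was "transferred" into that identity and then zeroed, leaving every further key unconstrained. The fix gives each identity its own deep copy via dup_dest_constraints(). Reduced to the core pattern (the struct and helper below are simplified stand-ins, not the real ssh-agent types):

#include <stdlib.h>
#include <string.h>

struct constraint {
    char *from_host;
    char *to_host;
};

/* Deep-copy a constraint array so each identity owns its own copy. */
static struct constraint *
dup_constraints(const struct constraint *src, size_t n)
{
    struct constraint *out;
    size_t i;

    if (n == 0)
        return NULL;
    if ((out = calloc(n, sizeof(*out))) == NULL)
        return NULL;
    for (i = 0; i < n; i++) {
        out[i].from_host = src[i].from_host ? strdup(src[i].from_host) : NULL;
        out[i].to_host = src[i].to_host ? strdup(src[i].to_host) : NULL;
    }
    return out;
}

In the patch, every identity added from the token gets dup_dest_constraints(dest_constraints, ndest_constraints) instead of the old transfer-then-zero assignment, so keys after the first are no longer left without destination restrictions.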
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch
new file mode 100644
index 0000000000..b8e6813857
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch
@@ -0,0 +1,97 @@
+From 7ef3787c84b6b524501211b11a26c742f829af1a Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Mon, 18 Dec 2023 14:47:44 +0000
+Subject: [PATCH] upstream: ban user/hostnames with most shell metacharacters
+ This makes ssh(1) refuse user or host names provided on the commandline that
+ contain most shell metacharacters.
+
+Some programs that invoke ssh(1) using untrusted data do not filter
+metacharacters in arguments they supply. This could create
+interactions with user-specified ProxyCommand and other directives
+that allow shell injection attacks to occur.
+
+It's a mistake to invoke ssh(1) with arbitrary untrusted arguments,
+but getting this stuff right can be tricky, so this should prevent
+most obvious ways of creating risky situations. It however is not
+and cannot be perfect: ssh(1) has no practical way of interpreting
+what shell quoting rules are in use and how they interact with the
+user's specified ProxyCommand.
+
+To allow configurations that use strange user or hostnames to
+continue to work, this strictness is applied only to names coming
+from the commandline. Names specified using User or Hostname
+directives in ssh_config(5) are not affected.
+
+feedback/ok millert@ markus@ dtucker@ deraadt@
+
+OpenBSD-Commit-ID: 3b487348b5964f3e77b6b4d3da4c3b439e94b2d9
+
+CVE: CVE-2023-51385
+
+Upstream-Status: Backport
+[https://github.com/openssh/openssh-portable/commit/7ef3787c84b6b524501211b11a26c742f829af1a]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ ssh.c | 39 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 39 insertions(+)
+
+diff --git a/ssh.c b/ssh.c
+index 8ff9788..82ed15f 100644
+--- a/ssh.c
++++ b/ssh.c
+@@ -611,6 +611,41 @@ ssh_conn_info_free(struct ssh_conn_info *cinfo)
+ free(cinfo);
+ }
+
++static int
++valid_hostname(const char *s)
++{
++ size_t i;
++
++ if (*s == '-')
++ return 0;
++ for (i = 0; s[i] != 0; i++) {
++ if (strchr("'`\"$\\;&<>|(){}", s[i]) != NULL ||
++ isspace((u_char)s[i]) || iscntrl((u_char)s[i]))
++ return 0;
++ }
++ return 1;
++}
++
++static int
++valid_ruser(const char *s)
++{
++ size_t i;
++
++ if (*s == '-')
++ return 0;
++ for (i = 0; s[i] != 0; i++) {
++ if (strchr("'`\";&<>|(){}", s[i]) != NULL)
++ return 0;
++ /* Disallow '-' after whitespace */
++ if (isspace((u_char)s[i]) && s[i + 1] == '-')
++ return 0;
++ /* Disallow \ in last position */
++ if (s[i] == '\\' && s[i + 1] == '\0')
++ return 0;
++ }
++ return 1;
++}
++
+ /*
+ * Main program for the ssh client.
+ */
+@@ -1097,6 +1132,10 @@ main(int ac, char **av)
+ if (!host)
+ usage();
+
++ if (!valid_hostname(host))
++ fatal("hostname contains invalid characters");
++ if (options.user != NULL && !valid_ruser(options.user))
++ fatal("remote username contains invalid characters");
+ host_arg = xstrdup(host);
+
+ /* Initialize the command to execute on remote host. */
+--
+2.40.0
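
The two validators are applied only to user and host names taken from the command line: a leading '-' (which would be parsed as an option) and most shell metacharacters are rejected, so a crafted host name can no longer smuggle options or shell syntax into a user-specified ProxyCommand. A standalone copy of the hostname check for experimentation; the rejected character set mirrors the patch, while the main() harness is added here:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int
valid_hostname(const char *s)
{
    size_t i;

    if (*s == '-')      /* would be taken as an option by ssh(1) */
        return 0;
    for (i = 0; s[i] != '\0'; i++) {
        if (strchr("'`\"$\\;&<>|(){}", s[i]) != NULL ||
            isspace((unsigned char)s[i]) || iscntrl((unsigned char)s[i]))
            return 0;
    }
    return 1;
}

int
main(int argc, char **argv)
{
    int i;

    for (i = 1; i < argc; i++)
        printf("%s: %s\n", argv[i],
            valid_hostname(argv[i]) ? "ok" : "rejected");
    return 0;
}

Names set via Hostname or User in ssh_config(5) are deliberately left alone, so unusual but legitimate configurations keep working.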
diff --git a/meta/recipes-connectivity/openssh/openssh/fix-authorized-principals-command.patch b/meta/recipes-connectivity/openssh/openssh/fix-authorized-principals-command.patch
new file mode 100644
index 0000000000..3790774f15
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/fix-authorized-principals-command.patch
@@ -0,0 +1,30 @@
+From fcd78e31cdd45a7e69ccfe6d8a3b1037dc1de290 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 24 May 2023 23:01:06 +0000
+Subject: [PATCH] upstream: fix AuthorizedPrincipalsCommand when
+ AuthorizedKeysCommand
+Description: Fix the wrong code as the Subject suggests
+ I added this description to note that the file header change was
+ incompatible with the proposed code below and failed to apply,
+ so I dropped that chunk of the code.
+Origin: backport, https://github.com/openssh/openssh-portable/commit/fcd78e31cdd45a7e69ccfe6d8a3b1037dc1de290
+Bug: https://bugzilla.mindrot.org/show_bug.cgi?id=3574
+Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/openssh/+bug/2031942
+Last-Update: 2023-09-01
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/openssh/tree/debian/patches/fix-authorized-principals-command.patch?h=ubuntu/jammy-security
+Upstream commit https://github.com/openssh/openssh-portable/commit/fcd78e31cdd45a7e69ccfe6d8a3b1037dc1de290]
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
+--- a/servconf.c
++++ b/servconf.c
+@@ -2372,7 +2372,7 @@ process_server_config_line_depth(ServerO
+ fatal("%.200s line %d: %s must be an absolute path",
+ filename, linenum, keyword);
+ }
+- if (*activep && options->authorized_keys_command == NULL)
++ if (*activep && *charptr == NULL)
+ *charptr = xstrdup(str + len);
+ argv_consume(&ac);
+ break;
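
The one-line servconf.c change makes the shared option handler test the slot it is actually about to assign (*charptr, which for this keyword is the AuthorizedPrincipalsCommand field) instead of always testing authorized_keys_command, so AuthorizedPrincipalsCommand is no longer silently ignored whenever an AuthorizedKeysCommand is also configured. The generic "first active setting wins" pattern it restores, as a sketch with illustrative names:

#include <stdlib.h>
#include <string.h>

/* Assign a config string slot only once: the first active occurrence wins. */
static void
set_once(char **slot, const char *value, int active)
{
    if (active && *slot == NULL)
        *slot = strdup(value);
}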
diff --git a/meta/recipes-connectivity/openssh/openssh/run-ptest b/meta/recipes-connectivity/openssh/openssh/run-ptest
index 8a9b770d59..9a406e9b65 100755
--- a/meta/recipes-connectivity/openssh/openssh/run-ptest
+++ b/meta/recipes-connectivity/openssh/openssh/run-ptest
@@ -5,7 +5,7 @@ export SKIP_UNIT=1
cd regress
sed -i "/\t\tagent-ptrace /d" Makefile
-make -k BUILDDIR=`pwd`/.. .OBJDIR=`pwd` .CURDIR=`pwd` SUDO="sudo" tests \
+make -k BUILDDIR=`pwd`/.. .OBJDIR=`pwd` .CURDIR=`pwd` SUDO="" tests \
| sed -u -e 's/^skipped/SKIP: /g' -e 's/^ok /PASS: /g' -e 's/^failed/FAIL: /g'
SSHAGENT=`which ssh-agent`
diff --git a/meta/recipes-connectivity/openssh/openssh_8.9p1.bb b/meta/recipes-connectivity/openssh/openssh_8.9p1.bb
index f306b1245a..6411a64eff 100644
--- a/meta/recipes-connectivity/openssh/openssh_8.9p1.bb
+++ b/meta/recipes-connectivity/openssh/openssh_8.9p1.bb
@@ -26,6 +26,16 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
file://add-test-support-for-busybox.patch \
file://f107467179428a0e3ea9e4aa9738ac12ff02822d.patch \
file://0001-Default-to-not-using-sandbox-when-cross-compiling.patch \
+ file://7280401bdd77ca54be6867a154cc01e0d72612e0.patch \
+ file://0001-upstream-include-destination-constraints-for-smartca.patch \
+ file://CVE-2023-38408-0001.patch \
+ file://CVE-2023-38408-0002.patch \
+ file://CVE-2023-38408-0003.patch \
+ file://CVE-2023-38408-0004.patch \
+ file://fix-authorized-principals-command.patch \
+ file://CVE-2023-48795.patch \
+ file://CVE-2023-51384.patch \
+ file://CVE-2023-51385.patch \
"
SRC_URI[sha256sum] = "fd497654b7ab1686dac672fb83dfb4ba4096e8b5ffcdaccd262380ae58bec5e7"
@@ -39,6 +49,11 @@ CVE_CHECK_IGNORE += "CVE-2014-9278"
# CVE only applies to some distributed RHEL binaries
CVE_CHECK_IGNORE += "CVE-2008-3844"
+# Upstream does not consider CVE-2023-51767 to be a bug in OpenSSH and
+# does not intend to address it in OpenSSH
+# https://security-tracker.debian.org/tracker/CVE-2023-51767
+CVE_CHECK_IGNORE += "CVE-2023-51767"
+
PAM_SRC_URI = "file://sshd"
inherit manpages useradd update-rc.d update-alternatives systemd
@@ -54,15 +69,12 @@ SYSTEMD_SERVICE:${PN}-sshd = "sshd.socket"
inherit autotools-brokensep ptest
-PACKAGECONFIG ??= "rng-tools"
+PACKAGECONFIG ??= ""
PACKAGECONFIG[kerberos] = "--with-kerberos5,--without-kerberos5,krb5"
PACKAGECONFIG[ldns] = "--with-ldns,--without-ldns,ldns"
PACKAGECONFIG[libedit] = "--with-libedit,--without-libedit,libedit"
PACKAGECONFIG[manpages] = "--with-mantype=man,--with-mantype=cat"
-# Add RRECOMMENDS to rng-tools for sshd package
-PACKAGECONFIG[rng-tools] = ""
-
EXTRA_AUTORECONF += "--exclude=aclocal"
# login path is hardcoded in sshd
@@ -160,14 +172,14 @@ FILES:${PN}-sftp-server = "${libexecdir}/sftp-server"
FILES:${PN}-misc = "${bindir}/ssh* ${libexecdir}/ssh*"
FILES:${PN}-keygen = "${bindir}/ssh-keygen"
-RDEPENDS:${PN} += "${PN}-scp ${PN}-ssh ${PN}-sshd ${PN}-keygen"
+RDEPENDS:${PN} += "${PN}-scp ${PN}-ssh ${PN}-sshd ${PN}-keygen ${PN}-sftp-server"
RDEPENDS:${PN}-sshd += "${PN}-keygen ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-keyinit pam-plugin-loginuid', '', d)}"
-RRECOMMENDS:${PN}-sshd:append:class-target = "\
- ${@bb.utils.filter('PACKAGECONFIG', 'rng-tools', d)} \
-"
-
+# break dependency on base package for -dev package
+# otherwise SDK fails to build as the main openssh and dropbear packages
+# conflict with each other
+RDEPENDS:${PN}-dev = ""
# gdb would make attach-ptrace test pass rather than skip but not worth the build dependencies
-RDEPENDS:${PN}-ptest += "${PN}-sftp ${PN}-misc ${PN}-sftp-server make sed sudo coreutils"
+RDEPENDS:${PN}-ptest += "${PN}-sftp ${PN}-misc ${PN}-sftp-server make sed coreutils"
RPROVIDES:${PN}-ssh = "ssh"
RPROVIDES:${PN}-sshd = "sshd"
diff --git a/meta/recipes-connectivity/openssl/files/environment.d-openssl.sh b/meta/recipes-connectivity/openssl/files/environment.d-openssl.sh
index b9cc24a7ac..6f23490c87 100644
--- a/meta/recipes-connectivity/openssl/files/environment.d-openssl.sh
+++ b/meta/recipes-connectivity/openssl/files/environment.d-openssl.sh
@@ -1 +1,5 @@
export OPENSSL_CONF="$OECORE_NATIVE_SYSROOT/usr/lib/ssl/openssl.cnf"
+export SSL_CERT_DIR="$OECORE_NATIVE_SYSROOT/usr/lib/ssl/certs"
+export SSL_CERT_FILE="$OECORE_NATIVE_SYSROOT/usr/lib/ssl/certs/ca-certificates.crt"
+export OPENSSL_MODULES="$OECORE_NATIVE_SYSROOT/usr/lib/ossl-modules/"
+export OPENSSL_ENGINES="$OECORE_NATIVE_SYSROOT/usr/lib/engines-3"
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch b/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch
index 5effa6c6f6..af435472a5 100644
--- a/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch
+++ b/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch
@@ -13,16 +13,16 @@ Signed-off-by: Alexander Kanavin <alex@linutronix.de>
Configure | 10 ----------
1 file changed, 10 deletions(-)
-diff --git a/Configure b/Configure
-index 821e680..0387a74 100755
---- a/Configure
-+++ b/Configure
-@@ -1422,16 +1422,6 @@ if ($target =~ /^mingw/ && `$config{CC} --target-help 2>&1` =~ m/-mno-cygwin/m)
+Index: openssl-3.0.4/Configure
+===================================================================
+--- openssl-3.0.4.orig/Configure
++++ openssl-3.0.4/Configure
+@@ -1423,16 +1423,6 @@ if ($target =~ /^mingw/ && `$config{CC}
push @{$config{shared_ldflag}}, "-mno-cygwin";
}
-if ($target =~ /linux.*-mips/ && !$disabled{asm}
-- && !grep { $_ !~ /-m(ips|arch=)/ } (@{$config{CFLAGS}})) {
+- && !grep { $_ =~ /-m(ips|arch=)/ } (@{$config{CFLAGS}})) {
- # minimally required architecture flags for assembly modules
- my $value;
- $value = '-mips2' if ($target =~ /mips32/);
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch b/meta/recipes-connectivity/openssl/openssl/0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch
index 60890c666d..bafdbaa46f 100644
--- a/meta/recipes-connectivity/openssl/openssl/0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch
+++ b/meta/recipes-connectivity/openssl/openssl/0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch
@@ -34,11 +34,11 @@ Signed-off-by: Khem Raj <raj.khem@gmail.com>
crypto/build.info | 2 +-
2 files changed, 12 insertions(+), 2 deletions(-)
-diff --git a/Configurations/unix-Makefile.tmpl b/Configurations/unix-Makefile.tmpl
-index f88a70f..528cdef 100644
---- a/Configurations/unix-Makefile.tmpl
-+++ b/Configurations/unix-Makefile.tmpl
-@@ -471,13 +471,23 @@ BIN_LDFLAGS={- join(' ', $target{bin_lflags} || (),
+Index: openssl-3.0.4/Configurations/unix-Makefile.tmpl
+===================================================================
+--- openssl-3.0.4.orig/Configurations/unix-Makefile.tmpl
++++ openssl-3.0.4/Configurations/unix-Makefile.tmpl
+@@ -472,13 +472,23 @@ BIN_LDFLAGS={- join(' ', $target{bin_lfl
'$(CNF_LDFLAGS)', '$(LDFLAGS)') -}
BIN_EX_LIBS=$(CNF_EX_LIBS) $(EX_LIBS)
@@ -63,10 +63,10 @@ index f88a70f..528cdef 100644
PERLASM_SCHEME= {- $target{perlasm_scheme} -}
# For x86 assembler: Set PROCESSOR to 386 if you want to support
-diff --git a/crypto/build.info b/crypto/build.info
-index efca6cc..eda433e 100644
---- a/crypto/build.info
-+++ b/crypto/build.info
+Index: openssl-3.0.4/crypto/build.info
+===================================================================
+--- openssl-3.0.4.orig/crypto/build.info
++++ openssl-3.0.4/crypto/build.info
@@ -109,7 +109,7 @@ DEFINE[../libcrypto]=$UPLINKDEF
DEPEND[info.o]=buildinf.h
@@ -74,5 +74,5 @@ index efca6cc..eda433e 100644
-GENERATE[buildinf.h]=../util/mkbuildinf.pl "$(CC) $(LIB_CFLAGS) $(CPPFLAGS_Q)" "$(PLATFORM)"
+GENERATE[buildinf.h]=../util/mkbuildinf.pl "$(CC_Q) $(CFLAGS_Q) $(CPPFLAGS_Q)" "$(PLATFORM)"
- GENERATE[uplink-x86.s]=../ms/uplink-x86.pl
+ GENERATE[uplink-x86.S]=../ms/uplink-x86.pl
GENERATE[uplink-x86_64.s]=../ms/uplink-x86_64.pl
diff --git a/meta/recipes-connectivity/openssl/openssl/770aea88c3888cc5cb3ebc94ffcef706c68bc1d2.patch b/meta/recipes-connectivity/openssl/openssl/770aea88c3888cc5cb3ebc94ffcef706c68bc1d2.patch
deleted file mode 100644
index 0249d4181b..0000000000
--- a/meta/recipes-connectivity/openssl/openssl/770aea88c3888cc5cb3ebc94ffcef706c68bc1d2.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 770aea88c3888cc5cb3ebc94ffcef706c68bc1d2 Mon Sep 17 00:00:00 2001
-From: Tomas Mraz <tomas@openssl.org>
-Date: Wed, 1 Jun 2022 12:06:33 +0200
-Subject: [PATCH] Update expired SCT issuer certificate
-
-Fixes #15179
-
-Reviewed-by: Matt Caswell <matt@openssl.org>
-Reviewed-by: Dmitry Belyavskiy <beldmit@gmail.com>
-(Merged from https://github.com/openssl/openssl/pull/18444)
-
-Upstream-Status: Backport
-[Fixes ptest failures in OE-Core]
----
- test/certs/embeddedSCTs1_issuer.pem | 30 ++++++++++++++---------------
- 1 file changed, 15 insertions(+), 15 deletions(-)
-
-diff --git a/test/certs/embeddedSCTs1_issuer.pem b/test/certs/embeddedSCTs1_issuer.pem
-index 1fa449d5a098..6aa9455f09ed 100644
---- a/test/certs/embeddedSCTs1_issuer.pem
-+++ b/test/certs/embeddedSCTs1_issuer.pem
-@@ -1,18 +1,18 @@
- -----BEGIN CERTIFICATE-----
--MIIC0DCCAjmgAwIBAgIBADANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJHQjEk
-+MIIC0jCCAjugAwIBAgIBADANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJHQjEk
- MCIGA1UEChMbQ2VydGlmaWNhdGUgVHJhbnNwYXJlbmN5IENBMQ4wDAYDVQQIEwVX
--YWxlczEQMA4GA1UEBxMHRXJ3IFdlbjAeFw0xMjA2MDEwMDAwMDBaFw0yMjA2MDEw
--MDAwMDBaMFUxCzAJBgNVBAYTAkdCMSQwIgYDVQQKExtDZXJ0aWZpY2F0ZSBUcmFu
--c3BhcmVuY3kgQ0ExDjAMBgNVBAgTBVdhbGVzMRAwDgYDVQQHEwdFcncgV2VuMIGf
--MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDVimhTYhCicRmTbneDIRgcKkATxtB7
--jHbrkVfT0PtLO1FuzsvRyY2RxS90P6tjXVUJnNE6uvMa5UFEJFGnTHgW8iQ8+EjP
--KDHM5nugSlojgZ88ujfmJNnDvbKZuDnd/iYx0ss6hPx7srXFL8/BT/9Ab1zURmnL
--svfP34b7arnRsQIDAQABo4GvMIGsMB0GA1UdDgQWBBRfnYgNyHPmVNT4DdjmsMEk
--tEfDVTB9BgNVHSMEdjB0gBRfnYgNyHPmVNT4DdjmsMEktEfDVaFZpFcwVTELMAkG
--A1UEBhMCR0IxJDAiBgNVBAoTG0NlcnRpZmljYXRlIFRyYW5zcGFyZW5jeSBDQTEO
--MAwGA1UECBMFV2FsZXMxEDAOBgNVBAcTB0VydyBXZW6CAQAwDAYDVR0TBAUwAwEB
--/zANBgkqhkiG9w0BAQUFAAOBgQAGCMxKbWTyIF4UbASydvkrDvqUpdryOvw4BmBt
--OZDQoeojPUApV2lGOwRmYef6HReZFSCa6i4Kd1F2QRIn18ADB8dHDmFYT9czQiRy
--f1HWkLxHqd81TbD26yWVXeGJPE3VICskovPkQNJ0tU4b03YmnKliibduyqQQkOFP
--OwqULg==
-+YWxlczEQMA4GA1UEBxMHRXJ3IFdlbjAgFw0yMjA2MDExMDM4MDJaGA8yMTIyMDUw
-+ODEwMzgwMlowVTELMAkGA1UEBhMCR0IxJDAiBgNVBAoTG0NlcnRpZmljYXRlIFRy
-+YW5zcGFyZW5jeSBDQTEOMAwGA1UECBMFV2FsZXMxEDAOBgNVBAcTB0VydyBXZW4w
-+gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANWKaFNiEKJxGZNud4MhGBwqQBPG
-+0HuMduuRV9PQ+0s7UW7Oy9HJjZHFL3Q/q2NdVQmc0Tq68xrlQUQkUadMeBbyJDz4
-+SM8oMczme6BKWiOBnzy6N+Yk2cO9spm4Od3+JjHSyzqE/HuytcUvz8FP/0BvXNRG
-+acuy98/fhvtqudGxAgMBAAGjga8wgawwHQYDVR0OBBYEFF+diA3Ic+ZU1PgN2Oaw
-+wSS0R8NVMH0GA1UdIwR2MHSAFF+diA3Ic+ZU1PgN2OawwSS0R8NVoVmkVzBVMQsw
-+CQYDVQQGEwJHQjEkMCIGA1UEChMbQ2VydGlmaWNhdGUgVHJhbnNwYXJlbmN5IENB
-+MQ4wDAYDVQQIEwVXYWxlczEQMA4GA1UEBxMHRXJ3IFdlboIBADAMBgNVHRMEBTAD
-+AQH/MA0GCSqGSIb3DQEBCwUAA4GBAD0aYh9OkFYfXV7kBfhrtD0PJG2U47OV/1qq
-++uFpqB0S1WO06eJT0pzYf1ebUcxjBkajbJZm/FHT85VthZ1lFHsky87aFD8XlJCo
-+2IOhKOkvvWKPUdFLoO/ZVXqEVKkcsS1eXK1glFvb07eJZya3JVG0KdMhV2YoDg6c
-+Doud4XrO
- -----END CERTIFICATE-----
diff --git a/meta/recipes-connectivity/openssl/openssl/CVE-2024-2511.patch b/meta/recipes-connectivity/openssl/openssl/CVE-2024-2511.patch
new file mode 100644
index 0000000000..8aea686205
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/CVE-2024-2511.patch
@@ -0,0 +1,122 @@
+From b52867a9f618bb955bed2a3ce3db4d4f97ed8e5d Mon Sep 17 00:00:00 2001
+From: Matt Caswell <matt@openssl.org>
+Date: Tue, 5 Mar 2024 15:43:53 +0000
+Subject: [PATCH] Fix unconstrained session cache growth in TLSv1.3
+
+In TLSv1.3 we create a new session object for each ticket that we send.
+We do this by duplicating the original session. If SSL_OP_NO_TICKET is in
+use then the new session will be added to the session cache. However, if
+early data is not in use (and therefore anti-replay protection is being
+used), then multiple threads could be resuming from the same session
+simultaneously. If this happens and a problem occurs on one of the threads,
+then the original session object could be marked as not_resumable. When we
+duplicate the session object this not_resumable status gets copied into the
+new session object. The new session object is then added to the session
+cache even though it is not_resumable.
+
+Subsequently, another bug means that the session_id_length is set to 0 for
+sessions that are marked as not_resumable - even though that session is
+still in the cache. Once this happens the session can never be removed from
+the cache. When that object gets to be the session cache tail object the
+cache never shrinks again and grows indefinitely.
+
+CVE-2024-2511
+
+Reviewed-by: Neil Horman <nhorman@openssl.org>
+Reviewed-by: Tomas Mraz <tomas@openssl.org>
+(Merged from https://github.com/openssl/openssl/pull/24044)
+
+(cherry picked from commit 7e4d731b1c07201ad9374c1cd9ac5263bdf35bce)
+
+CVE: CVE-2024-2511
+Upstream-Status: Backport [https://github.com/openssl/openssl/commit/b52867a9f618bb955bed2a3ce3db4d4f97ed8e5d]
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ ssl/ssl_lib.c | 5 +++--
+ ssl/ssl_sess.c | 28 ++++++++++++++++++++++------
+ ssl/statem/statem_srvr.c | 5 ++---
+ 3 files changed, 27 insertions(+), 11 deletions(-)
+
+diff --git a/ssl/ssl_lib.c b/ssl/ssl_lib.c
+index 2c8479eb5fc69..eed649c6fdee9 100644
+--- a/ssl/ssl_lib.c
++++ b/ssl/ssl_lib.c
+@@ -3736,9 +3736,10 @@ void ssl_update_cache(SSL *s, int mode)
+
+ /*
+ * If the session_id_length is 0, we are not supposed to cache it, and it
+- * would be rather hard to do anyway :-)
++ * would be rather hard to do anyway :-). Also if the session has already
++ * been marked as not_resumable we should not cache it for later reuse.
+ */
+- if (s->session->session_id_length == 0)
++ if (s->session->session_id_length == 0 || s->session->not_resumable)
+ return;
+
+ /*
+diff --git a/ssl/ssl_sess.c b/ssl/ssl_sess.c
+index d836b33ed0e81..75adbd9e52b40 100644
+--- a/ssl/ssl_sess.c
++++ b/ssl/ssl_sess.c
+@@ -152,16 +152,11 @@ SSL_SESSION *SSL_SESSION_new(void)
+ return ss;
+ }
+
+-SSL_SESSION *SSL_SESSION_dup(const SSL_SESSION *src)
+-{
+- return ssl_session_dup(src, 1);
+-}
+-
+ /*
+ * Create a new SSL_SESSION and duplicate the contents of |src| into it. If
+ * ticket == 0 then no ticket information is duplicated, otherwise it is.
+ */
+-SSL_SESSION *ssl_session_dup(const SSL_SESSION *src, int ticket)
++static SSL_SESSION *ssl_session_dup_intern(const SSL_SESSION *src, int ticket)
+ {
+ SSL_SESSION *dest;
+
+@@ -285,6 +280,27 @@ SSL_SESSION *ssl_session_dup(const SSL_SESSION *src, int ticket)
+ return NULL;
+ }
+
++SSL_SESSION *SSL_SESSION_dup(const SSL_SESSION *src)
++{
++ return ssl_session_dup_intern(src, 1);
++}
++
++/*
++ * Used internally when duplicating a session which might be already shared.
++ * We will have resumed the original session. Subsequently we might have marked
++ * it as non-resumable (e.g. in another thread) - but this copy should be ok to
++ * resume from.
++ */
++SSL_SESSION *ssl_session_dup(const SSL_SESSION *src, int ticket)
++{
++ SSL_SESSION *sess = ssl_session_dup_intern(src, ticket);
++
++ if (sess != NULL)
++ sess->not_resumable = 0;
++
++ return sess;
++}
++
+ const unsigned char *SSL_SESSION_get_id(const SSL_SESSION *s, unsigned int *len)
+ {
+ if (len)
+diff --git a/ssl/statem/statem_srvr.c b/ssl/statem/statem_srvr.c
+index a9e67f9d32a77..6c942e6bcec29 100644
+--- a/ssl/statem/statem_srvr.c
++++ b/ssl/statem/statem_srvr.c
+@@ -2338,9 +2338,8 @@ int tls_construct_server_hello(SSL *s, WPACKET *pkt)
+ * so the following won't overwrite an ID that we're supposed
+ * to send back.
+ */
+- if (s->session->not_resumable ||
+- (!(s->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)
+- && !s->hit))
++ if (!(s->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)
++ && !s->hit)
+ s->session->session_id_length = 0;
+
+ if (usetls13) {
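
The first hunk is the essential one for CVE-2024-2511: ssl_update_cache() must skip sessions already flagged not_resumable, because in TLSv1.3 the per-ticket duplicate of a shared session can inherit that flag (for example when another thread resuming the same session hits an error), and once such an entry reaches the cache it can never be evicted, so the cache grows without bound. The guard, reduced to a sketch (the struct below is an illustrative stand-in for SSL_SESSION):

#include <stddef.h>

struct tls_session {
    size_t session_id_length;
    int not_resumable;
};

/* Mirror of the fixed condition in ssl_update_cache(). */
static int
should_cache_session(const struct tls_session *s)
{
    /* No ID to key the cache on, or marked unusable: do not cache. */
    if (s->session_id_length == 0 || s->not_resumable)
        return 0;
    return 1;
}

The companion ssl_sess.c hunk clears not_resumable on the duplicate handed out with a new ticket, since that copy is safe to resume even if the original has since been withdrawn.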
diff --git a/meta/recipes-connectivity/openssl/openssl/afalg.patch b/meta/recipes-connectivity/openssl/openssl/afalg.patch
index b7c0e9697f..cf77e873a2 100644
--- a/meta/recipes-connectivity/openssl/openssl/afalg.patch
+++ b/meta/recipes-connectivity/openssl/openssl/afalg.patch
@@ -3,11 +3,11 @@ Don't refuse to build afalgeng if cross-compiling or the host kernel is too old.
Upstream-Status: Submitted [https://github.com/openssl/openssl/pull/7688]
Signed-off-by: Ross Burton <ross.burton@intel.com>
-diff --git a/Configure b/Configure
-index 3baa8ce..9ef52ed 100755
---- a/Configure
-+++ b/Configure
-@@ -1550,20 +1550,7 @@ unless ($disabled{"crypto-mdebug-backtrace"})
+Index: openssl-3.0.4/Configure
+===================================================================
+--- openssl-3.0.4.orig/Configure
++++ openssl-3.0.4/Configure
+@@ -1681,20 +1681,7 @@ $config{CFLAGS} = [ map { $_ eq '--ossl-
unless ($disabled{afalgeng}) {
$config{afalgeng}="";
if (grep { $_ eq 'afalgeng' } @{$target{enable}}) {
diff --git a/meta/recipes-connectivity/openssl/openssl_3.0.3.bb b/meta/recipes-connectivity/openssl/openssl_3.0.13.bb
index 35a62755ad..3b253ddde0 100644
--- a/meta/recipes-connectivity/openssl/openssl_3.0.3.bb
+++ b/meta/recipes-connectivity/openssl/openssl_3.0.13.bb
@@ -12,14 +12,14 @@ SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
file://0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch \
file://afalg.patch \
file://0001-Configure-do-not-tweak-mips-cflags.patch \
- file://770aea88c3888cc5cb3ebc94ffcef706c68bc1d2.patch \
+ file://CVE-2024-2511.patch \
"
SRC_URI:append:class-nativesdk = " \
file://environment.d-openssl.sh \
"
-SRC_URI[sha256sum] = "ee0078adcef1de5f003c62c80cc96527721609c6f3bb42b7795df31f8b558c0b"
+SRC_URI[sha256sum] = "88525753f79d3bec27d2fa7c66aa0b92b3aa9498dafd93d7cfa4b3780cdae313"
inherit lib_package multilib_header multilib_script ptest perlnative
MULTILIB_SCRIPTS = "${PN}-bin:${bindir}/c_rehash"
@@ -78,7 +78,7 @@ do_configure () {
esac
target="$os-${HOST_ARCH}"
case $target in
- linux-arc)
+ linux-arc | linux-microblaze*)
target=linux-latomic
;;
linux-arm*)
@@ -106,7 +106,7 @@ do_configure () {
linux-*-mips64 | linux-mips64 | linux-*-mips64el | linux-mips64el)
target=linux64-mips64
;;
- linux-microblaze* | linux-nios2* | linux-sh3 | linux-sh4 | linux-arc*)
+ linux-nios2* | linux-sh3 | linux-sh4 | linux-arc*)
target=linux-generic32
;;
linux-powerpc)
@@ -138,7 +138,9 @@ do_configure () {
fi
# WARNING: do not set compiler/linker flags (-I/-D etc.) in EXTRA_OECONF, as they will fully replace the
# environment variables set by bitbake. Adjust the environment variables instead.
- HASHBANGPERL="/usr/bin/env perl" PERL=perl PERL5LIB="${S}/external/perl/Text-Template-1.46/lib/" \
+ PERLEXTERNAL="$(realpath ${S}/external/perl/Text-Template-*/lib)"
+ test -d "$PERLEXTERNAL" || bberror "PERLEXTERNAL '$PERLEXTERNAL' not found!"
+ HASHBANGPERL="/usr/bin/env perl" PERL=perl PERL5LIB="$PERLEXTERNAL" \
perl ${S}/Configure ${EXTRA_OECONF} ${PACKAGECONFIG_CONFARGS} ${DEPRECATED_CRYPTO_FLAGS} --prefix=$useprefix --openssldir=${libdir}/ssl-3 --libdir=${libdir} $target
perl ${B}/configdata.pm --dump
}
@@ -184,6 +186,7 @@ PTEST_BUILD_HOST_PATTERN = "perl_version ="
do_install_ptest () {
install -d ${D}${PTEST_PATH}/test
install -m755 ${B}/test/p_test.so ${D}${PTEST_PATH}/test
+ install -m755 ${B}/test/p_minimal.so ${D}${PTEST_PATH}/test
install -m755 ${B}/test/provider_internal_test.cnf ${D}${PTEST_PATH}/test
# Prune the build tree
diff --git a/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch b/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch
new file mode 100644
index 0000000000..4325b1d6b0
--- /dev/null
+++ b/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch
@@ -0,0 +1,48 @@
+From a75fb7b198eed50d769c80c36629f38346882cbf Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Thu, 4 Aug 2022 12:23:08 +1000
+Subject: [PATCH] pppdump: Avoid out-of-range access to packet buffer
+
+This fixes a potential vulnerability where data is written to spkt.buf
+and rpkt.buf without a check on the array index. To fix this, we
+check the array index (pkt->cnt) before storing the byte or
+incrementing the count. This also means we no longer have a potential
+signed integer overflow on the increment of pkt->cnt.
+
+Fortunately, pppdump is not used in the normal process of setting up a
+PPP connection, is not installed setuid-root, and is not invoked
+automatically in any scenario that I am aware of.
+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+---
+ pppdump/pppdump.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/pppdump/pppdump.c b/pppdump/pppdump.c
+index 2b815fc9..b85a8627 100644
+--- a/pppdump/pppdump.c
++++ b/pppdump/pppdump.c
+@@ -297,6 +297,10 @@ dumpppp(f)
+ printf("%s aborted packet:\n ", dir);
+ q = " ";
+ }
++ if (pkt->cnt >= sizeof(pkt->buf)) {
++ printf("%s over-long packet truncated:\n ", dir);
++ q = " ";
++ }
+ nb = pkt->cnt;
+ p = pkt->buf;
+ pkt->cnt = 0;
+@@ -400,7 +404,8 @@ dumpppp(f)
+ c ^= 0x20;
+ pkt->esc = 0;
+ }
+- pkt->buf[pkt->cnt++] = c;
++ if (pkt->cnt < sizeof(pkt->buf))
++ pkt->buf[pkt->cnt++] = c;
+ break;
+ }
+ }
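
The pppdump fix above is a plain bounds check: verify the index against the buffer size before every store, so the counter can neither run off the end of the array nor overflow. A small illustrative sketch (struct and function names are made up, not the ppp sources):

#include <stdio.h>

struct pkt {
    unsigned char buf[8192];
    size_t cnt;
};

/* Store one byte only while there is room, so cnt can never index past
 * buf[] or overflow. */
static void pkt_put(struct pkt *p, unsigned char c)
{
    if (p->cnt < sizeof(p->buf))
        p->buf[p->cnt++] = c;
    else
        fprintf(stderr, "over-long packet truncated\n");
}
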
diff --git a/meta/recipes-connectivity/ppp/ppp_2.4.9.bb b/meta/recipes-connectivity/ppp/ppp_2.4.9.bb
index 700ece61dc..7e3ae43b58 100644
--- a/meta/recipes-connectivity/ppp/ppp_2.4.9.bb
+++ b/meta/recipes-connectivity/ppp/ppp_2.4.9.bb
@@ -25,6 +25,7 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/${BP}.tar.gz \
file://provider \
file://ppp@.service \
file://0001-ppp-fix-build-against-5.15-headers.patch \
+ file://CVE-2022-4603.patch \
"
SRC_URI[sha256sum] = "f938b35eccde533ea800b15a7445b2f1137da7f88e32a16898d02dee8adc058d"
diff --git a/meta/recipes-connectivity/resolvconf/resolvconf/0001-avoid-using-m-option-for-readlink.patch b/meta/recipes-connectivity/resolvconf/resolvconf/0001-avoid-using-m-option-for-readlink.patch
new file mode 100644
index 0000000000..ab32f26754
--- /dev/null
+++ b/meta/recipes-connectivity/resolvconf/resolvconf/0001-avoid-using-m-option-for-readlink.patch
@@ -0,0 +1,37 @@
+From 6bf2bb136a0b3961339369bc08e58b661fba0edb Mon Sep 17 00:00:00 2001
+From: Chen Qi <Qi.Chen@windriver.com>
+Date: Thu, 17 Nov 2022 17:26:30 +0800
+Subject: [PATCH] avoid using -m option for readlink
+
+Use the more widely available option '-f' instead of '-m' here to
+avoid a dependency on coreutils.
+
+Looking at the git history of the resolvconf repo, the '-m' option
+is used deliberately, and the package intends to depend on coreutils.
+But in the case of OE, the existence of /etc is ensured, and busybox
+readlink provides the '-f' option, so we can just use '-f'. This way,
+the coreutils dependency is no longer necessary.
+
+Upstream-Status: Inappropriate [OE Specific]
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ etc/resolvconf/update.d/libc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/etc/resolvconf/update.d/libc b/etc/resolvconf/update.d/libc
+index 1c4f6bc..f75d22c 100755
+--- a/etc/resolvconf/update.d/libc
++++ b/etc/resolvconf/update.d/libc
+@@ -57,7 +57,7 @@ fi
+ report_warning() { echo "$0: Warning: $*" >&2 ; }
+
+ resolv_conf_is_symlinked_to_dynamic_file() {
+- [ -L ${ETC}/resolv.conf ] && [ "$(readlink -m ${ETC}/resolv.conf)" = "$DYNAMICRSLVCNFFILE" ]
++ [ -L ${ETC}/resolv.conf ] && [ "$(readlink -f ${ETC}/resolv.conf)" = "$DYNAMICRSLVCNFFILE" ]
+ }
+
+ if ! resolv_conf_is_symlinked_to_dynamic_file ; then
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/resolvconf/resolvconf_1.91.bb b/meta/recipes-connectivity/resolvconf/resolvconf_1.91.bb
index 94fd2c1a70..3f1b75d07d 100644
--- a/meta/recipes-connectivity/resolvconf/resolvconf_1.91.bb
+++ b/meta/recipes-connectivity/resolvconf/resolvconf_1.91.bb
@@ -9,10 +9,11 @@ LICENSE = "GPL-2.0-or-later"
LIC_FILES_CHKSUM = "file://COPYING;md5=c93c0550bd3173f4504b2cbd8991e50b"
AUTHOR = "Thomas Hood"
HOMEPAGE = "http://packages.debian.org/resolvconf"
-RDEPENDS:${PN} = "bash"
+RDEPENDS:${PN} = "bash sed util-linux-flock"
SRC_URI = "git://salsa.debian.org/debian/resolvconf.git;protocol=https;branch=unstable \
file://99_resolvconf \
+ file://0001-avoid-using-m-option-for-readlink.patch \
"
SRCREV = "859209d573e7aec0e95d812c6b52444591a628d1"
@@ -23,8 +24,6 @@ S = "${WORKDIR}/git"
# so we check the latest upstream from a directory that does get updated
UPSTREAM_CHECK_URI = "${DEBIAN_MIRROR}/main/r/resolvconf/"
-inherit allarch
-
do_compile () {
:
}
@@ -39,12 +38,14 @@ do_install () {
fi
install -d ${D}${base_libdir}/${BPN}
install -d ${D}${sysconfdir}/${BPN}
+ install -d ${D}${nonarch_base_libdir}/${BPN}
ln -snf ${localstatedir}/run/${BPN} ${D}${sysconfdir}/${BPN}/run
install -d ${D}${sysconfdir} ${D}${base_sbindir}
install -d ${D}${mandir}/man8 ${D}${docdir}/${P}
cp -pPR etc/resolvconf ${D}${sysconfdir}/
chown -R root:root ${D}${sysconfdir}/
install -m 0755 bin/resolvconf ${D}${base_sbindir}/
+ install -m 0755 bin/normalize-resolvconf ${D}${nonarch_base_libdir}/${BPN}
install -m 0755 bin/list-records ${D}${base_libdir}/${BPN}
install -d ${D}/${sysconfdir}/network/if-up.d
install -m 0755 debian/resolvconf.000resolvconf.if-up ${D}/${sysconfdir}/network/if-up.d/000resolvconf
@@ -64,4 +65,4 @@ pkg_postinst:${PN} () {
fi
}
-FILES:${PN} += "${base_libdir}/${BPN}"
+FILES:${PN} += "${base_libdir}/${BPN} ${nonarch_base_libdir}/${BPN}"
diff --git a/meta/recipes-connectivity/socat/socat/0001-configure.ac-check-getprotobynumber_r-with-AC_TRY_LI.patch b/meta/recipes-connectivity/socat/socat/0001-configure.ac-check-getprotobynumber_r-with-AC_TRY_LI.patch
deleted file mode 100644
index fbfb0816dd..0000000000
--- a/meta/recipes-connectivity/socat/socat/0001-configure.ac-check-getprotobynumber_r-with-AC_TRY_LI.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From d67d6b4f981db9612d808bd723176a1d2996d53a Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex@linutronix.de>
-Date: Mon, 17 Jan 2022 13:21:32 +0100
-Subject: [PATCH] configure.ac: check getprotobynumber_r with AC_TRY_LINK
-
-AC_TRY_COMPILE won't error out if the function is altogether absent
-(e.g. on linux musl C library), the test needs to link all the way.
-
-Upstream-Status: Submitted [via email to socat@dest-unreach.org]
-Signed-off-by: Alexander Kanavin <alex@linutronix.de>
----
- configure.ac | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index d4acc9e..973a7f2 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -137,13 +137,13 @@ AC_MSG_RESULT($sc_cv_have_prototype_hstrerror)
- # getprotobynumber_r() is not standardized
- AC_MSG_CHECKING(for getprotobynumber_r() variant)
- AC_CACHE_VAL(sc_cv_getprotobynumber_r,
--[AC_TRY_COMPILE([#include <stddef.h>
-+[AC_TRY_LINK([#include <stddef.h>
- #include <netdb.h>],[getprotobynumber_r(1,NULL,NULL,1024,NULL);],
- [sc_cv_getprotobynumber_r=1; tmp_bynum_variant=Linux],
-- [AC_TRY_COMPILE([#include <stddef.h>
-+ [AC_TRY_LINK([#include <stddef.h>
- #include <netdb.h>],[getprotobynumber_r(1,NULL,NULL,1024);],
- [sc_cv_getprotobynumber_r=2; tmp_bynum_variant=Solaris],
-- [AC_TRY_COMPILE([#include <stddef.h>
-+ [AC_TRY_LINK([#include <stddef.h>
- #include <netdb.h>],[getprotobynumber_r(1,NULL,NULL);],
- [sc_cv_getprotobynumber_r=3; tmp_bynum_variant=AIX],
-
diff --git a/meta/recipes-connectivity/socat/socat_1.7.4.3.bb b/meta/recipes-connectivity/socat/socat_1.7.4.4.bb
index a4a0a8933e..5a379380d1 100644
--- a/meta/recipes-connectivity/socat/socat_1.7.4.3.bb
+++ b/meta/recipes-connectivity/socat/socat_1.7.4.4.bb
@@ -9,11 +9,9 @@ LICENSE = "GPL-2.0-with-OpenSSL-exception"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
file://README;beginline=257;endline=287;md5=82520b052f322ac2b5b3dfdc7c7eea86"
-SRC_URI = "http://www.dest-unreach.org/socat/download/socat-${PV}.tar.bz2 \
- file://0001-configure.ac-check-getprotobynumber_r-with-AC_TRY_LI.patch \
- "
+SRC_URI = "http://www.dest-unreach.org/socat/download/socat-${PV}.tar.bz2"
-SRC_URI[sha256sum] = "d47318104415077635119dfee44bcfb41de3497374a9a001b1aff6e2f0858007"
+SRC_URI[sha256sum] = "fbd42bd2f0e54a3af6d01bdf15385384ab82dbc0e4f1a5e153b3e0be1b6380ac"
inherit autotools
diff --git a/meta/recipes-core/base-files/base-files/hosts b/meta/recipes-core/base-files/base-files/hosts
index b94f414d5c..10a5b6c704 100644
--- a/meta/recipes-core/base-files/base-files/hosts
+++ b/meta/recipes-core/base-files/base-files/hosts
@@ -1,4 +1,4 @@
-127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
diff --git a/meta/recipes-core/busybox/busybox.inc b/meta/recipes-core/busybox/busybox.inc
index 5f1c473d5e..62dc839245 100644
--- a/meta/recipes-core/busybox/busybox.inc
+++ b/meta/recipes-core/busybox/busybox.inc
@@ -138,19 +138,26 @@ do_configure () {
do_prepare_config
merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
cml1_do_configure
+
+ # Save a copy of .config and autoconf.h.
+ cp .config .config.orig
+ cp include/autoconf.h include/autoconf.h.orig
}
do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
export KCONFIG_NOTIMESTAMP=1
+ # Ensure we start do_compile with the original .config and autoconf.h.
+ # These files should always have matching timestamps.
+ cp .config.orig .config
+ cp include/autoconf.h.orig include/autoconf.h
+
if [ "${BUSYBOX_SPLIT_SUID}" = "1" -a x`grep "CONFIG_FEATURE_INDIVIDUAL=y" .config` = x ]; then
+ # Guard against an interrupted do_compile: clean temporary files.
+ rm -f .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
+
# split the .config into two parts, and make two busybox binaries
- if [ -e .config.orig ]; then
- # Need to guard again an interrupted do_compile - restore any backup
- cp .config.orig .config
- fi
- cp .config .config.orig
oe_runmake busybox.cfg.suid
oe_runmake busybox.cfg.nosuid
@@ -187,15 +194,18 @@ do_compile() {
bbfatal "busybox suid binary incorrectly provides /bin/sh"
fi
- # copy .config.orig back to .config, because the install process may check this file
- cp .config.orig .config
# cleanup
- rm .config.orig .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
+ rm .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
else
oe_runmake busybox_unstripped
cp busybox_unstripped busybox
oe_runmake busybox.links
fi
+
+ # restore original .config and autoconf.h, because the install process
+ # may check these files
+ cp .config.orig .config
+ cp include/autoconf.h.orig include/autoconf.h
}
do_install () {
diff --git a/meta/recipes-core/busybox/busybox/0001-depmod-Ignore-.debug-directories.patch b/meta/recipes-core/busybox/busybox/0001-depmod-Ignore-.debug-directories.patch
index 354f83a4a5..d76118f85b 100644
--- a/meta/recipes-core/busybox/busybox/0001-depmod-Ignore-.debug-directories.patch
+++ b/meta/recipes-core/busybox/busybox/0001-depmod-Ignore-.debug-directories.patch
@@ -21,7 +21,7 @@ index bb42bbe..aa5a2de 100644
/* Arbitrary. Was sb->st_size, but that breaks .gz etc */
size_t len = (64*1024*1024 - 4096);
-+ if (strstr(fname, ".debug") == NULL)
++ if (strstr(fname, ".debug") != NULL)
+ return TRUE;
+
if (strrstr(fname, ".ko") == NULL)
diff --git a/meta/recipes-core/busybox/busybox/0001-devmem-add-128-bit-width.patch b/meta/recipes-core/busybox/busybox/0001-devmem-add-128-bit-width.patch
new file mode 100644
index 0000000000..985e2bf1d9
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/0001-devmem-add-128-bit-width.patch
@@ -0,0 +1,128 @@
+From d432049f288c9acdc4a7caa729c68ceba3c5dca1 Mon Sep 17 00:00:00 2001
+From: Aaro Koskinen <aaro.koskinen@nokia.com>
+Date: Thu, 25 Aug 2022 18:47:02 +0300
+Subject: [PATCH] devmem: add 128-bit width
+
+Add 128-bit width if the compiler provides the needed type.
+
+function                                             old     new   delta
+devmem_main                                          405     464     +59
+.rodata                                           109025  109043     +18
+------------------------------------------------------------------------------
+(add/remove: 0/0 grow/shrink: 2/0 up/down: 77/0) Total: 77 bytes
+
+Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=d432049f288c9acdc4a7caa729c68ceba3c5dca1]
+
+Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
+Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ miscutils/devmem.c | 68 ++++++++++++++++++++++++++++++----------------
+ 1 file changed, 44 insertions(+), 24 deletions(-)
+
+diff --git a/miscutils/devmem.c b/miscutils/devmem.c
+index f9f0276bc..f21621bd6 100644
+--- a/miscutils/devmem.c
++++ b/miscutils/devmem.c
+@@ -29,7 +29,6 @@ int devmem_main(int argc UNUSED_PARAM, char **argv)
+ {
+ void *map_base, *virt_addr;
+ uint64_t read_result;
+- uint64_t writeval = writeval; /* for compiler */
+ off_t target;
+ unsigned page_size, mapped_size, offset_in_page;
+ int fd;
+@@ -64,9 +63,6 @@ int devmem_main(int argc UNUSED_PARAM, char **argv)
+ width = strchrnul(bhwl, (argv[2][0] | 0x20)) - bhwl;
+ width = sizes[width];
+ }
+- /* VALUE */
+- if (argv[3])
+- writeval = bb_strtoull(argv[3], NULL, 0);
+ } else { /* argv[2] == NULL */
+ /* make argv[3] to be a valid thing to fetch */
+ argv--;
+@@ -96,28 +92,46 @@ int devmem_main(int argc UNUSED_PARAM, char **argv)
+ virt_addr = (char*)map_base + offset_in_page;
+
+ if (!argv[3]) {
+- switch (width) {
+- case 8:
+- read_result = *(volatile uint8_t*)virt_addr;
+- break;
+- case 16:
+- read_result = *(volatile uint16_t*)virt_addr;
+- break;
+- case 32:
+- read_result = *(volatile uint32_t*)virt_addr;
+- break;
+- case 64:
+- read_result = *(volatile uint64_t*)virt_addr;
+- break;
+- default:
+- bb_simple_error_msg_and_die("bad width");
++#ifdef __SIZEOF_INT128__
++ if (width == 128) {
++ unsigned __int128 rd =
++ *(volatile unsigned __int128 *)virt_addr;
++ printf("0x%016llX%016llX\n",
++ (unsigned long long)(uint64_t)(rd >> 64),
++ (unsigned long long)(uint64_t)rd
++ );
++ } else
++#endif
++ {
++ switch (width) {
++ case 8:
++ read_result = *(volatile uint8_t*)virt_addr;
++ break;
++ case 16:
++ read_result = *(volatile uint16_t*)virt_addr;
++ break;
++ case 32:
++ read_result = *(volatile uint32_t*)virt_addr;
++ break;
++ case 64:
++ read_result = *(volatile uint64_t*)virt_addr;
++ break;
++ default:
++ bb_simple_error_msg_and_die("bad width");
++ }
++// printf("Value at address 0x%"OFF_FMT"X (%p): 0x%llX\n",
++// target, virt_addr,
++// (unsigned long long)read_result);
++ /* Zero-padded output shows the width of access just done */
++ printf("0x%0*llX\n", (width >> 2), (unsigned long long)read_result);
+ }
+-// printf("Value at address 0x%"OFF_FMT"X (%p): 0x%llX\n",
+-// target, virt_addr,
+-// (unsigned long long)read_result);
+- /* Zero-padded output shows the width of access just done */
+- printf("0x%0*llX\n", (width >> 2), (unsigned long long)read_result);
+ } else {
++ /* parse VALUE */
++#ifdef __SIZEOF_INT128__
++ unsigned __int128 writeval = strtoumax(argv[3], NULL, 0);
++#else
++ uint64_t writeval = bb_strtoull(argv[3], NULL, 0);
++#endif
+ switch (width) {
+ case 8:
+ *(volatile uint8_t*)virt_addr = writeval;
+@@ -135,6 +149,12 @@ int devmem_main(int argc UNUSED_PARAM, char **argv)
+ *(volatile uint64_t*)virt_addr = writeval;
+ // read_result = *(volatile uint64_t*)virt_addr;
+ break;
++#ifdef __SIZEOF_INT128__
++ case 128:
++ *(volatile unsigned __int128 *)virt_addr = writeval;
++// read_result = *(volatile uint64_t*)virt_addr;
++ break;
++#endif
+ default:
+ bb_simple_error_msg_and_die("bad width");
+ }
+--
+2.25.1
+
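The devmem change above adds a 128-bit access path guarded by __SIZEOF_INT128__ and prints the value as two zero-padded 64-bit halves, since printf has no 128-bit conversion. A standalone sketch of the same technique (illustrative only, not the busybox code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
#ifdef __SIZEOF_INT128__
    unsigned __int128 v = ((unsigned __int128)0x0123456789ABCDEFULL << 64)
                          | 0xFEDCBA9876543210ULL;

    /* No printf conversion exists for 128-bit integers, so split the value. */
    printf("0x%016llX%016llX\n",
           (unsigned long long)(uint64_t)(v >> 64),
           (unsigned long long)(uint64_t)v);
#else
    puts("this compiler has no 128-bit integer type");
#endif
    return 0;
}
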
diff --git a/meta/recipes-core/busybox/busybox/CVE-2022-30065.patch b/meta/recipes-core/busybox/busybox/CVE-2022-30065.patch
new file mode 100644
index 0000000000..25ad653b25
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/CVE-2022-30065.patch
@@ -0,0 +1,29 @@
+Fix use-after-free in awk.
+
+CVE: CVE-2022-30065
+Upstream-Status: Submitted [http://lists.busybox.net/pipermail/busybox/2022-June/089768.html]
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+fixes https://bugs.busybox.net/show_bug.cgi?id=14781
+
+Signed-off-by: Natanael Copa <ncopa at alpinelinux.org>
+---
+ editors/awk.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/editors/awk.c b/editors/awk.c
+index 079d0bde5..728ee8685 100644
+--- a/editors/awk.c
++++ b/editors/awk.c
+@@ -3128,6 +3128,9 @@ static var *evaluate(node *op, var *res)
+
+ case XC( OC_MOVE ):
+ debug_printf_eval("MOVE\n");
++ /* make sure that we never return a temp var */
++ if (L.v == TMPVAR0)
++ L.v = res;
+ /* if source is a temporary string, jusk relink it to dest */
+ if (R.v == TMPVAR1
+ && !(R.v->type & VF_NUMBER)
+--
+2.36.1
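
The awk fix above redirects the destination away from the evaluator's temporary slot (TMPVAR0) to the caller-provided result, so a pointer into reusable scratch storage is never returned. A rough, hypothetical sketch of that idea (not the busybox awk internals):

struct value { double num; };

static struct value tmp;                      /* scratch slot reused by every call */

/* Evaluate something into either the scratch slot or the caller's storage,
 * but always hand back the caller's storage, mirroring the L.v == TMPVAR0
 * redirection in the hunk above. */
static struct value *evaluate(struct value *res)
{
    struct value *out = &tmp;                 /* intermediate result lands here */
    out->num = 42.0;

    if (out == &tmp) {                        /* never return a temp var */
        *res = *out;
        out = res;
    }
    return out;
}
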
diff --git a/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch b/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch
new file mode 100644
index 0000000000..dd0ea19f02
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch
@@ -0,0 +1,80 @@
+From cf5d0889262e1b04ec2aa4caff2f5da2d602c665 Mon Sep 17 00:00:00 2001
+From: Denys Vlasenko <vda.linux@googlemail.com>
+Date: Mon, 12 Jun 2023 17:48:47 +0200
+Subject: [PATCH] busybox: shell: avoid segfault on ${0::0/0~09J}. Closes 15216
+
+function                                             old     new   delta
+evaluate_string                                     1011    1053     +42
+
+Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=d417193cf37ca1005830d7e16f5fa7e1d8a44209]
+CVE: CVE-2022-48174
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ shell/math.c | 39 +++++++++++++++++++++++++++++++++++----
+ 1 file changed, 35 insertions(+), 4 deletions(-)
+
+diff --git a/shell/math.c b/shell/math.c
+index 76d22c9..727c294 100644
+--- a/shell/math.c
++++ b/shell/math.c
+@@ -577,6 +577,28 @@ static arith_t strto_arith_t(const char *nptr, char **endptr)
+ # endif
+ #endif
+
++//TODO: much better estimation than expr_len/2? Such as:
++//static unsigned estimate_nums_and_names(const char *expr)
++//{
++// unsigned count = 0;
++// while (*(expr = skip_whitespace(expr)) != '\0') {
++// const char *p;
++// if (isdigit(*expr)) {
++// while (isdigit(*++expr))
++// continue;
++// count++;
++// continue;
++// }
++// p = endofname(expr);
++// if (p != expr) {
++// expr = p;
++// count++;
++// continue;
++// }
++// }
++// return count;
++//}
++
+ static arith_t
+ evaluate_string(arith_state_t *math_state, const char *expr)
+ {
+@@ -584,10 +606,12 @@ evaluate_string(arith_state_t *math_state, const char *expr)
+ const char *errmsg;
+ const char *start_expr = expr = skip_whitespace(expr);
+ unsigned expr_len = strlen(expr) + 2;
+- /* Stack of integers */
+- /* The proof that there can be no more than strlen(startbuf)/2+1
+- * integers in any given correct or incorrect expression
+- * is left as an exercise to the reader. */
++ /* Stack of integers/names */
++ /* There can be no more than strlen(startbuf)/2+1
++ * integers/names in any given correct or incorrect expression.
++ * (modulo "09v09v09v09v09v" case,
++ * but we have code to detect that early)
++ */
+ var_or_num_t *const numstack = alloca((expr_len / 2) * sizeof(numstack[0]));
+ var_or_num_t *numstackptr = numstack;
+ /* Stack of operator tokens */
+@@ -652,6 +676,13 @@ evaluate_string(arith_state_t *math_state, const char *expr)
+ numstackptr->var = NULL;
+ errno = 0;
+ numstackptr->val = strto_arith_t(expr, (char**) &expr);
++ /* A number can't be followed by another number, or a variable name.
++ * We'd catch this later anyway, but this would require numstack[]
++ * to be twice as deep to handle strings where _every_ char is
++ * a new number or name. Example: 09v09v09v09v09v09v09v09v09v
++ */
++ if (isalnum(*expr) || *expr == '_')
++ goto err;
+ //bb_error_msg("val:%lld", numstackptr->val);
+ if (errno)
+ numstackptr->val = 0; /* bash compat */
+--
+2.40.0
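
The shell/math.c fix above keeps the strlen/2+1 bound on the operand stack valid by rejecting, as soon as it is scanned, a number that is immediately followed by another digit or a name character (the "09v09v09v" case). A small illustrative scanner showing the early rejection (names and structure are hypothetical, not the busybox code):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Return 1 if the expression looks sane, 0 if a number is immediately
 * followed by another digit or a name character. */
static int scan_ok(const char *expr)
{
    const char *p = expr;
    char *end;

    while (*p) {
        if (isdigit((unsigned char)*p)) {
            (void)strtoll(p, &end, 0);                 /* consume one number */
            p = end;
            if (isalnum((unsigned char)*p) || *p == '_')
                return 0;                              /* "09v..." style input */
            continue;
        }
        p++;                                           /* operators, names, blanks */
    }
    return 1;
}

int main(void)
{
    printf("%d\n", scan_ok("1 + 2*3"));    /* 1 */
    printf("%d\n", scan_ok("09v09v09v"));  /* 0 */
    return 0;
}
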
diff --git a/meta/recipes-core/busybox/busybox_1.35.0.bb b/meta/recipes-core/busybox/busybox_1.35.0.bb
index f2f1b35902..07a5137d2a 100644
--- a/meta/recipes-core/busybox/busybox_1.35.0.bb
+++ b/meta/recipes-core/busybox/busybox_1.35.0.bb
@@ -49,6 +49,9 @@ SRC_URI = "https://busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
file://0001-sysctl-ignore-EIO-of-stable_secret-below-proc-sys-ne.patch \
file://0001-libbb-sockaddr2str-ensure-only-printable-characters-.patch \
file://0002-nslookup-sanitize-all-printed-strings-with-printable.patch \
+ file://CVE-2022-30065.patch \
+ file://0001-devmem-add-128-bit-width.patch \
+ file://CVE-2022-48174.patch \
"
SRC_URI:append:libc-musl = " file://musl.cfg "
diff --git a/meta/recipes-core/coreutils/coreutils_9.0.bb b/meta/recipes-core/coreutils/coreutils_9.0.bb
index e4a948c7e5..8a2fbeca32 100644
--- a/meta/recipes-core/coreutils/coreutils_9.0.bb
+++ b/meta/recipes-core/coreutils/coreutils_9.0.bb
@@ -49,6 +49,7 @@ PACKAGECONFIG[acl] = "--enable-acl,--disable-acl,acl,"
PACKAGECONFIG[xattr] = "--enable-xattr,--disable-xattr,attr,"
PACKAGECONFIG[single-binary] = "--enable-single-binary,--disable-single-binary,,"
PACKAGECONFIG[selinux] = "--with-selinux,--without-selinux,libselinux"
+PACKAGECONFIG[openssl] = "--with-openssl=yes,--with-openssl=no,openssl"
# [ df mktemp nice printenv base64 gets a special treatment and is not included in this
bindir_progs = "arch basename chcon cksum comm csplit cut dir dircolors dirname du \
@@ -174,8 +175,9 @@ RDEPENDS:${PN}-ptest += "bash findutils gawk liberror-perl make perl perl-module
# -dev automatic dependencies fails as we don't want libmodule-build-perl-dev, its too heavy
# may need tweaking if DEPENDS changes
+# Can't use ${PN}-dev here since flags with overrides and key expansion not supported
RRECOMMENDS:coreutils-dev[nodeprrecs] = "1"
-RRECOMMENDS:coreutils-dev = "acl-dev attr-dev gmp-dev libcap-dev bash-dev findutils-dev gawk-dev shadow-dev"
+RRECOMMENDS:${PN}-dev += "acl-dev attr-dev gmp-dev libcap-dev bash-dev findutils-dev gawk-dev shadow-dev"
do_install_ptest () {
install -d ${D}${PTEST_PATH}/tests
diff --git a/meta/recipes-core/dbus/dbus_1.14.0.bb b/meta/recipes-core/dbus/dbus_1.14.8.bb
index 7598c45f8e..f03e5c2d2e 100644
--- a/meta/recipes-core/dbus/dbus_1.14.0.bb
+++ b/meta/recipes-core/dbus/dbus_1.14.8.bb
@@ -6,16 +6,17 @@ SECTION = "base"
inherit autotools pkgconfig gettext upstream-version-is-even ptest-gnome
LICENSE = "AFL-2.1 | GPL-2.0-or-later"
-LIC_FILES_CHKSUM = "file://COPYING;md5=10dded3b58148f3f1fd804b26354af3e \
- file://dbus/dbus.h;beginline=6;endline=20;md5=866739837ccd835350af94dccd6457d8"
+LIC_FILES_CHKSUM = "file://COPYING;md5=6423dcd74d7be9715b0db247fd889da3 \
+ file://dbus/dbus.h;beginline=6;endline=20;md5=866739837ccd835350af94dccd6457d8 \
+ "
SRC_URI = "https://dbus.freedesktop.org/releases/dbus/dbus-${PV}.tar.xz \
file://run-ptest \
file://tmpdir.patch \
file://dbus-1.init \
-"
+ "
-SRC_URI[sha256sum] = "ccd7cce37596e0a19558fd6648d1272ab43f011d80c8635aea8fd0bad58aebd4"
+SRC_URI[sha256sum] = "a6bd5bac5cf19f0c3c594bdae2565a095696980a683a0ef37cb6212e093bde35"
EXTRA_OECONF = "--disable-xml-docs \
--disable-doxygen-docs \
@@ -24,6 +25,7 @@ EXTRA_OECONF = "--disable-xml-docs \
--enable-tests \
--enable-checks \
--enable-asserts \
+ --runstatedir=/run \
"
EXTRA_OECONF:append:class-target = " SYSTEMCTL=${base_bindir}/systemctl"
@@ -131,7 +133,7 @@ do_install() {
sed 's:@bindir@:${bindir}:' < ${WORKDIR}/dbus-1.init >${WORKDIR}/dbus-1.init.sh
install -m 0755 ${WORKDIR}/dbus-1.init.sh ${D}${sysconfdir}/init.d/dbus-1
install -d ${D}${sysconfdir}/default/volatiles
- echo "d messagebus messagebus 0755 ${localstatedir}/run/dbus none" \
+ echo "d messagebus messagebus 0755 /run/dbus none" \
> ${D}${sysconfdir}/default/volatiles/99_dbus
fi
@@ -181,3 +183,5 @@ do_install:class-nativesdk() {
rm -rf ${D}${localstatedir}/run
}
BBCLASSEXTEND = "native nativesdk"
+
+CVE_PRODUCT += "d-bus_project:d-bus freedesktop:dbus freedesktop:libdbus"
diff --git a/meta/recipes-core/dropbear/dropbear.inc b/meta/recipes-core/dropbear/dropbear.inc
index 78f9f9adbd..a32242949b 100644
--- a/meta/recipes-core/dropbear/dropbear.inc
+++ b/meta/recipes-core/dropbear/dropbear.inc
@@ -12,6 +12,11 @@ DEPENDS = "zlib virtual/crypt"
RPROVIDES:${PN} = "ssh sshd"
RCONFLICTS:${PN} = "openssh-sshd openssh"
+# break dependency on base package for -dev package
+# otherwise SDK fails to build as the main openssh and dropbear packages
+# conflict with each other
+RDEPENDS:${PN}-dev = ""
+
DEPENDS += "${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}"
SRC_URI = "http://matt.ucc.asn.au/dropbear/releases/dropbear-${PV}.tar.bz2 \
@@ -22,7 +27,11 @@ SRC_URI = "http://matt.ucc.asn.au/dropbear/releases/dropbear-${PV}.tar.bz2 \
file://dropbear.socket \
file://dropbear.default \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
- ${@bb.utils.contains('PACKAGECONFIG', 'disable-weak-ciphers', 'file://dropbear-disable-weak-ciphers.patch', '', d)} "
+ ${@bb.utils.contains('PACKAGECONFIG', 'disable-weak-ciphers', 'file://dropbear-disable-weak-ciphers.patch', '', d)} \
+ file://CVE-2021-36369.patch \
+ file://CVE-2023-36328.patch \
+ file://CVE-2023-48795.patch \
+ "
PAM_SRC_URI = "file://0005-dropbear-enable-pam.patch \
file://0006-dropbear-configuration-file.patch \
diff --git a/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch b/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch
new file mode 100644
index 0000000000..5ff11abdd6
--- /dev/null
+++ b/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch
@@ -0,0 +1,145 @@
+From e9b15a8b1035b62413b2b881315c6bffd02205d4 Mon Sep 17 00:00:00 2001
+From: Manfred Kaiser <37737811+manfred-kaiser@users.noreply.github.com>
+Date: Thu, 19 Aug 2021 17:37:14 +0200
+Subject: [PATCH] added option to disable trivial auth methods (#128)
+
+* added option to disable trivial auth methods
+
+* rename argument to match with other ssh clients
+
+* fixed trivial auth detection for pubkeys
+
+[https://github.com/mkj/dropbear/pull/128]
+Upstream-Status: Backport
+CVE: CVE-2021-36369
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ cli-auth.c | 3 +++
+ cli-authinteract.c | 1 +
+ cli-authpasswd.c | 2 +-
+ cli-authpubkey.c | 1 +
+ cli-runopts.c | 7 +++++++
+ cli-session.c | 1 +
+ runopts.h | 1 +
+ session.h | 1 +
+ 8 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/cli-auth.c b/cli-auth.c
+index 2e509e5..6f04495 100644
+--- a/cli-auth.c
++++ b/cli-auth.c
+@@ -267,6 +267,9 @@ void recv_msg_userauth_success() {
+ if DROPBEAR_CLI_IMMEDIATE_AUTH is set */
+
+ TRACE(("received msg_userauth_success"))
++ if (cli_opts.disable_trivial_auth && cli_ses.is_trivial_auth) {
++ dropbear_exit("trivial authentication not allowed");
++ }
+ /* Note: in delayed-zlib mode, setting authdone here
+ * will enable compression in the transport layer */
+ ses.authstate.authdone = 1;
+diff --git a/cli-authinteract.c b/cli-authinteract.c
+index e1cc9a1..f7128ee 100644
+--- a/cli-authinteract.c
++++ b/cli-authinteract.c
+@@ -114,6 +114,7 @@ void recv_msg_userauth_info_request() {
+ m_free(instruction);
+
+ for (i = 0; i < num_prompts; i++) {
++ cli_ses.is_trivial_auth = 0;
+ unsigned int response_len = 0;
+ prompt = buf_getstring(ses.payload, NULL);
+ cleantext(prompt);
+diff --git a/cli-authpasswd.c b/cli-authpasswd.c
+index 00fdd8b..a24d43e 100644
+--- a/cli-authpasswd.c
++++ b/cli-authpasswd.c
+@@ -155,7 +155,7 @@ void cli_auth_password() {
+
+ encrypt_packet();
+ m_burn(password, strlen(password));
+-
++ cli_ses.is_trivial_auth = 0;
+ TRACE(("leave cli_auth_password"))
+ }
+ #endif /* DROPBEAR_CLI_PASSWORD_AUTH */
+diff --git a/cli-authpubkey.c b/cli-authpubkey.c
+index 42c4e3f..fa01807 100644
+--- a/cli-authpubkey.c
++++ b/cli-authpubkey.c
+@@ -176,6 +176,7 @@ static void send_msg_userauth_pubkey(sign_key *key, enum signature_type sigtype,
+ buf_putbytes(sigbuf, ses.writepayload->data, ses.writepayload->len);
+ cli_buf_put_sign(ses.writepayload, key, sigtype, sigbuf);
+ buf_free(sigbuf); /* Nothing confidential in the buffer */
++ cli_ses.is_trivial_auth = 0;
+ }
+
+ encrypt_packet();
+diff --git a/cli-runopts.c b/cli-runopts.c
+index 3654b9a..255b47e 100644
+--- a/cli-runopts.c
++++ b/cli-runopts.c
+@@ -152,6 +152,7 @@ void cli_getopts(int argc, char ** argv) {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ cli_opts.exit_on_fwd_failure = 0;
+ #endif
++ cli_opts.disable_trivial_auth = 0;
+ #if DROPBEAR_CLI_LOCALTCPFWD
+ cli_opts.localfwds = list_new();
+ opts.listen_fwd_all = 0;
+@@ -889,6 +890,7 @@ static void add_extendedopt(const char* origstr) {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ "\tExitOnForwardFailure\n"
+ #endif
++ "\tDisableTrivialAuth\n"
+ #ifndef DISABLE_SYSLOG
+ "\tUseSyslog\n"
+ #endif
+@@ -916,5 +918,10 @@ static void add_extendedopt(const char* origstr) {
+ return;
+ }
+
++ if (match_extendedopt(&optstr, "DisableTrivialAuth") == DROPBEAR_SUCCESS) {
++ cli_opts.disable_trivial_auth = parse_flag_value(optstr);
++ return;
++ }
++
+ dropbear_log(LOG_WARNING, "Ignoring unknown configuration option '%s'", origstr);
+ }
+diff --git a/cli-session.c b/cli-session.c
+index 5e5af22..afb54a1 100644
+--- a/cli-session.c
++++ b/cli-session.c
+@@ -165,6 +165,7 @@ static void cli_session_init(pid_t proxy_cmd_pid) {
+ /* Auth */
+ cli_ses.lastprivkey = NULL;
+ cli_ses.lastauthtype = 0;
++ cli_ses.is_trivial_auth = 1;
+
+ /* For printing "remote host closed" for the user */
+ ses.remoteclosed = cli_remoteclosed;
+diff --git a/runopts.h b/runopts.h
+index 6a4a94c..01201d2 100644
+--- a/runopts.h
++++ b/runopts.h
+@@ -159,6 +159,7 @@ typedef struct cli_runopts {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ int exit_on_fwd_failure;
+ #endif
++ int disable_trivial_auth;
+ #if DROPBEAR_CLI_REMOTETCPFWD
+ m_list * remotefwds;
+ #endif
+diff --git a/session.h b/session.h
+index fb5b8cb..6706592 100644
+--- a/session.h
++++ b/session.h
+@@ -316,6 +316,7 @@ struct clientsession {
+
+ int lastauthtype; /* either AUTH_TYPE_PUBKEY or AUTH_TYPE_PASSWORD,
+ for the last type of auth we tried */
++ int is_trivial_auth;
+ int ignore_next_auth_response;
+ #if DROPBEAR_CLI_INTERACT_AUTH
+ int auth_interact_failed; /* flag whether interactive auth can still
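
The dropbear patch above tracks whether any non-trivial authentication step actually happened: the flag starts at 1 when the session is initialised, is cleared whenever a password, pubkey signature or interactive response is sent, and is checked on userauth success when DisableTrivialAuth is set. A compact sketch of that flag pattern (hypothetical names, not the dropbear sources):

#include <stdio.h>
#include <stdlib.h>

struct cli_session { int is_trivial_auth; };
struct cli_opts    { int disable_trivial_auth; };

static struct cli_session ses  = { .is_trivial_auth = 1 };   /* set at session init */
static struct cli_opts    opts = { .disable_trivial_auth = 1 };

static void sent_password(void)         { ses.is_trivial_auth = 0; }
static void sent_pubkey_signature(void) { ses.is_trivial_auth = 0; }

static void recv_userauth_success(void)
{
    if (opts.disable_trivial_auth && ses.is_trivial_auth) {
        fprintf(stderr, "trivial authentication not allowed\n");
        exit(1);
    }
    puts("authenticated");
}

int main(void)
{
    sent_password();            /* comment out to exercise the rejection path */
    (void)sent_pubkey_signature;
    recv_userauth_success();
    return 0;
}
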
diff --git a/meta/recipes-core/dropbear/dropbear/CVE-2023-36328.patch b/meta/recipes-core/dropbear/dropbear/CVE-2023-36328.patch
new file mode 100644
index 0000000000..4d8c40f70b
--- /dev/null
+++ b/meta/recipes-core/dropbear/dropbear/CVE-2023-36328.patch
@@ -0,0 +1,144 @@
+From beba892bc0d4e4ded4d667ab1d2a94f4d75109a9 Mon Sep 17 00:00:00 2001
+From: czurnieden <czurnieden@gmx.de>
+Date: Wed, 6 Sep 2023 10:48:58 +0000
+Subject: [PATCH] Fix possible integer overflow
+
+CVE: CVE-2023-36328
+
+Upstream-Status: Backport [https://github.com/libtom/libtommath/commit/beba892bc0d4e4ded4d667ab1d2a94f4d75109a9]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ libtommath/bn_mp_2expt.c | 4 ++++
+ libtommath/bn_mp_grow.c | 4 ++++
+ libtommath/bn_mp_init_size.c | 5 +++++
+ libtommath/bn_mp_mul_2d.c | 4 ++++
+ libtommath/bn_s_mp_mul_digs.c | 4 ++++
+ libtommath/bn_s_mp_mul_digs_fast.c | 4 ++++
+ libtommath/bn_s_mp_mul_high_digs.c | 4 ++++
+ libtommath/bn_s_mp_mul_high_digs_fast.c | 4 ++++
+ 8 files changed, 33 insertions(+)
+
+diff --git a/libtommath/bn_mp_2expt.c b/libtommath/bn_mp_2expt.c
+index 0ae3df1..7d4d884 100644
+--- a/libtommath/bn_mp_2expt.c
++++ b/libtommath/bn_mp_2expt.c
+@@ -12,6 +12,10 @@ mp_err mp_2expt(mp_int *a, int b)
+ {
+ mp_err err;
+
++ if (b < 0) {
++ return MP_VAL;
++ }
++
+ /* zero a as per default */
+ mp_zero(a);
+
+diff --git a/libtommath/bn_mp_grow.c b/libtommath/bn_mp_grow.c
+index 9e904c5..e7b186c 100644
+--- a/libtommath/bn_mp_grow.c
++++ b/libtommath/bn_mp_grow.c
+@@ -9,6 +9,10 @@ mp_err mp_grow(mp_int *a, int size)
+ int i;
+ mp_digit *tmp;
+
++ if (size < 0) {
++ return MP_VAL;
++ }
++
+ /* if the alloc size is smaller alloc more ram */
+ if (a->alloc < size) {
+ /* reallocate the array a->dp
+diff --git a/libtommath/bn_mp_init_size.c b/libtommath/bn_mp_init_size.c
+index d622687..5fefa96 100644
+--- a/libtommath/bn_mp_init_size.c
++++ b/libtommath/bn_mp_init_size.c
+@@ -6,6 +6,11 @@
+ /* init an mp_init for a given size */
+ mp_err mp_init_size(mp_int *a, int size)
+ {
++
++ if (size < 0) {
++ return MP_VAL;
++ }
++
+ size = MP_MAX(MP_MIN_PREC, size);
+
+ /* alloc mem */
+diff --git a/libtommath/bn_mp_mul_2d.c b/libtommath/bn_mp_mul_2d.c
+index 87354de..2744163 100644
+--- a/libtommath/bn_mp_mul_2d.c
++++ b/libtommath/bn_mp_mul_2d.c
+@@ -9,6 +9,10 @@ mp_err mp_mul_2d(const mp_int *a, int b, mp_int *c)
+ mp_digit d;
+ mp_err err;
+
++ if (b < 0) {
++ return MP_VAL;
++ }
++
+ /* copy */
+ if (a != c) {
+ if ((err = mp_copy(a, c)) != MP_OKAY) {
+diff --git a/libtommath/bn_s_mp_mul_digs.c b/libtommath/bn_s_mp_mul_digs.c
+index 64509d4..2d2f5b0 100644
+--- a/libtommath/bn_s_mp_mul_digs.c
++++ b/libtommath/bn_s_mp_mul_digs.c
+@@ -16,6 +16,10 @@ mp_err s_mp_mul_digs(const mp_int *a, const mp_int *b, mp_int *c, int digs)
+ mp_word r;
+ mp_digit tmpx, *tmpt, *tmpy;
+
++ if (digs < 0) {
++ return MP_VAL;
++ }
++
+ /* can we use the fast multiplier? */
+ if ((digs < MP_WARRAY) &&
+ (MP_MIN(a->used, b->used) < MP_MAXFAST)) {
+diff --git a/libtommath/bn_s_mp_mul_digs_fast.c b/libtommath/bn_s_mp_mul_digs_fast.c
+index b2a287b..d6dd3cc 100644
+--- a/libtommath/bn_s_mp_mul_digs_fast.c
++++ b/libtommath/bn_s_mp_mul_digs_fast.c
+@@ -26,6 +26,10 @@ mp_err s_mp_mul_digs_fast(const mp_int *a, const mp_int *b, mp_int *c, int digs)
+ mp_digit W[MP_WARRAY];
+ mp_word _W;
+
++ if (digs < 0) {
++ return MP_VAL;
++ }
++
+ /* grow the destination as required */
+ if (c->alloc < digs) {
+ if ((err = mp_grow(c, digs)) != MP_OKAY) {
+diff --git a/libtommath/bn_s_mp_mul_high_digs.c b/libtommath/bn_s_mp_mul_high_digs.c
+index 2bb2a50..c9dd355 100644
+--- a/libtommath/bn_s_mp_mul_high_digs.c
++++ b/libtommath/bn_s_mp_mul_high_digs.c
+@@ -15,6 +15,10 @@ mp_err s_mp_mul_high_digs(const mp_int *a, const mp_int *b, mp_int *c, int digs)
+ mp_word r;
+ mp_digit tmpx, *tmpt, *tmpy;
+
++ if (digs < 0) {
++ return MP_VAL;
++ }
++
+ /* can we use the fast multiplier? */
+ if (MP_HAS(S_MP_MUL_HIGH_DIGS_FAST)
+ && ((a->used + b->used + 1) < MP_WARRAY)
+diff --git a/libtommath/bn_s_mp_mul_high_digs_fast.c b/libtommath/bn_s_mp_mul_high_digs_fast.c
+index a2c4fb6..4ce7f59 100644
+--- a/libtommath/bn_s_mp_mul_high_digs_fast.c
++++ b/libtommath/bn_s_mp_mul_high_digs_fast.c
+@@ -19,6 +19,10 @@ mp_err s_mp_mul_high_digs_fast(const mp_int *a, const mp_int *b, mp_int *c, int
+ mp_digit W[MP_WARRAY];
+ mp_word _W;
+
++ if (digs < 0) {
++ return MP_VAL;
++ }
++
+ /* grow the destination as required */
+ pa = a->used + b->used;
+ if (c->alloc < pa) {
+--
+2.35.5
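
Every hunk in the libtommath patch above adds the same guard: reject a negative size or shift count before it reaches an allocation or a size computation where it could overflow. An illustrative example of the guard in a growable-buffer helper (names and error codes are made up, not the libtommath API):

#include <stddef.h>
#include <stdlib.h>

enum { RET_OKAY = 0, RET_VAL = -3, RET_MEM = -2 };

/* Grow an int buffer to at least 'size' elements. */
static int buf_grow(int **dp, int *alloc, int size)
{
    if (size < 0)
        return RET_VAL;                   /* reject before any size arithmetic */

    if (*alloc < size) {
        int *tmp = realloc(*dp, (size_t)size * sizeof(**dp));
        if (tmp == NULL)
            return RET_MEM;
        *dp = tmp;
        *alloc = size;
    }
    return RET_OKAY;
}
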
diff --git a/meta/recipes-core/dropbear/dropbear/CVE-2023-48795.patch b/meta/recipes-core/dropbear/dropbear/CVE-2023-48795.patch
new file mode 100644
index 0000000000..6800672ab0
--- /dev/null
+++ b/meta/recipes-core/dropbear/dropbear/CVE-2023-48795.patch
@@ -0,0 +1,234 @@
+From 6e43be5c7b99dbee49dc72b6f989f29fdd7e9356 Mon Sep 17 00:00:00 2001
+From: Matt Johnston <matt@ucc.asn.au>
+Date: Mon, 20 Nov 2023 14:02:47 +0800
+Subject: [PATCH] Implement Strict KEX mode
+
+As specified by OpenSSH with kex-strict-c-v00@openssh.com and
+kex-strict-s-v00@openssh.com.
+
+CVE: CVE-2023-48795
+Upstream-Status: Backport [https://github.com/mkj/dropbear/commit/6e43be5c7b99dbee49dc72b6f989f29fdd7e9356]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ cli-session.c | 11 +++++++++++
+ common-algo.c | 6 ++++++
+ common-kex.c | 26 +++++++++++++++++++++++++-
+ kex.h | 3 +++
+ process-packet.c | 34 +++++++++++++++++++---------------
+ ssh.h | 4 ++++
+ svr-session.c | 3 +++
+ 7 files changed, 71 insertions(+), 16 deletions(-)
+
+diff --git a/cli-session.c b/src/cli-session.c
+index 5981b2470..d261c8f82 100644
+--- a/cli-session.c
++++ b/cli-session.c
+@@ -46,6 +46,7 @@ static void cli_finished(void) ATTRIB_NORETURN;
+ static void recv_msg_service_accept(void);
+ static void cli_session_cleanup(void);
+ static void recv_msg_global_request_cli(void);
++static void cli_algos_initialise(void);
+
+ struct clientsession cli_ses; /* GLOBAL */
+
+@@ -114,6 +115,7 @@ void cli_session(int sock_in, int sock_out, struct dropbear_progress_connection
+ }
+
+ chaninitialise(cli_chantypes);
++ cli_algos_initialise();
+
+ /* Set up cli_ses vars */
+ cli_session_init(proxy_cmd_pid);
+@@ -473,3 +475,12 @@ void cli_dropbear_log(int priority, const char* format, va_list param) {
+ fflush(stderr);
+ }
+
++static void cli_algos_initialise(void) {
++ algo_type *algo;
++ for (algo = sshkex; algo->name; algo++) {
++ if (strcmp(algo->name, SSH_STRICT_KEX_S) == 0) {
++ algo->usable = 0;
++ }
++ }
++}
++
+diff --git a/common-algo.c b/src/common-algo.c
+index 378f0ca8e..f9d46ebb6 100644
+--- a/common-algo.c
++++ b/common-algo.c
+@@ -332,6 +332,12 @@ algo_type sshkex[] = {
+ /* Set unusable by svr_algos_initialise() */
+ {SSH_EXT_INFO_C, 0, NULL, 1, NULL},
+ #endif
++#endif
++#if DROPBEAR_CLIENT
++ {SSH_STRICT_KEX_C, 0, NULL, 1, NULL},
++#endif
++#if DROPBEAR_SERVER
++ {SSH_STRICT_KEX_S, 0, NULL, 1, NULL},
+ #endif
+ {NULL, 0, NULL, 0, NULL}
+ };
+diff --git a/common-kex.c b/src/common-kex.c
+index ac8844246..8e33b12a6 100644
+--- a/common-kex.c
++++ b/common-kex.c
+@@ -183,6 +183,10 @@ void send_msg_newkeys() {
+ gen_new_keys();
+ switch_keys();
+
++ if (ses.kexstate.strict_kex) {
++ ses.transseq = 0;
++ }
++
+ TRACE(("leave send_msg_newkeys"))
+ }
+
+@@ -193,7 +197,11 @@ void recv_msg_newkeys() {
+
+ ses.kexstate.recvnewkeys = 1;
+ switch_keys();
+-
++
++ if (ses.kexstate.strict_kex) {
++ ses.recvseq = 0;
++ }
++
+ TRACE(("leave recv_msg_newkeys"))
+ }
+
+@@ -551,6 +559,10 @@ void recv_msg_kexinit() {
+
+ ses.kexstate.recvkexinit = 1;
+
++ if (ses.kexstate.strict_kex && !ses.kexstate.donefirstkex && ses.recvseq != 1) {
++ dropbear_exit("First packet wasn't kexinit");
++ }
++
+ TRACE(("leave recv_msg_kexinit"))
+ }
+
+@@ -861,6 +873,18 @@ static void read_kex_algos() {
+ }
+ #endif
+
++ if (!ses.kexstate.donefirstkex) {
++ const char* strict_name;
++ if (IS_DROPBEAR_CLIENT) {
++ strict_name = SSH_STRICT_KEX_S;
++ } else {
++ strict_name = SSH_STRICT_KEX_C;
++ }
++ if (buf_has_algo(ses.payload, strict_name) == DROPBEAR_SUCCESS) {
++ ses.kexstate.strict_kex = 1;
++ }
++ }
++
+ algo = buf_match_algo(ses.payload, sshkex, kexguess2, &goodguess);
+ allgood &= goodguess;
+ if (algo == NULL || algo->data == NULL) {
+diff --git a/kex.h b/src/kex.h
+index 77cf21a37..7fcc3c252 100644
+--- a/kex.h
++++ b/kex.h
+@@ -83,6 +83,9 @@ struct KEXState {
+
+ unsigned our_first_follows_matches : 1;
+
++ /* Boolean indicating that strict kex mode is in use */
++ unsigned int strict_kex;
++
+ time_t lastkextime; /* time of the last kex */
+ unsigned int datatrans; /* data transmitted since last kex */
+ unsigned int datarecv; /* data received since last kex */
+diff --git a/process-packet.c b/src/process-packet.c
+index 945416023..133a152d0 100644
+--- a/process-packet.c
++++ b/process-packet.c
+@@ -44,6 +44,7 @@ void process_packet() {
+
+ unsigned char type;
+ unsigned int i;
++ unsigned int first_strict_kex = ses.kexstate.strict_kex && !ses.kexstate.donefirstkex;
+ time_t now;
+
+ TRACE2(("enter process_packet"))
+@@ -54,22 +55,24 @@ void process_packet() {
+ now = monotonic_now();
+ ses.last_packet_time_keepalive_recv = now;
+
+- /* These packets we can receive at any time */
+- switch(type) {
+
+- case SSH_MSG_IGNORE:
+- goto out;
+- case SSH_MSG_DEBUG:
+- goto out;
++ if (type == SSH_MSG_DISCONNECT) {
++ /* Allowed at any time */
++ dropbear_close("Disconnect received");
++ }
+
+- case SSH_MSG_UNIMPLEMENTED:
+- /* debugging XXX */
+- TRACE(("SSH_MSG_UNIMPLEMENTED"))
+- goto out;
+-
+- case SSH_MSG_DISCONNECT:
+- /* TODO cleanup? */
+- dropbear_close("Disconnect received");
++ /* These packets may be received at any time,
++ except during first kex with strict kex */
++ if (!first_strict_kex) {
++ switch(type) {
++ case SSH_MSG_IGNORE:
++ goto out;
++ case SSH_MSG_DEBUG:
++ goto out;
++ case SSH_MSG_UNIMPLEMENTED:
++ TRACE(("SSH_MSG_UNIMPLEMENTED"))
++ goto out;
++ }
+ }
+
+ /* Ignore these packet types so that keepalives don't interfere with
+@@ -98,7 +101,8 @@ void process_packet() {
+ if (type >= 1 && type <= 49
+ && type != SSH_MSG_SERVICE_REQUEST
+ && type != SSH_MSG_SERVICE_ACCEPT
+- && type != SSH_MSG_KEXINIT)
++ && type != SSH_MSG_KEXINIT
++ && !first_strict_kex)
+ {
+ TRACE(("unknown allowed packet during kexinit"))
+ recv_unimplemented();
+diff --git a/ssh.h b/src/ssh.h
+index 1b4fec65f..ef3efdca0 100644
+--- a/ssh.h
++++ b/ssh.h
+@@ -100,6 +100,10 @@
+ #define SSH_EXT_INFO_C "ext-info-c"
+ #define SSH_SERVER_SIG_ALGS "server-sig-algs"
+
++/* OpenSSH strict KEX feature */
++#define SSH_STRICT_KEX_S "kex-strict-s-v00@openssh.com"
++#define SSH_STRICT_KEX_C "kex-strict-c-v00@openssh.com"
++
+ /* service types */
+ #define SSH_SERVICE_USERAUTH "ssh-userauth"
+ #define SSH_SERVICE_USERAUTH_LEN 12
+diff --git a/svr-session.c b/src/svr-session.c
+index 769f0731d..a538e2c5c 100644
+--- a/svr-session.c
++++ b/svr-session.c
+@@ -342,6 +342,9 @@ static void svr_algos_initialise(void) {
+ algo->usable = 0;
+ }
+ #endif
++ if (strcmp(algo->name, SSH_STRICT_KEX_C) == 0) {
++ algo->usable = 0;
++ }
+ }
+ }
+
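The strict-KEX patch above does three things once both peers advertise the mode: the packet sequence counters restart at zero on NEWKEYS, the first packet of the connection must be KEXINIT, and filler messages such as SSH_MSG_IGNORE/DEBUG are rejected until the first key exchange completes. A condensed sketch of that packet-handling logic (illustrative constants and names, not the dropbear code):

#include <stdio.h>

#define MSG_DISCONNECT   1
#define MSG_IGNORE       2
#define MSG_DEBUG        4
#define MSG_KEXINIT     20
#define MSG_NEWKEYS     21

struct kexstate { unsigned strict_kex:1; unsigned donefirstkex:1; };

static struct kexstate kex = { .strict_kex = 1, .donefirstkex = 0 };
static unsigned recvseq, transseq;

/* Returns 0 to keep processing, -1 to drop the connection. */
static int process_packet(unsigned char type)
{
    int first_strict_kex = kex.strict_kex && !kex.donefirstkex;

    recvseq++;
    if (type == MSG_DISCONNECT)
        return -1;                          /* allowed at any time */
    if (first_strict_kex && recvseq == 1 && type != MSG_KEXINIT)
        return -1;                          /* first packet must be KEXINIT */
    if (first_strict_kex && (type == MSG_IGNORE || type == MSG_DEBUG))
        return -1;                          /* no filler before first kex */
    if (type == MSG_NEWKEYS && kex.strict_kex)
        recvseq = transseq = 0;             /* counters restart after NEWKEYS */
    return 0;
}

int main(void)
{
    printf("%d\n", process_packet(MSG_KEXINIT));   /* 0: accepted */
    printf("%d\n", process_packet(MSG_IGNORE));    /* -1 under strict kex */
    return 0;
}
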
diff --git a/meta/recipes-core/ell/ell_0.49.bb b/meta/recipes-core/ell/ell_0.50.bb
index 9edd6fc92a..243ac01530 100644
--- a/meta/recipes-core/ell/ell_0.49.bb
+++ b/meta/recipes-core/ell/ell_0.50.bb
@@ -16,7 +16,7 @@ inherit autotools pkgconfig
SRC_URI = "https://mirrors.edge.kernel.org/pub/linux/libs/${BPN}/${BPN}-${PV}.tar.xz \
"
-SRC_URI[sha256sum] = "a7ff8ecbc76b187d942dd22b61cb489711400897c790319ffb7e944791687c3f"
+SRC_URI[sha256sum] = "0fe51d51c6eddc2a2784092f1dfdd1143a5ef27f15c274ecfbadd680d3a72fd9"
do_configure:prepend () {
mkdir -p ${S}/build-aux
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-001.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-001.patch
new file mode 100644
index 0000000000..c38a334540
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-001.patch
@@ -0,0 +1,35 @@
+From cdead241d4f1136c2f38d1b28e95073c59753d30 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 01:40:05 +0200
+Subject: [PATCH] doc/reference.html: Clarify effect of XML_DTD on external
+ entities
+
+Defining XML_DTD enables support for external parameter(!)
+entities. External general(!) entities have been supported
+even with XML_DTD undefined. (Only now with Expat 2.6.0
+defining XML_GE as 0 can take that away.)
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/cdead241d4f1136c2f38d1b28e95073c59753d30]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ doc/reference.html | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/doc/reference.html b/doc/reference.html
+index 8b0d47d..a30e462 100644
+--- a/doc/reference.html
++++ b/doc/reference.html
+@@ -365,7 +365,7 @@ this is defined, default attribute values from an external DTD subset
+ are reported and attribute value normalization occurs based on the
+ type of attributes defined in the external subset. Without
+ this, Expat has a smaller memory footprint and can be faster, but will
+-not load external entities or process conditional sections. If defined, makes
++not load external parameter entities or process conditional sections. If defined, makes
+ the functions <code><a
+ href="#XML_SetBillionLaughsAttackProtectionMaximumAmplification">
+ XML_SetBillionLaughsAttackProtectionMaximumAmplification</a></code> and <code>
+--
+2.40.0
+
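The reference.html hunk above mentions the billion-laughs protection knobs whose availability the rest of this series extends from XML_DTD builds to any XML_GE == 1 build. A hedged usage sketch with those Expat calls (the function names and signatures come from the patched expat.h shown later in this series; the limits chosen here are arbitrary examples):

#include <expat.h>
#include <string.h>

/* Parse one in-memory document with tightened expansion limits. */
int parse_untrusted(const char *doc)
{
    XML_Parser p = XML_ParserCreate(NULL);
    if (p == NULL)
        return -1;

    /* Cap entity expansion at 10x, enforced after 4 MiB have been processed. */
    XML_SetBillionLaughsAttackProtectionMaximumAmplification(p, 10.0f);
    XML_SetBillionLaughsAttackProtectionActivationThreshold(p, 4ULL * 1024 * 1024);

    enum XML_Status st = XML_Parse(p, doc, (int)strlen(doc), 1 /* isFinal */);
    XML_ParserFree(p);
    return st == XML_STATUS_OK ? 0 : -1;
}
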
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-002.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-002.patch
new file mode 100644
index 0000000000..9aedc3010a
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-002.patch
@@ -0,0 +1,72 @@
+From daa89e42c005cc7f4f7af9eee271ae0723d30300 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 00:59:52 +0200
+
+Subject: [PATCH] cmake: Introduce option EXPAT_GE to control macro XML_GE
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/daa89e42c005cc7f4f7af9eee271ae0723d30300]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ CMakeLists.txt | 9 +++++++++
+ expat_config.h.cmake | 3 +++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 2b4c13c..416fe96 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -140,6 +140,8 @@ expat_shy_set(EXPAT_CONTEXT_BYTES 1024 CACHE STRING "Define to specify how much
+ mark_as_advanced(EXPAT_CONTEXT_BYTES)
+ expat_shy_set(EXPAT_DTD ON CACHE BOOL "Define to make parameter entity parsing functionality available")
+ mark_as_advanced(EXPAT_DTD)
++expat_shy_set(EXPAT_GE ON CACHE BOOL "Define to make general entity parsing functionality available")
++mark_as_advanced(EXPAT_GE)
+ expat_shy_set(EXPAT_NS ON CACHE BOOL "Define to make XML Namespaces functionality available")
+ mark_as_advanced(EXPAT_NS)
+ expat_shy_set(EXPAT_WARNINGS_AS_ERRORS OFF CACHE BOOL "Treat all compiler warnings as errors")
+@@ -172,6 +174,11 @@ endif()
+ #
+ # Environment checks
+ #
++if(EXPAT_DTD AND NOT EXPAT_GE)
++ message(SEND_ERROR "Option EXPAT_DTD requires that EXPAT_GE is also enabled.")
++ message(SEND_ERROR "Please either enable option EXPAT_GE (recommended) or disable EXPAT_DTD also.")
++endif()
++
+ if(EXPAT_WITH_LIBBSD)
+ find_library(LIB_BSD NAMES bsd)
+ if(NOT LIB_BSD)
+@@ -274,6 +281,7 @@ endif()
+
+ _expat_copy_bool_int(EXPAT_ATTR_INFO XML_ATTR_INFO)
+ _expat_copy_bool_int(EXPAT_DTD XML_DTD)
++_expat_copy_bool_int(EXPAT_GE XML_GE)
+ _expat_copy_bool_int(EXPAT_LARGE_SIZE XML_LARGE_SIZE)
+ _expat_copy_bool_int(EXPAT_MIN_SIZE XML_MIN_SIZE)
+ _expat_copy_bool_int(EXPAT_NS XML_NS)
+@@ -893,6 +901,7 @@ message(STATUS " // Advanced options, changes not advised")
+ message(STATUS " Attributes info .......... ${EXPAT_ATTR_INFO}")
+ message(STATUS " Context bytes ............ ${EXPAT_CONTEXT_BYTES}")
+ message(STATUS " DTD support .............. ${EXPAT_DTD}")
++message(STATUS " General entities ......... ${EXPAT_GE}")
+ message(STATUS " Large size ............... ${EXPAT_LARGE_SIZE}")
+ message(STATUS " Minimum size ............. ${EXPAT_MIN_SIZE}")
+ message(STATUS " Namespace support ........ ${EXPAT_NS}")
+diff --git a/expat_config.h.cmake b/expat_config.h.cmake
+index 78fcb4c..330945e 100644
+--- a/expat_config.h.cmake
++++ b/expat_config.h.cmake
+@@ -103,6 +103,9 @@
+ /* Define to make parameter entity parsing functionality available. */
+ #cmakedefine XML_DTD
+
++/* Define as 1/0 to enable/disable support for general entities. */
++#define XML_GE @XML_GE@
++
+ /* Define to make XML Namespaces functionality available. */
+ #cmakedefine XML_NS
+
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-003.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-003.patch
new file mode 100644
index 0000000000..96a62dcffc
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-003.patch
@@ -0,0 +1,28 @@
+From ed87a4793404e91c0cc0c81435fcfcc64a8be9f4 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 00:45:23 +0200
+Subject: [PATCH] configure.ac: Define macro XML_GE as 1
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/ed87a4793404e91c0cc0c81435fcfcc64a8be9f4]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ configure.ac | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index d3642de..153bb8e 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -295,6 +295,8 @@ AC_SUBST(FILEMAP)
+ dnl Some basic configuration:
+ AC_DEFINE([XML_NS], 1,
+ [Define to make XML Namespaces functionality available.])
++AC_DEFINE([XML_GE], 1,
++ [Define as 1/0 to enable/disable support for general entities.])
+ AC_DEFINE([XML_DTD], 1,
+ [Define to make parameter entity parsing functionality available.])
+ AC_DEFINE([XML_DEV_URANDOM], 1,
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-004.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-004.patch
new file mode 100644
index 0000000000..460113caf7
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-004.patch
@@ -0,0 +1,429 @@
+From 0f075ec8ecb5e43f8fdca5182f8cca4703da0404 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 00:43:22 +0200
+Subject: [PATCH] lib|xmlwf|cmake: Extend scope of billion laughs attack
+ protection
+
+.. from "defined(XML_DTD)" to "defined(XML_DTD) || XML_GE==1".
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/0f075ec8ecb5e43f8fdca5182f8cca4703da0404]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ CMakeLists.txt | 8 ++++-
+ lib/expat.h | 8 +++--
+ lib/internal.h | 2 +-
+ lib/libexpat.def.cmake | 4 +--
+ lib/xmlparse.c | 71 ++++++++++++++++++++++--------------------
+ xmlwf/xmlwf.c | 18 ++++++-----
+ 6 files changed, 62 insertions(+), 49 deletions(-)
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 416fe96..e6939e2 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -389,7 +389,13 @@ if(EXPAT_SHARED_LIBS)
+ endif()
+ endmacro()
+
+- _expat_def_file_toggle(EXPAT_DTD _EXPAT_COMMENT_DTD)
++ if(EXPAT_DTD OR EXPAT_GE)
++ set(_EXPAT_DTD_OR_GE TRUE)
++ else()
++ set(_EXPAT_DTD_OR_GE FALSE)
++ endif()
++
++ _expat_def_file_toggle(_EXPAT_DTD_OR_GE _EXPAT_COMMENT_DTD_OR_GE)
+ _expat_def_file_toggle(EXPAT_ATTR_INFO _EXPAT_COMMENT_ATTR_INFO)
+
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/lib/libexpat.def.cmake" "${CMAKE_CURRENT_BINARY_DIR}/lib/libexpat.def")
+diff --git a/lib/expat.h b/lib/expat.h
+index 1c83563..33c94af 100644
+--- a/lib/expat.h
++++ b/lib/expat.h
+@@ -1038,13 +1038,15 @@ typedef struct {
+ XMLPARSEAPI(const XML_Feature *)
+ XML_GetFeatureList(void);
+
+-#ifdef XML_DTD
+-/* Added in Expat 2.4.0. */
++#if defined(XML_DTD) || XML_GE == 1
++/* Added in Expat 2.4.0 for XML_DTD defined and
++ * added in Expat 2.6.0 for XML_GE == 1. */
+ XMLPARSEAPI(XML_Bool)
+ XML_SetBillionLaughsAttackProtectionMaximumAmplification(
+ XML_Parser parser, float maximumAmplificationFactor);
+
+-/* Added in Expat 2.4.0. */
++/* Added in Expat 2.4.0 for XML_DTD defined and
++ * added in Expat 2.6.0 for XML_GE == 1. */
+ XMLPARSEAPI(XML_Bool)
+ XML_SetBillionLaughsAttackProtectionActivationThreshold(
+ XML_Parser parser, unsigned long long activationThresholdBytes);
+diff --git a/lib/internal.h b/lib/internal.h
+index e09f533..1851925 100644
+--- a/lib/internal.h
++++ b/lib/internal.h
+@@ -154,7 +154,7 @@ extern "C" {
+ void _INTERNAL_trim_to_complete_utf8_characters(const char *from,
+ const char **fromLimRef);
+
+-#if defined(XML_DTD)
++#if defined(XML_DTD) || XML_GE == 1
+ unsigned long long testingAccountingGetCountBytesDirect(XML_Parser parser);
+ unsigned long long testingAccountingGetCountBytesIndirect(XML_Parser parser);
+ const char *unsignedCharToPrintable(unsigned char c);
+diff --git a/lib/libexpat.def.cmake b/lib/libexpat.def.cmake
+index cf434a2..61a4f00 100644
+--- a/lib/libexpat.def.cmake
++++ b/lib/libexpat.def.cmake
+@@ -75,5 +75,5 @@ EXPORTS
+ XML_SetHashSalt @67
+ ; internal @68 removed with version 2.3.1
+ ; added with version 2.4.0
+-@_EXPAT_COMMENT_DTD@ XML_SetBillionLaughsAttackProtectionActivationThreshold @69
+-@_EXPAT_COMMENT_DTD@ XML_SetBillionLaughsAttackProtectionMaximumAmplification @70
++@_EXPAT_COMMENT_DTD_OR_GE@ XML_SetBillionLaughsAttackProtectionActivationThreshold @69
++@_EXPAT_COMMENT_DTD_OR_GE@ XML_SetBillionLaughsAttackProtectionMaximumAmplification @70
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index b6c2eca..e23441e 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -408,7 +408,7 @@ enum XML_Account {
+ XML_ACCOUNT_NONE /* i.e. do not account, was accounted already */
+ };
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ typedef unsigned long long XmlBigCount;
+ typedef struct accounting {
+ XmlBigCount countBytesDirect;
+@@ -424,7 +424,7 @@ typedef struct entity_stats {
+ unsigned int maximumDepthSeen;
+ int debugLevel;
+ } ENTITY_STATS;
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+
+ typedef enum XML_Error PTRCALL Processor(XML_Parser parser, const char *start,
+ const char *end, const char **endPtr);
+@@ -562,7 +562,7 @@ static XML_Parser parserCreate(const XML_Char *encodingName,
+
+ static void parserInit(XML_Parser parser, const XML_Char *encodingName);
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ static float accountingGetCurrentAmplification(XML_Parser rootParser);
+ static void accountingReportStats(XML_Parser originParser, const char *epilog);
+ static void accountingOnAbort(XML_Parser originParser);
+@@ -585,7 +585,7 @@ static void entityTrackingOnClose(XML_Parser parser, ENTITY *entity,
+
+ static XML_Parser getRootParserOf(XML_Parser parser,
+ unsigned int *outLevelDiff);
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+
+ static unsigned long getDebugLevel(const char *variableName,
+ unsigned long defaultDebugLevel);
+@@ -703,7 +703,7 @@ struct XML_ParserStruct {
+ enum XML_ParamEntityParsing m_paramEntityParsing;
+ #endif
+ unsigned long m_hash_secret_salt;
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ ACCOUNTING m_accounting;
+ ENTITY_STATS m_entity_stats;
+ #endif
+@@ -1163,7 +1163,7 @@ parserInit(XML_Parser parser, const XML_Char *encodingName) {
+ #endif
+ parser->m_hash_secret_salt = 0;
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ memset(&parser->m_accounting, 0, sizeof(ACCOUNTING));
+ parser->m_accounting.debugLevel = getDebugLevel("EXPAT_ACCOUNTING_DEBUG", 0u);
+ parser->m_accounting.maximumAmplificationFactor
+@@ -2522,8 +2522,9 @@ XML_GetFeatureList(void) {
+ #ifdef XML_ATTR_INFO
+ {XML_FEATURE_ATTR_INFO, XML_L("XML_ATTR_INFO"), 0},
+ #endif
+-#ifdef XML_DTD
+- /* Added in Expat 2.4.0. */
++#if defined(XML_DTD) || XML_GE == 1
++ /* Added in Expat 2.4.0 for XML_DTD defined and
++ * added in Expat 2.6.0 for XML_GE == 1. */
+ {XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT,
+ XML_L("XML_BLAP_MAX_AMP"),
+ (long int)
+@@ -2537,7 +2538,7 @@ XML_GetFeatureList(void) {
+ return features;
+ }
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ XML_Bool XMLCALL
+ XML_SetBillionLaughsAttackProtectionMaximumAmplification(
+ XML_Parser parser, float maximumAmplificationFactor) {
+@@ -2559,7 +2560,7 @@ XML_SetBillionLaughsAttackProtectionActivationThreshold(
+ parser->m_accounting.activationThresholdBytes = activationThresholdBytes;
+ return XML_TRUE;
+ }
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+
+ /* Initially tag->rawName always points into the parse buffer;
+ for those TAG instances opened while the current parse buffer was
+@@ -2645,13 +2646,13 @@ externalEntityInitProcessor2(XML_Parser parser, const char *start,
+ int tok = XmlContentTok(parser->m_encoding, start, end, &next);
+ switch (tok) {
+ case XML_TOK_BOM:
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, start, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+ }
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+
+ /* If we are at the end of the buffer, this would cause the next stage,
+ i.e. externalEntityInitProcessor3, to pass control directly to
+@@ -2765,7 +2766,7 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ for (;;) {
+ const char *next = s; /* XmlContentTok doesn't always set the last arg */
+ int tok = XmlContentTok(enc, s, end, &next);
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ const char *accountAfter
+ = ((tok == XML_TOK_TRAILING_RSQB) || (tok == XML_TOK_TRAILING_CR))
+ ? (haveMore ? s /* i.e. 0 bytes */ : end)
+@@ -2831,14 +2832,14 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ XML_Char ch = (XML_Char)XmlPredefinedEntityName(
+ enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar);
+ if (ch) {
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ /* NOTE: We are replacing 4-6 characters original input for 1 character
+ * so there is no amplification and hence recording without
+ * protection. */
+ accountingDiffTolerated(parser, tok, (char *)&ch,
+ ((char *)&ch) + sizeof(XML_Char), __LINE__,
+ XML_ACCOUNT_ENTITY_EXPANSION);
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+ if (parser->m_characterDataHandler)
+ parser->m_characterDataHandler(parser->m_handlerArg, &ch, 1);
+ else if (parser->m_defaultHandler)
+@@ -4040,7 +4041,7 @@ doCdataSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
+ for (;;) {
+ const char *next = s; /* in case of XML_TOK_NONE or XML_TOK_PARTIAL */
+ int tok = XmlCdataSectionTok(enc, s, end, &next);
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__, account)) {
+ accountingOnAbort(parser);
+ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+@@ -4192,7 +4193,7 @@ doIgnoreSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
+ *eventPP = s;
+ *startPtr = NULL;
+ tok = XmlIgnoreSectionTok(enc, s, end, &next);
+-# ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -4284,7 +4285,7 @@ processXmlDecl(XML_Parser parser, int isGeneralTextEntity, const char *s,
+ const XML_Char *storedversion = NULL;
+ int standalone = -1;
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, XML_TOK_XML_DECL, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -4491,7 +4492,7 @@ entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
+ */
+ else if (tok == XML_TOK_BOM && next == end
+ && ! parser->m_parsingStatus.finalBuffer) {
+-# ifdef XML_DTD
++# if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -4707,11 +4708,13 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ }
+ }
+ role = XmlTokenRole(&parser->m_prologState, tok, s, next, enc);
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ switch (role) {
+ case XML_ROLE_INSTANCE_START: // bytes accounted in contentProcessor
+ case XML_ROLE_XML_DECL: // bytes accounted in processXmlDecl
+- case XML_ROLE_TEXT_DECL: // bytes accounted in processXmlDecl
++ # ifdef XML_DTD
++ case XML_ROLE_TEXT_DECL: // bytes accounted in processXmlDecl
++# endif
+ break;
+ default:
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__, account)) {
+@@ -5648,7 +5651,7 @@ epilogProcessor(XML_Parser parser, const char *s, const char *end,
+ for (;;) {
+ const char *next = NULL;
+ int tok = XmlPrologTok(parser->m_encoding, s, end, &next);
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -5728,7 +5731,7 @@ processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
+ return XML_ERROR_NO_MEMORY;
+ }
+ entity->open = XML_TRUE;
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ entityTrackingOnOpen(parser, entity, __LINE__);
+ #endif
+ entity->processed = 0;
+@@ -5762,9 +5765,9 @@ processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
+ entity->processed = (int)(next - textStart);
+ parser->m_processor = internalEntityProcessor;
+ } else {
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+ entity->open = XML_FALSE;
+ parser->m_openInternalEntities = openEntity->next;
+ /* put openEntity back in list of free instances */
+@@ -5813,7 +5816,7 @@ internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
+ return result;
+ }
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+ #endif
+ entity->open = XML_FALSE;
+@@ -5892,7 +5895,7 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ const char *next
+ = ptr; /* XmlAttributeValueTok doesn't always set the last arg */
+ int tok = XmlAttributeValueTok(enc, ptr, end, &next);
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, ptr, next, __LINE__, account)) {
+ accountingOnAbort(parser);
+ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+@@ -5957,14 +5960,14 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ XML_Char ch = (XML_Char)XmlPredefinedEntityName(
+ enc, ptr + enc->minBytesPerChar, next - enc->minBytesPerChar);
+ if (ch) {
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ /* NOTE: We are replacing 4-6 characters original input for 1 character
+ * so there is no amplification and hence recording without
+ * protection. */
+ accountingDiffTolerated(parser, tok, (char *)&ch,
+ ((char *)&ch) + sizeof(XML_Char), __LINE__,
+ XML_ACCOUNT_ENTITY_EXPANSION);
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+ if (! poolAppendChar(pool, ch))
+ return XML_ERROR_NO_MEMORY;
+ break;
+@@ -6042,14 +6045,14 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ enum XML_Error result;
+ const XML_Char *textEnd = entity->textPtr + entity->textLen;
+ entity->open = XML_TRUE;
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ entityTrackingOnOpen(parser, entity, __LINE__);
+ #endif
+ result = appendAttributeValue(parser, parser->m_internalEncoding,
+ isCdata, (const char *)entity->textPtr,
+ (const char *)textEnd, pool,
+ XML_ACCOUNT_ENTITY_EXPANSION);
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+ #endif
+ entity->open = XML_FALSE;
+@@ -6105,7 +6108,7 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ = entityTextPtr; /* XmlEntityValueTok doesn't always set the last arg */
+ int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, entityTextPtr, next, __LINE__,
+ account)) {
+ accountingOnAbort(parser);
+@@ -7651,7 +7654,7 @@ copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite) {
+ return result;
+ }
+
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+
+ static float
+ accountingGetCurrentAmplification(XML_Parser rootParser) {
+@@ -8382,7 +8385,7 @@ unsignedCharToPrintable(unsigned char c) {
+ assert(0); /* never gets here */
+ }
+
+-#endif /* XML_DTD */
++#endif /* defined(XML_DTD) || XML_GE == 1 */
+
+ static unsigned long
+ getDebugLevel(const char *variableName, unsigned long defaultDebugLevel) {
+diff --git a/xmlwf/xmlwf.c b/xmlwf/xmlwf.c
+index 471f2a2..be23f5a 100644
+--- a/xmlwf/xmlwf.c
++++ b/xmlwf/xmlwf.c
+@@ -1062,9 +1062,10 @@ tmain(int argc, XML_Char **argv) {
+ " (needs a floating point number greater or equal than 1.0)"));
+ exit(XMLWF_EXIT_USAGE_ERROR);
+ }
+-#ifndef XML_DTD
+- ftprintf(stderr, T("Warning: Given amplification limit ignored") T(
+- ", xmlwf has been compiled without DTD support.\n"));
++#if ! defined(XML_DTD) && XML_GE == 0
++ ftprintf(stderr,
++ T("Warning: Given amplification limit ignored")
++ T(", xmlwf has been compiled without DTD/GE support.\n"));
+ #endif
+ break;
+ }
+@@ -1083,9 +1084,10 @@ tmain(int argc, XML_Char **argv) {
+ exit(XMLWF_EXIT_USAGE_ERROR);
+ }
+ attackThresholdGiven = XML_TRUE;
+-#ifndef XML_DTD
+- ftprintf(stderr, T("Warning: Given attack threshold ignored") T(
+- ", xmlwf has been compiled without DTD support.\n"));
++#if ! defined(XML_DTD) && XML_GE == 0
++ ftprintf(stderr,
++ T("Warning: Given attack threshold ignored")
++ T(", xmlwf has been compiled without DTD/GE support.\n"));
+ #endif
+ break;
+ }
+@@ -1120,13 +1122,13 @@ tmain(int argc, XML_Char **argv) {
+ }
+
+ if (attackMaximumAmplification != -1.0f) {
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ XML_SetBillionLaughsAttackProtectionMaximumAmplification(
+ parser, attackMaximumAmplification);
+ #endif
+ }
+ if (attackThresholdGiven) {
+-#ifdef XML_DTD
++#if defined(XML_DTD) || XML_GE == 1
+ XML_SetBillionLaughsAttackProtectionActivationThreshold(
+ parser, attackThresholdBytes);
+ #else
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-005.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-005.patch
new file mode 100644
index 0000000000..1e8223fff0
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-005.patch
@@ -0,0 +1,34 @@
+From b0975cb73a41869fbecf0fa55afd35b69b64cc50 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 00:47:52 +0200
+Subject: [PATCH] lib: Fail the build if XML_GE is not set to 1 or 0
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/b0975cb73a41869fbecf0fa55afd35b69b64cc50]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ lib/xmlparse.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index e23441e..ac3efe1 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -62,6 +62,14 @@
+
+ #include <expat_config.h>
+
++#if ! defined(XML_GE) || (1 - XML_GE - 1 == 2) || (XML_GE < 0) || (XML_GE > 1)
++# error XML_GE (for general entities) must be defined, non-empty, either 1 or 0 (0 to disable, 1 to enable; 1 is a common default)
++#endif
++
++#if defined(XML_DTD) && XML_GE == 0
++# error Either undefine XML_DTD or define XML_GE to 1.
++#endif
++
+ #if ! defined(_GNU_SOURCE)
+ # define _GNU_SOURCE 1 /* syscall prototype */
+ #endif
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-006.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-006.patch
new file mode 100644
index 0000000000..d1ab52fa32
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-006.patch
@@ -0,0 +1,174 @@
+From 2b127c20b220b673cf52c6be8bef725bf04cbeaf Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 18:32:11 +0200
+Subject: [PATCH] lib: Make XML_GE==0 use self-references as entity replacement
+ text
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/2b127c20b220b673cf52c6be8bef725bf04cbeaf]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ lib/xmlparse.c | 79 +++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 71 insertions(+), 8 deletions(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index ac3efe1..c479174 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -504,9 +504,13 @@ static enum XML_Error appendAttributeValue(XML_Parser parser, const ENCODING *,
+ static ATTRIBUTE_ID *getAttributeId(XML_Parser parser, const ENCODING *enc,
+ const char *start, const char *end);
+ static int setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *);
++#if XML_GE == 1
+ static enum XML_Error storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ const char *start, const char *end,
+ enum XML_Account account);
++#else
++static enum XML_Error storeSelfEntityValue(XML_Parser parser, ENTITY *entity);
++#endif
+ static int reportProcessingInstruction(XML_Parser parser, const ENCODING *enc,
+ const char *start, const char *end);
+ static int reportComment(XML_Parser parser, const ENCODING *enc,
+@@ -5040,6 +5044,9 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ break;
+ case XML_ROLE_ENTITY_VALUE:
+ if (dtd->keepProcessing) {
++#if defined(XML_DTD) || XML_GE == 1
++ // This will store the given replacement text in
++ // parser->m_declEntity->textPtr.
+ enum XML_Error result
+ = storeEntityValue(parser, enc, s + enc->minBytesPerChar,
+ next - enc->minBytesPerChar, XML_ACCOUNT_NONE);
+@@ -5060,6 +5067,25 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ poolDiscard(&dtd->entityValuePool);
+ if (result != XML_ERROR_NONE)
+ return result;
++#else
++ // This will store "&amp;entity123;" in parser->m_declEntity->textPtr
++ // to end up as "&entity123;" in the handler.
++ if (parser->m_declEntity != NULL) {
++ const enum XML_Error result
++ = storeSelfEntityValue(parser, parser->m_declEntity);
++ if (result != XML_ERROR_NONE)
++ return result;
++
++ if (parser->m_entityDeclHandler) {
++ *eventEndPP = s;
++ parser->m_entityDeclHandler(
++ parser->m_handlerArg, parser->m_declEntity->name,
++ parser->m_declEntity->is_param, parser->m_declEntity->textPtr,
++ parser->m_declEntity->textLen, parser->m_curBase, 0, 0, 0);
++ handleDefault = XML_FALSE;
++ }
++ }
++#endif
+ }
+ break;
+ case XML_ROLE_DOCTYPE_SYSTEM_ID:
+@@ -5102,6 +5128,16 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ #endif /* XML_DTD */
+ /* fall through */
+ case XML_ROLE_ENTITY_SYSTEM_ID:
++#if XML_GE == 0
++ // This will store "&amp;entity123;" in entity->textPtr
++ // to end up as "&entity123;" in the handler.
++ if (parser->m_declEntity != NULL) {
++ const enum XML_Error result
++ = storeSelfEntityValue(parser, parser->m_declEntity);
++ if (result != XML_ERROR_NONE)
++ return result;
++ }
++#endif
+ if (dtd->keepProcessing && parser->m_declEntity) {
+ parser->m_declEntity->systemId
+ = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar,
+@@ -6090,6 +6126,7 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ /* not reached */
+ }
+
++#if XML_GE == 1
+ static enum XML_Error
+ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ const char *entityTextPtr, const char *entityTextEnd,
+@@ -6097,12 +6134,12 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ DTD *const dtd = parser->m_dtd; /* save one level of indirection */
+ STRING_POOL *pool = &(dtd->entityValuePool);
+ enum XML_Error result = XML_ERROR_NONE;
+-#ifdef XML_DTD
++# ifdef XML_DTD
+ int oldInEntityValue = parser->m_prologState.inEntityValue;
+ parser->m_prologState.inEntityValue = 1;
+-#else
++# else
+ UNUSED_P(account);
+-#endif /* XML_DTD */
++# endif /* XML_DTD */
+ /* never return Null for the value argument in EntityDeclHandler,
+ since this would indicate an external entity; therefore we
+ have to make sure that entityValuePool.start is not null */
+@@ -6116,18 +6153,18 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ = entityTextPtr; /* XmlEntityValueTok doesn't always set the last arg */
+ int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
+
+-#if defined(XML_DTD) || XML_GE == 1
++# if defined(XML_DTD) || XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, entityTextPtr, next, __LINE__,
+ account)) {
+ accountingOnAbort(parser);
+ result = XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+ goto endEntityValue;
+ }
+-#endif
++# endif
+
+ switch (tok) {
+ case XML_TOK_PARAM_ENTITY_REF:
+-#ifdef XML_DTD
++# ifdef XML_DTD
+ if (parser->m_isParamEntity || enc != parser->m_encoding) {
+ const XML_Char *name;
+ ENTITY *entity;
+@@ -6270,12 +6307,38 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ entityTextPtr = next;
+ }
+ endEntityValue:
+-#ifdef XML_DTD
++# ifdef XML_DTD
+ parser->m_prologState.inEntityValue = oldInEntityValue;
+-#endif /* XML_DTD */
++# endif /* XML_DTD */
+ return result;
+ }
+
++#else /* XML_GE == 0 */
++
++static enum XML_Error
++storeSelfEntityValue(XML_Parser parser, ENTITY *entity) {
++ // This will store "&amp;entity123;" in entity->textPtr
++ // to end up as "&entity123;" in the handler.
++ const char *const entity_start = "&amp;";
++ const char *const entity_end = ";";
++
++ STRING_POOL *const pool = &(parser->m_dtd->entityValuePool);
++ if (! poolAppendString(pool, entity_start)
++ || ! poolAppendString(pool, entity->name)
++ || ! poolAppendString(pool, entity_end)) {
++ poolDiscard(pool);
++ return XML_ERROR_NO_MEMORY;
++ }
++
++ entity->textPtr = poolStart(pool);
++ entity->textLen = (int)(poolLength(pool));
++ poolFinish(pool);
++
++ return XML_ERROR_NONE;
++}
++
++#endif /* XML_GE == 0 */
++
+ static void FASTCALL
+ normalizeLines(XML_Char *s) {
+ XML_Char *p;
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-007.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-007.patch
new file mode 100644
index 0000000000..a141bbf915
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-007.patch
@@ -0,0 +1,53 @@
+From d3f7bbd37bef2565d64f31b549e197a3a414574e Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 01:39:39 +0200
+Subject: [PATCH] doc/reference.html: Document build time macro XML_GE
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/d3f7bbd37bef2565d64f31b549e197a3a414574e]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ doc/reference.html | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+diff --git a/doc/reference.html b/doc/reference.html
+index 8b0d47d..74ba012 100644
+--- a/doc/reference.html
++++ b/doc/reference.html
+@@ -359,6 +359,33 @@ and the definition of character types in the case of
+ <code>XML_UNICODE_WCHAR_T</code>. The symbols are:</p>
+
+ <dl class="cpp-symbols">
++<dt><a name="XML_GE">XML_GE</a></dt>
++<dd>
++Added in Expat 2.6.0.
++Include support for
++<a href="https://www.w3.org/TR/2006/REC-xml-20060816/#sec-physical-struct">general entities</a>
++(syntax <code>&amp;e1;</code> to reference and
++syntax <code>&lt;!ENTITY e1 'value1'&gt;</code> (an internal general entity) or
++<code>&lt;!ENTITY e2 SYSTEM 'file2'&gt;</code> (an external general entity) to declare).
++With <code>XML_GE</code> enabled, general entities will be replaced by their declared replacement text;
++for this to work for <em>external</em> general entities, in addition an
++<code><a href="#XML_SetExternalEntityRefHandler">XML_ExternalEntityRefHandler</a></code> must be set using
++<code><a href="#XML_SetExternalEntityRefHandler">XML_SetExternalEntityRefHandler</a></code>.
++Also, enabling <code>XML_GE</code> makes
++the functions <code><a href="#XML_SetBillionLaughsAttackProtectionMaximumAmplification">
++XML_SetBillionLaughsAttackProtectionMaximumAmplification</a></code> and <code>
++<a href="#XML_SetBillionLaughsAttackProtectionActivationThreshold">
++XML_SetBillionLaughsAttackProtectionActivationThreshold</a></code> available.
++<br/>
++With <code>XML_GE</code> disabled, Expat has a smaller memory footprint and can be faster, but will
++not load external general entities and will replace all general entities
++(except the <a href="https://www.w3.org/TR/2006/REC-xml-20060816/#sec-predefined-ent">predefined five</a>:
++<code>amp</code>, <code>apos</code>, <code>gt</code>, <code>lt</code>, <code>quot</code>)
++with a self-reference:
++for example, referencing an entity <code>e1</code> via <code>&amp;e1;</code> will be replaced
++by text <code>&amp;e1;</code>.
++</dd>
++
+ <dt>XML_DTD</dt>
+ <dd>Include support for using and reporting DTD-based content. If
+ this is defined, default attribute values from an external DTD subset
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-008.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-008.patch
new file mode 100644
index 0000000000..d07c62ccf0
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-008.patch
@@ -0,0 +1,37 @@
+From 2848dc4e7067de503934b388717e7a3d8d0c5bca Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Fri, 27 Oct 2023 18:45:50 +0200
+Subject: [PATCH] Simplify "! defined(XML_DTD) && XML_GE == 0" to "XML_GE == 0"
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/2848dc4e7067de503934b388717e7a3d8d0c5bca]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ xmlwf/xmlwf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/xmlwf/xmlwf.c b/xmlwf/xmlwf.c
+index be23f5a..04ca759 100644
+--- a/xmlwf/xmlwf.c
++++ b/xmlwf/xmlwf.c
+@@ -1062,7 +1062,7 @@ tmain(int argc, XML_Char **argv) {
+ " (needs a floating point number greater or equal than 1.0)"));
+ exit(XMLWF_EXIT_USAGE_ERROR);
+ }
+-#if ! defined(XML_DTD) && XML_GE == 0
++#if XML_GE == 0
+ ftprintf(stderr,
+ T("Warning: Given amplification limit ignored")
+ T(", xmlwf has been compiled without DTD/GE support.\n"));
+@@ -1084,7 +1084,7 @@ tmain(int argc, XML_Char **argv) {
+ exit(XMLWF_EXIT_USAGE_ERROR);
+ }
+ attackThresholdGiven = XML_TRUE;
+-#if ! defined(XML_DTD) && XML_GE == 0
++#if XML_GE == 0
+ ftprintf(stderr,
+ T("Warning: Given attack threshold ignored")
+ T(", xmlwf has been compiled without DTD/GE support.\n"));
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-009.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-009.patch
new file mode 100644
index 0000000000..99460249c0
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-009.patch
@@ -0,0 +1,354 @@
+From caa27198637683b15d810737bb8a6a81af19bfa5 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Fri, 27 Oct 2023 18:47:37 +0200
+Subject: [PATCH] Simplify "defined(XML_DTD) || XML_GE == 1" to "XML_GE == 1"
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/caa27198637683b15d810737bb8a6a81af19bfa5]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ lib/expat.h | 2 +-
+ lib/internal.h | 2 +-
+ lib/xmlparse.c | 66 +++++++++++++++++++++++++-------------------------
+ xmlwf/xmlwf.c | 4 +--
+ 4 files changed, 37 insertions(+), 37 deletions(-)
+
+diff --git a/lib/expat.h b/lib/expat.h
+index 33c94af..fa2eb45 100644
+--- a/lib/expat.h
++++ b/lib/expat.h
+@@ -1038,7 +1038,7 @@ typedef struct {
+ XMLPARSEAPI(const XML_Feature *)
+ XML_GetFeatureList(void);
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ /* Added in Expat 2.4.0 for XML_DTD defined and
+ * added in Expat 2.6.0 for XML_GE == 1. */
+ XMLPARSEAPI(XML_Bool)
+diff --git a/lib/internal.h b/lib/internal.h
+index 1851925..03c8fde 100644
+--- a/lib/internal.h
++++ b/lib/internal.h
+@@ -154,7 +154,7 @@ extern "C" {
+ void _INTERNAL_trim_to_complete_utf8_characters(const char *from,
+ const char **fromLimRef);
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ unsigned long long testingAccountingGetCountBytesDirect(XML_Parser parser);
+ unsigned long long testingAccountingGetCountBytesIndirect(XML_Parser parser);
+ const char *unsignedCharToPrintable(unsigned char c);
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index c479174..2d8f4c0 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -416,7 +416,7 @@ enum XML_Account {
+ XML_ACCOUNT_NONE /* i.e. do not account, was accounted already */
+ };
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ typedef unsigned long long XmlBigCount;
+ typedef struct accounting {
+ XmlBigCount countBytesDirect;
+@@ -432,7 +432,7 @@ typedef struct entity_stats {
+ unsigned int maximumDepthSeen;
+ int debugLevel;
+ } ENTITY_STATS;
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+
+ typedef enum XML_Error PTRCALL Processor(XML_Parser parser, const char *start,
+ const char *end, const char **endPtr);
+@@ -574,7 +574,7 @@ static XML_Parser parserCreate(const XML_Char *encodingName,
+
+ static void parserInit(XML_Parser parser, const XML_Char *encodingName);
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ static float accountingGetCurrentAmplification(XML_Parser rootParser);
+ static void accountingReportStats(XML_Parser originParser, const char *epilog);
+ static void accountingOnAbort(XML_Parser originParser);
+@@ -597,7 +597,7 @@ static void entityTrackingOnClose(XML_Parser parser, ENTITY *entity,
+
+ static XML_Parser getRootParserOf(XML_Parser parser,
+ unsigned int *outLevelDiff);
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+
+ static unsigned long getDebugLevel(const char *variableName,
+ unsigned long defaultDebugLevel);
+@@ -715,7 +715,7 @@ struct XML_ParserStruct {
+ enum XML_ParamEntityParsing m_paramEntityParsing;
+ #endif
+ unsigned long m_hash_secret_salt;
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ ACCOUNTING m_accounting;
+ ENTITY_STATS m_entity_stats;
+ #endif
+@@ -1175,7 +1175,7 @@ parserInit(XML_Parser parser, const XML_Char *encodingName) {
+ #endif
+ parser->m_hash_secret_salt = 0;
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ memset(&parser->m_accounting, 0, sizeof(ACCOUNTING));
+ parser->m_accounting.debugLevel = getDebugLevel("EXPAT_ACCOUNTING_DEBUG", 0u);
+ parser->m_accounting.maximumAmplificationFactor
+@@ -2534,7 +2534,7 @@ XML_GetFeatureList(void) {
+ #ifdef XML_ATTR_INFO
+ {XML_FEATURE_ATTR_INFO, XML_L("XML_ATTR_INFO"), 0},
+ #endif
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ /* Added in Expat 2.4.0 for XML_DTD defined and
+ * added in Expat 2.6.0 for XML_GE == 1. */
+ {XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT,
+@@ -2550,7 +2550,7 @@ XML_GetFeatureList(void) {
+ return features;
+ }
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ XML_Bool XMLCALL
+ XML_SetBillionLaughsAttackProtectionMaximumAmplification(
+ XML_Parser parser, float maximumAmplificationFactor) {
+@@ -2572,7 +2572,7 @@ XML_SetBillionLaughsAttackProtectionActivationThreshold(
+ parser->m_accounting.activationThresholdBytes = activationThresholdBytes;
+ return XML_TRUE;
+ }
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+
+ /* Initially tag->rawName always points into the parse buffer;
+ for those TAG instances opened while the current parse buffer was
+@@ -2658,13 +2658,13 @@ externalEntityInitProcessor2(XML_Parser parser, const char *start,
+ int tok = XmlContentTok(parser->m_encoding, start, end, &next);
+ switch (tok) {
+ case XML_TOK_BOM:
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, start, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+ }
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+
+ /* If we are at the end of the buffer, this would cause the next stage,
+ i.e. externalEntityInitProcessor3, to pass control directly to
+@@ -2778,7 +2778,7 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ for (;;) {
+ const char *next = s; /* XmlContentTok doesn't always set the last arg */
+ int tok = XmlContentTok(enc, s, end, &next);
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ const char *accountAfter
+ = ((tok == XML_TOK_TRAILING_RSQB) || (tok == XML_TOK_TRAILING_CR))
+ ? (haveMore ? s /* i.e. 0 bytes */ : end)
+@@ -2844,14 +2844,14 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ XML_Char ch = (XML_Char)XmlPredefinedEntityName(
+ enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar);
+ if (ch) {
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ /* NOTE: We are replacing 4-6 characters original input for 1 character
+ * so there is no amplification and hence recording without
+ * protection. */
+ accountingDiffTolerated(parser, tok, (char *)&ch,
+ ((char *)&ch) + sizeof(XML_Char), __LINE__,
+ XML_ACCOUNT_ENTITY_EXPANSION);
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+ if (parser->m_characterDataHandler)
+ parser->m_characterDataHandler(parser->m_handlerArg, &ch, 1);
+ else if (parser->m_defaultHandler)
+@@ -4053,7 +4053,7 @@ doCdataSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
+ for (;;) {
+ const char *next = s; /* in case of XML_TOK_NONE or XML_TOK_PARTIAL */
+ int tok = XmlCdataSectionTok(enc, s, end, &next);
+-#if defined(XML_DTD) || XML_GE == 1
++# if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__, account)) {
+ accountingOnAbort(parser);
+ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+@@ -4205,7 +4205,7 @@ doIgnoreSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
+ *eventPP = s;
+ *startPtr = NULL;
+ tok = XmlIgnoreSectionTok(enc, s, end, &next);
+-#if defined(XML_DTD) || XML_GE == 1
++# if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -4297,7 +4297,7 @@ processXmlDecl(XML_Parser parser, int isGeneralTextEntity, const char *s,
+ const XML_Char *storedversion = NULL;
+ int standalone = -1;
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ if (! accountingDiffTolerated(parser, XML_TOK_XML_DECL, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -4504,7 +4504,7 @@ entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
+ */
+ else if (tok == XML_TOK_BOM && next == end
+ && ! parser->m_parsingStatus.finalBuffer) {
+-# if defined(XML_DTD) || XML_GE == 1
++# if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -4720,7 +4720,7 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ }
+ }
+ role = XmlTokenRole(&parser->m_prologState, tok, s, next, enc);
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ switch (role) {
+ case XML_ROLE_INSTANCE_START: // bytes accounted in contentProcessor
+ case XML_ROLE_XML_DECL: // bytes accounted in processXmlDecl
+@@ -5044,7 +5044,7 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ break;
+ case XML_ROLE_ENTITY_VALUE:
+ if (dtd->keepProcessing) {
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ // This will store the given replacement text in
+ // parser->m_declEntity->textPtr.
+ enum XML_Error result
+@@ -5695,7 +5695,7 @@ epilogProcessor(XML_Parser parser, const char *s, const char *end,
+ for (;;) {
+ const char *next = NULL;
+ int tok = XmlPrologTok(parser->m_encoding, s, end, &next);
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
+ XML_ACCOUNT_DIRECT)) {
+ accountingOnAbort(parser);
+@@ -5775,7 +5775,7 @@ processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
+ return XML_ERROR_NO_MEMORY;
+ }
+ entity->open = XML_TRUE;
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ entityTrackingOnOpen(parser, entity, __LINE__);
+ #endif
+ entity->processed = 0;
+@@ -5809,9 +5809,9 @@ processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
+ entity->processed = (int)(next - textStart);
+ parser->m_processor = internalEntityProcessor;
+ } else {
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+ entity->open = XML_FALSE;
+ parser->m_openInternalEntities = openEntity->next;
+ /* put openEntity back in list of free instances */
+@@ -5860,7 +5860,7 @@ internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
+ return result;
+ }
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+ #endif
+ entity->open = XML_FALSE;
+@@ -5939,7 +5939,7 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ const char *next
+ = ptr; /* XmlAttributeValueTok doesn't always set the last arg */
+ int tok = XmlAttributeValueTok(enc, ptr, end, &next);
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, ptr, next, __LINE__, account)) {
+ accountingOnAbort(parser);
+ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+@@ -6004,14 +6004,14 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ XML_Char ch = (XML_Char)XmlPredefinedEntityName(
+ enc, ptr + enc->minBytesPerChar, next - enc->minBytesPerChar);
+ if (ch) {
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ /* NOTE: We are replacing 4-6 characters original input for 1 character
+ * so there is no amplification and hence recording without
+ * protection. */
+ accountingDiffTolerated(parser, tok, (char *)&ch,
+ ((char *)&ch) + sizeof(XML_Char), __LINE__,
+ XML_ACCOUNT_ENTITY_EXPANSION);
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+ if (! poolAppendChar(pool, ch))
+ return XML_ERROR_NO_MEMORY;
+ break;
+@@ -6089,14 +6089,14 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ enum XML_Error result;
+ const XML_Char *textEnd = entity->textPtr + entity->textLen;
+ entity->open = XML_TRUE;
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ entityTrackingOnOpen(parser, entity, __LINE__);
+ #endif
+ result = appendAttributeValue(parser, parser->m_internalEncoding,
+ isCdata, (const char *)entity->textPtr,
+ (const char *)textEnd, pool,
+ XML_ACCOUNT_ENTITY_EXPANSION);
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+ #endif
+ entity->open = XML_FALSE;
+@@ -6153,7 +6153,7 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ = entityTextPtr; /* XmlEntityValueTok doesn't always set the last arg */
+ int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
+
+-# if defined(XML_DTD) || XML_GE == 1
++# if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, entityTextPtr, next, __LINE__,
+ account)) {
+ accountingOnAbort(parser);
+@@ -7725,7 +7725,7 @@ copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite) {
+ return result;
+ }
+
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+
+ static float
+ accountingGetCurrentAmplification(XML_Parser rootParser) {
+@@ -8456,7 +8456,7 @@ unsignedCharToPrintable(unsigned char c) {
+ assert(0); /* never gets here */
+ }
+
+-#endif /* defined(XML_DTD) || XML_GE == 1 */
++#endif /* XML_GE == 1 */
+
+ static unsigned long
+ getDebugLevel(const char *variableName, unsigned long defaultDebugLevel) {
+diff --git a/xmlwf/xmlwf.c b/xmlwf/xmlwf.c
+index 04ca759..dd023a9 100644
+--- a/xmlwf/xmlwf.c
++++ b/xmlwf/xmlwf.c
+@@ -1122,13 +1122,13 @@ tmain(int argc, XML_Char **argv) {
+ }
+
+ if (attackMaximumAmplification != -1.0f) {
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ XML_SetBillionLaughsAttackProtectionMaximumAmplification(
+ parser, attackMaximumAmplification);
+ #endif
+ }
+ if (attackThresholdGiven) {
+-#if defined(XML_DTD) || XML_GE == 1
++#if XML_GE == 1
+ XML_SetBillionLaughsAttackProtectionActivationThreshold(
+ parser, attackThresholdBytes);
+ #else
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-010.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-010.patch
new file mode 100644
index 0000000000..4b5c5cb2e1
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-010.patch
@@ -0,0 +1,50 @@
+From 55fecd6aa4af4a540812b81234679cd6b5714f1b Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Wed, 1 Nov 2023 18:24:55 +0100
+Subject: [PATCH] Drop redundant "XML_GE == 1" guards
+
+These are redundant because further out there is a guard
+for "XML_GE == 1" already. In the visual world, the pattern
+is this:
+
+> #if XML_GE == 1
+> [..]
+> # if XML_GE == 1
+> [..]
+> # endif
+> [..]
+> #endif
+
+Spotted by Snild Dolkow, thanks!
+
+Co-authored-by: Snild Dolkow <snild@sony.com>
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/55fecd6aa4af4a540812b81234679cd6b5714f1b]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ lib/xmlparse.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index 2d8f4c0..82a8006 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -6153,14 +6153,12 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ = entityTextPtr; /* XmlEntityValueTok doesn't always set the last arg */
+ int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
+
+-# if XML_GE == 1
+ if (! accountingDiffTolerated(parser, tok, entityTextPtr, next, __LINE__,
+ account)) {
+ accountingOnAbort(parser);
+ result = XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
+ goto endEntityValue;
+ }
+-# endif
+
+ switch (tok) {
+ case XML_TOK_PARAM_ENTITY_REF:
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2023-52426-011.patch b/meta/recipes-core/expat/expat/CVE-2023-52426-011.patch
new file mode 100644
index 0000000000..d1b0be2aff
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2023-52426-011.patch
@@ -0,0 +1,45 @@
+From 8a6c61de4a425977e357cafd8667a0d7771ce292 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 26 Oct 2023 01:29:03 +0200
+Subject: [PATCH] lib: Add XML_GE to XML_GetFeatureList and XML_FeatureEnum
+ Co-authored-by: Snild Dolkow <snild@sony.com>
+
+CVE: CVE-2023-52426
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/8a6c61de4a425977e357cafd8667a0d7771ce292]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ lib/expat.h | 4 +++-
+ lib/xmlparse.c | 2 ++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/lib/expat.h b/lib/expat.h
+index fa2eb45..9e64174 100644
+--- a/lib/expat.h
++++ b/lib/expat.h
+@@ -1025,7 +1025,9 @@ enum XML_FeatureEnum {
+ XML_FEATURE_ATTR_INFO,
+ /* Added in Expat 2.4.0. */
+ XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT,
+- XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT
++ XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT,
++ /* Added in Expat 2.6.0. */
++ XML_FEATURE_GE
+ /* Additional features must be added to the end of this enum. */
+ };
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index 82a8006..0627d6c 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -2544,6 +2544,8 @@ XML_GetFeatureList(void) {
+ {XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT,
+ XML_L("XML_BLAP_ACT_THRES"),
+ EXPAT_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT},
++ /* Added in Expat 2.6.0. */
++ {XML_FEATURE_GE, XML_L("XML_GE"), 0},
+ #endif
+ {XML_FEATURE_END, NULL, 0}};
+
+--
+2.40.0
diff --git a/meta/recipes-core/expat/expat/CVE-2024-28757.patch b/meta/recipes-core/expat/expat/CVE-2024-28757.patch
new file mode 100755
index 0000000000..768dab0c84
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2024-28757.patch
@@ -0,0 +1,58 @@
+From 1d50b80cf31de87750103656f6eb693746854aa8 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Mon, 4 Mar 2024 23:49:06 +0100
+Subject: [PATCH] lib/xmlparse.c: Detect billion laughs attack with isolated
+ external parser
+
+When parsing DTD content with code like ..
+
+ XML_Parser parser = XML_ParserCreate(NULL);
+ XML_Parser ext_parser = XML_ExternalEntityParserCreate(parser, NULL, NULL);
+ enum XML_Status status = XML_Parse(ext_parser, doc, (int)strlen(doc), XML_TRUE);
+
+.. there are 0 bytes accounted as direct input and all input from `doc` accounted
+as indirect input. Now function accountingGetCurrentAmplification cannot calculate
+the current amplification ratio as "(direct + indirect) / direct", and it did refuse
+to divide by 0 as one would expect, but it returned 1.0 for this case to indicate
+no amplification over direct input. As a result, billion laughs attacks from
+DTD-only input were not detected with this isolated way of using an external parser.
+
+The new approach is to assume direct input of length not 0 but 22 -- derived from
+ghost input "<!ENTITY a SYSTEM 'b'>", the shortest possible way to include an external
+DTD --, and do the usual "(direct + indirect) / direct" math with "direct := 22".
+
+GitHub issue #839 has more details on this issue and its origin in ClusterFuzz
+finding 66812.
+
+CVE: CVE-2024-28757
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/1d50b80cf31de87750103656f6eb693746854aa8]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ lib/xmlparse.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index b884d82b5..d44baa68d 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -7655,6 +7655,8 @@ copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite) {
+
+ static float
+ accountingGetCurrentAmplification(XML_Parser rootParser) {
++ // 1.........1.........12 => 22
++ const size_t lenOfShortestInclude = sizeof("<!ENTITY a SYSTEM 'b'>") - 1;
+ const XmlBigCount countBytesOutput
+ = rootParser->m_accounting.countBytesDirect
+ + rootParser->m_accounting.countBytesIndirect;
+@@ -7662,7 +7664,9 @@ accountingGetCurrentAmplification(XML_Parser rootParser) {
+ = rootParser->m_accounting.countBytesDirect
+ ? (countBytesOutput
+ / (float)(rootParser->m_accounting.countBytesDirect))
+- : 1.0f;
++ : ((lenOfShortestInclude
++ + rootParser->m_accounting.countBytesIndirect)
++ / (float)lenOfShortestInclude);
+ assert(! rootParser->m_parentParser);
+ return amplificationFactor;
+ }
diff --git a/meta/recipes-core/expat/expat_2.4.7.bb b/meta/recipes-core/expat/expat_2.5.0.bb
index bf1ca8d56e..31e989cfe2 100644
--- a/meta/recipes-core/expat/expat_2.4.7.bb
+++ b/meta/recipes-core/expat/expat_2.5.0.bb
@@ -4,17 +4,29 @@ HOMEPAGE = "https://github.com/libexpat/libexpat"
SECTION = "libs"
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://COPYING;md5=9e2ce3b3c4c0f2670883a23bbd7c37a9"
+LIC_FILES_CHKSUM = "file://COPYING;md5=7b3b078238d0901d3b339289117cb7fb"
VERSION_TAG = "${@d.getVar('PV').replace('.', '_')}"
SRC_URI = "https://github.com/libexpat/libexpat/releases/download/R_${VERSION_TAG}/expat-${PV}.tar.bz2 \
file://run-ptest \
+ file://CVE-2024-28757.patch \
+ file://CVE-2023-52426-001.patch \
+ file://CVE-2023-52426-002.patch \
+ file://CVE-2023-52426-003.patch \
+ file://CVE-2023-52426-004.patch \
+ file://CVE-2023-52426-005.patch \
+ file://CVE-2023-52426-006.patch \
+ file://CVE-2023-52426-007.patch \
+ file://CVE-2023-52426-008.patch \
+ file://CVE-2023-52426-009.patch \
+ file://CVE-2023-52426-010.patch \
+ file://CVE-2023-52426-011.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/libexpat/libexpat/releases/"
-SRC_URI[sha256sum] = "e149bdd8b90254c62b3d195da53a09bd531a4d63a963b0d8a5268d48dd2f6a65"
+SRC_URI[sha256sum] = "6f0e6e01f7b30025fa05c85fdad1e5d0ec7fd35d9f61b22f34998de11969ff67"
EXTRA_OECMAKE:class-native += "-DEXPAT_BUILD_DOCS=OFF"
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-gio-tests-g-file-info-don-t-assume-million-in-one-ev.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-gio-tests-g-file-info-don-t-assume-million-in-one-ev.patch
new file mode 100644
index 0000000000..c33fa88a76
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/0001-gio-tests-g-file-info-don-t-assume-million-in-one-ev.patch
@@ -0,0 +1,51 @@
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/merge_requests/2990]
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 14838522a706ebdcc3cdab661d4c368099fe3a4e Mon Sep 17 00:00:00 2001
+From: Ross Burton <ross.burton@arm.com>
+Date: Tue, 6 Jul 2021 19:26:03 +0100
+Subject: [PATCH] gio/tests/g-file-info: don't assume million-in-one events
+ don't happen
+
+The access and creation time tests create a file, gets the time in
+seconds, then gets the time in microseconds and assumes that the
+difference between the two has to be above 0.
+
+As rare as this may be, it can happen:
+
+$ stat g-file-info-test-50A450 -c %y
+2021-07-06 18:24:56.000000767 +0100
+
+Change the test to simply assert that the difference not negative to
+handle this case.
+
+This is the same fix as 289f8b, but that was just modification time.
+---
+ gio/tests/g-file-info.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gio/tests/g-file-info.c b/gio/tests/g-file-info.c
+index 59411c3a8..a213e4b92 100644
+--- a/gio/tests/g-file-info.c
++++ b/gio/tests/g-file-info.c
+@@ -239,7 +239,7 @@ test_g_file_info_access_time (void)
+ g_assert_nonnull (dt_usecs);
+
+ ts = g_date_time_difference (dt_usecs, dt);
+- g_assert_cmpint (ts, >, 0);
++ g_assert_cmpint (ts, >=, 0);
+ g_assert_cmpint (ts, <, G_USEC_PER_SEC);
+
+ /* Try round-tripping the access time. */
+@@ -316,7 +316,7 @@ test_g_file_info_creation_time (void)
+ g_assert_nonnull (dt_usecs);
+
+ ts = g_date_time_difference (dt_usecs, dt);
+- g_assert_cmpint (ts, >, 0);
++ g_assert_cmpint (ts, >=, 0);
+ g_assert_cmpint (ts, <, G_USEC_PER_SEC);
+
+ /* Try round-tripping the creation time. */
+--
+2.34.1
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch
new file mode 100644
index 0000000000..65174efa6d
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch
@@ -0,0 +1,291 @@
+From 5f4485c4ff57fdefb1661531788def7ca5a47328 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 04:19:44 +0000
+Subject: [PATCH] gvariant-serialiser: Check offset table entry size is minimal
+
+The entries in an offset table (which is used for variable sized arrays
+and tuples containing variable sized members) are sized so that they can
+address every byte in the overall variant.
+
+The specification requires that for a variant to be in normal form, its
+offset table entries must be the minimum width such that they can
+address every byte in the variant.
+
+That minimality requirement was not checked in
+`g_variant_is_normal_form()`, leading to two different byte arrays being
+interpreted as the normal form of a given variant tree. That kind of
+confusion could potentially be exploited, and is certainly a bug.
+
+Fix it by adding the necessary checks on offset table entry width, and
+unit tests.
+
+Spotted by William Manley.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2794
+
+CVE: CVE-2023-29499
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/5f4485c4ff57fdefb1661531788def7ca5a47328]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant-serialiser.c | 19 +++-
+ glib/tests/gvariant.c | 176 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 194 insertions(+), 1 deletion(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 9c7f12a..3d6e7b8 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -694,6 +694,10 @@ gvs_variable_sized_array_get_frame_offsets (GVariantSerialised value)
+ out.data_size = last_end;
+ out.array = value.data + last_end;
+ out.length = offsets_array_size / out.offset_size;
++
++ if (out.length > 0 && gvs_calculate_total_size (last_end, out.length) != value.size)
++ return out; /* offset size not minimal */
++
+ out.is_normal = TRUE;
+
+ return out;
+@@ -1201,6 +1205,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ gsize length;
+ gsize offset;
+ gsize i;
++ gsize offset_table_size;
+
+ /* as per the comment in gvs_tuple_get_child() */
+ if G_UNLIKELY (value.data == NULL && value.size != 0)
+@@ -1305,7 +1310,19 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ }
+ }
+
+- return offset_ptr == offset;
++ /* @offset_ptr has been counting backwards from the end of the variant, to
++ * find the beginning of the offset table. @offset has been counting forwards
++ * from the beginning of the variant to find the end of the data. They should
++ * have met in the middle. */
++ if (offset_ptr != offset)
++ return FALSE;
++
++ offset_table_size = value.size - offset_ptr;
++ if (value.size > 0 &&
++ gvs_calculate_total_size (offset, offset_table_size / offset_size) != value.size)
++ return FALSE; /* offset size not minimal */
++
++ return TRUE;
+ }
+
+ /* Variants {{{2
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 44e4451..ad45043 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -5076,6 +5076,86 @@ test_normal_checking_array_offsets2 (void)
+ g_variant_unref (variant);
+ }
+
++/* Test that an otherwise-valid serialised GVariant is considered non-normal if
++ * its offset table entries are too wide.
++ *
++ * See §2.3.6 (Framing Offsets) of the GVariant specification. */
++static void
++test_normal_checking_array_offsets_minimal_sized (void)
++{
++ GVariantBuilder builder;
++ gsize i;
++ GVariant *aay_constructed = NULL;
++ const guint8 *data = NULL;
++ guint8 *data_owned = NULL;
++ GVariant *aay_deserialised = NULL;
++ GVariant *aay_normalised = NULL;
++
++ /* Construct an array of type aay, consisting of 128 elements which are each
++ * an empty array, i.e. `[[] * 128]`. This is chosen because the inner
++ * elements are variable sized (making the outer array variable sized, so it
++ * must have an offset table), but they are also zero-sized when serialised.
++ * So the serialised representation of @aay_constructed consists entirely of
++ * its offset table, which is entirely zeroes.
++ *
++ * The array is chosen to be 128 elements long because that means offset
++ * table entries which are 1 byte long. If the elements in the array were
++ * non-zero-sized (to the extent that the overall array is ≥256 bytes long),
++ * the offset table entries would end up being 2 bytes long. */
++ g_variant_builder_init (&builder, G_VARIANT_TYPE ("aay"));
++
++ for (i = 0; i < 128; i++)
++ g_variant_builder_add_value (&builder, g_variant_new_array (G_VARIANT_TYPE_BYTE, NULL, 0));
++
++ aay_constructed = g_variant_builder_end (&builder);
++
++ /* Verify that the constructed array is in normal form, and its serialised
++ * form is `b'\0' * 128`. */
++ g_assert_true (g_variant_is_normal_form (aay_constructed));
++ g_assert_cmpuint (g_variant_n_children (aay_constructed), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_constructed), ==, 128);
++
++ data = g_variant_get_data (aay_constructed);
++ for (i = 0; i < g_variant_get_size (aay_constructed); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Construct a serialised `aay` GVariant which is `b'\0' * 256`. This has to
++ * be a non-normal form of `[[] * 128]`, with 2-byte-long offset table
++ * entries, because each offset table entry has to be able to reference all of
++ * the byte boundaries in the container. All the entries in the offset table
++ * are zero, so all the elements of the array are zero-sized. */
++ data = data_owned = g_malloc0 (256);
++ aay_deserialised = g_variant_new_from_data (G_VARIANT_TYPE ("aay"),
++ data,
++ 256,
++ FALSE,
++ g_free,
++ g_steal_pointer (&data_owned));
++
++ g_assert_false (g_variant_is_normal_form (aay_deserialised));
++ g_assert_cmpuint (g_variant_n_children (aay_deserialised), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_deserialised), ==, 256);
++
++ data = g_variant_get_data (aay_deserialised);
++ for (i = 0; i < g_variant_get_size (aay_deserialised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Get its normal form. That should change the serialised size. */
++ aay_normalised = g_variant_get_normal_form (aay_deserialised);
++
++ g_assert_true (g_variant_is_normal_form (aay_normalised));
++ g_assert_cmpuint (g_variant_n_children (aay_normalised), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_normalised), ==, 128);
++
++ data = g_variant_get_data (aay_normalised);
++ for (i = 0; i < g_variant_get_size (aay_normalised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ g_variant_unref (aay_normalised);
++ g_variant_unref (aay_deserialised);
++ g_variant_unref (aay_constructed);
++}
++
+ /* Test that a tuple with invalidly large values in its offset table is
+ * normalised successfully without looping infinitely. */
+ static void
+@@ -5270,6 +5350,98 @@ test_normal_checking_tuple_offsets4 (void)
+ g_variant_unref (variant);
+ }
+
++/* Test that an otherwise-valid serialised GVariant is considered non-normal if
++ * its offset table entries are too wide.
++ *
++ * See §2.3.6 (Framing Offsets) of the GVariant specification. */
++static void
++test_normal_checking_tuple_offsets_minimal_sized (void)
++{
++ GString *type_string = NULL;
++ GVariantBuilder builder;
++ gsize i;
++ GVariant *ray_constructed = NULL;
++ const guint8 *data = NULL;
++ guint8 *data_owned = NULL;
++ GVariant *ray_deserialised = NULL;
++ GVariant *ray_normalised = NULL;
++
++ /* Construct a tuple of type (ay…ay), consisting of 129 members which are each
++ * an empty array, i.e. `([] * 129)`. This is chosen because the inner
++ * members are variable sized, so the outer tuple must have an offset table,
++ * but they are also zero-sized when serialised. So the serialised
++ * representation of @ray_constructed consists entirely of its offset table,
++ * which is entirely zeroes.
++ *
++ * The tuple is chosen to be 129 members long because that means it has 128
++ * offset table entries which are 1 byte long each. If the members in the
++ * tuple were non-zero-sized (to the extent that the overall tuple is ≥256
++ * bytes long), the offset table entries would end up being 2 bytes long.
++ *
++ * 129 members are used unlike 128 array elements in
++ * test_normal_checking_array_offsets_minimal_sized(), because the last member
++ * in a tuple never needs an offset table entry. */
++ type_string = g_string_new ("");
++ g_string_append_c (type_string, '(');
++ for (i = 0; i < 129; i++)
++ g_string_append (type_string, "ay");
++ g_string_append_c (type_string, ')');
++
++ g_variant_builder_init (&builder, G_VARIANT_TYPE (type_string->str));
++
++ for (i = 0; i < 129; i++)
++ g_variant_builder_add_value (&builder, g_variant_new_array (G_VARIANT_TYPE_BYTE, NULL, 0));
++
++ ray_constructed = g_variant_builder_end (&builder);
++
++ /* Verify that the constructed tuple is in normal form, and its serialised
++ * form is `b'\0' * 128`. */
++ g_assert_true (g_variant_is_normal_form (ray_constructed));
++ g_assert_cmpuint (g_variant_n_children (ray_constructed), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_constructed), ==, 128);
++
++ data = g_variant_get_data (ray_constructed);
++ for (i = 0; i < g_variant_get_size (ray_constructed); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Construct a serialised `(ay…ay)` GVariant which is `b'\0' * 256`. This has
++ * to be a non-normal form of `([] * 129)`, with 2-byte-long offset table
++ * entries, because each offset table entry has to be able to reference all of
++ * the byte boundaries in the container. All the entries in the offset table
++ * are zero, so all the members of the tuple are zero-sized. */
++ data = data_owned = g_malloc0 (256);
++ ray_deserialised = g_variant_new_from_data (G_VARIANT_TYPE (type_string->str),
++ data,
++ 256,
++ FALSE,
++ g_free,
++ g_steal_pointer (&data_owned));
++
++ g_assert_false (g_variant_is_normal_form (ray_deserialised));
++ g_assert_cmpuint (g_variant_n_children (ray_deserialised), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_deserialised), ==, 256);
++
++ data = g_variant_get_data (ray_deserialised);
++ for (i = 0; i < g_variant_get_size (ray_deserialised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Get its normal form. That should change the serialised size. */
++ ray_normalised = g_variant_get_normal_form (ray_deserialised);
++
++ g_assert_true (g_variant_is_normal_form (ray_normalised));
++ g_assert_cmpuint (g_variant_n_children (ray_normalised), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_normalised), ==, 128);
++
++ data = g_variant_get_data (ray_normalised);
++ for (i = 0; i < g_variant_get_size (ray_normalised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ g_variant_unref (ray_normalised);
++ g_variant_unref (ray_deserialised);
++ g_variant_unref (ray_constructed);
++ g_string_free (type_string, TRUE);
++}
++
+ /* Test that an empty object path is normalised successfully to the base object
+ * path, ‘/’. */
+ static void
+@@ -5414,6 +5586,8 @@ main (int argc, char **argv)
+ test_normal_checking_array_offsets);
+ g_test_add_func ("/gvariant/normal-checking/array-offsets2",
+ test_normal_checking_array_offsets2);
++ g_test_add_func ("/gvariant/normal-checking/array-offsets/minimal-sized",
++ test_normal_checking_array_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets2",
+@@ -5422,6 +5596,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuple_offsets3);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
+ test_normal_checking_tuple_offsets4);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets/minimal-sized",
++ test_normal_checking_tuple_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+ test_normal_checking_empty_object_path);
+
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch
new file mode 100644
index 0000000000..cc4b4055b2
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch
@@ -0,0 +1,97 @@
+From 4c4cf568f0f710baf0bd04d52df715636bc6b971 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 04:23:41 +0000
+Subject: [PATCH] gvariant: Fix g_variant_byteswap() returning non-normal data
+
+If `g_variant_byteswap()` was called on a non-normal variant of a type
+which doesn’t need byteswapping, it would return a non-normal output.
+
+That contradicts the documentation, which says that the return value is
+always in normal form.
+
+Fix the code so it matches the documentation.
+
+Includes a unit test.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2797
+
+CVE: CVE-2023-32611
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/4c4cf568f0f710baf0bd04d52df715636bc6b971]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant.c | 8 +++++---
+ glib/tests/gvariant.c | 24 ++++++++++++++++++++++++
+ 2 files changed, 29 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 30a3280..7e568d1 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -6004,14 +6004,16 @@ g_variant_byteswap (GVariant *value)
+ g_variant_serialised_byteswap (serialised);
+
+ bytes = g_bytes_new_take (serialised.data, serialised.size);
+- new = g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE);
++ new = g_variant_ref_sink (g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE));
+ g_bytes_unref (bytes);
+ }
+ else
+ /* contains no multi-byte data */
+- new = value;
++ new = g_variant_get_normal_form (value);
+
+- return g_variant_ref_sink (new);
++ g_assert (g_variant_is_trusted (new));
++
++ return g_steal_pointer (&new);
+ }
+
+ /**
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index ad45043..36c86c2 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -3818,6 +3818,29 @@ test_gv_byteswap (void)
+ g_free (string);
+ }
+
++static void
++test_gv_byteswap_non_normal_non_aligned (void)
++{
++ const guint8 data[] = { 0x02 };
++ GVariant *v = NULL;
++ GVariant *v_byteswapped = NULL;
++
++ g_test_summary ("Test that calling g_variant_byteswap() on a variant which "
++ "is in non-normal form and doesn’t need byteswapping returns "
++ "the same variant in normal form.");
++
++ v = g_variant_new_from_data (G_VARIANT_TYPE_BOOLEAN, data, sizeof (data), FALSE, NULL, NULL);
++ g_assert_false (g_variant_is_normal_form (v));
++
++ v_byteswapped = g_variant_byteswap (v);
++ g_assert_true (g_variant_is_normal_form (v_byteswapped));
++
++ g_assert_cmpvariant (v, v_byteswapped);
++
++ g_variant_unref (v);
++ g_variant_unref (v_byteswapped);
++}
++
+ static void
+ test_parser (void)
+ {
+@@ -5553,6 +5576,7 @@ main (int argc, char **argv)
+ g_test_add_func ("/gvariant/builder-memory", test_builder_memory);
+ g_test_add_func ("/gvariant/hashing", test_hashing);
+ g_test_add_func ("/gvariant/byteswap", test_gv_byteswap);
++ g_test_add_func ("/gvariant/byteswap/non-normal-non-aligned", test_gv_byteswap_non_normal_non_aligned);
+ g_test_add_func ("/gvariant/parser", test_parses);
+ g_test_add_func ("/gvariant/parser/integer-bounds", test_parser_integer_bounds);
+ g_test_add_func ("/gvariant/parser/recursion", test_parser_recursion);
+--
+2.40.0
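The patch above guarantees that g_variant_byteswap() always returns data in normal form, even when the input type needs no byteswapping. The stand-alone sketch below (not part of the patch; it assumes a GLib build carrying this fix and compiles with `gcc demo.c $(pkg-config --cflags --libs glib-2.0)`) mirrors the added unit test to show the behaviour being asserted:

#include <glib.h>

int
main (void)
{
  /* 0x02 is a non-normal encoding of a boolean (only 0x00 and 0x01 are normal). */
  const guint8 data[] = { 0x02 };
  GVariant *v = g_variant_new_from_data (G_VARIANT_TYPE_BOOLEAN,
                                         data, sizeof data, FALSE, NULL, NULL);
  GVariant *swapped = g_variant_byteswap (v);   /* booleans need no byteswapping */

  /* With the fix applied this always prints 1; previously the result could
   * still be in non-normal form. */
  g_print ("normal form: %d\n", g_variant_is_normal_form (swapped));

  g_variant_unref (swapped);
  g_variant_unref (v);
  return 0;
}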
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch
new file mode 100644
index 0000000000..304c15bceb
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch
@@ -0,0 +1,282 @@
+From 7d7efce1d9c379fdd7d2ff58caea88f8806fdd2e Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 05:05:39 +0000
+Subject: [PATCH] gvariant: Allow g_variant_byteswap() to operate on tree-form
+ variants
+
+This avoids needing to always serialise a variant before byteswapping it.
+With variants in non-normal forms, serialisation can result in a large
+increase in size of the variant, and a lot of allocations for leaf
+`GVariant`s. This can lead to a denial of service attack.
+
+Avoid that by changing byteswapping so that it happens on the tree form
+of the variant if the input is in non-normal form. If the input is in
+normal form (either serialised or in tree form), continue using the
+existing code as byteswapping an already-serialised normal variant is
+about 3× faster than byteswapping on the equivalent tree form.
+
+The existing unit tests cover byteswapping well, but need some
+adaptation so that they operate on tree form variants too.
+
+I considered dropping the serialised byteswapping code and doing all
+byteswapping on tree-form variants, as that would make maintenance
+simpler (avoiding having two parallel implementations of byteswapping).
+However, most inputs to `g_variant_byteswap()` are likely to be
+serialised variants (coming from a byte array of input from some foreign
+source) and most of them are going to be in normal form (as corruption
+and malicious action are rare). So getting rid of the serialised
+byteswapping code would impose quite a performance penalty on the common
+case.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2797
+
+CVE: CVE-2023-32611
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/7d7efce1d9c379fdd7d2ff58caea88f8806fdd2e]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant.c | 83 ++++++++++++++++++++++++++++++++-----------
+ glib/tests/gvariant.c | 57 +++++++++++++++++++++++++----
+ 2 files changed, 113 insertions(+), 27 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 7e568d1..65b8443 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5839,7 +5839,8 @@ g_variant_iter_loop (GVariantIter *iter,
+
+ /* Serialized data {{{1 */
+ static GVariant *
+-g_variant_deep_copy (GVariant *value)
++g_variant_deep_copy (GVariant *value,
++ gboolean byteswap)
+ {
+ switch (g_variant_classify (value))
+ {
+@@ -5857,7 +5858,7 @@ g_variant_deep_copy (GVariant *value)
+ for (i = 0, n_children = g_variant_n_children (value); i < n_children; i++)
+ {
+ GVariant *child = g_variant_get_child_value (value, i);
+- g_variant_builder_add_value (&builder, g_variant_deep_copy (child));
++ g_variant_builder_add_value (&builder, g_variant_deep_copy (child, byteswap));
+ g_variant_unref (child);
+ }
+
+@@ -5871,28 +5872,63 @@ g_variant_deep_copy (GVariant *value)
+ return g_variant_new_byte (g_variant_get_byte (value));
+
+ case G_VARIANT_CLASS_INT16:
+- return g_variant_new_int16 (g_variant_get_int16 (value));
++ if (byteswap)
++ return g_variant_new_int16 (GUINT16_SWAP_LE_BE (g_variant_get_int16 (value)));
++ else
++ return g_variant_new_int16 (g_variant_get_int16 (value));
+
+ case G_VARIANT_CLASS_UINT16:
+- return g_variant_new_uint16 (g_variant_get_uint16 (value));
++ if (byteswap)
++ return g_variant_new_uint16 (GUINT16_SWAP_LE_BE (g_variant_get_uint16 (value)));
++ else
++ return g_variant_new_uint16 (g_variant_get_uint16 (value));
+
+ case G_VARIANT_CLASS_INT32:
+- return g_variant_new_int32 (g_variant_get_int32 (value));
++ if (byteswap)
++ return g_variant_new_int32 (GUINT32_SWAP_LE_BE (g_variant_get_int32 (value)));
++ else
++ return g_variant_new_int32 (g_variant_get_int32 (value));
+
+ case G_VARIANT_CLASS_UINT32:
+- return g_variant_new_uint32 (g_variant_get_uint32 (value));
++ if (byteswap)
++ return g_variant_new_uint32 (GUINT32_SWAP_LE_BE (g_variant_get_uint32 (value)));
++ else
++ return g_variant_new_uint32 (g_variant_get_uint32 (value));
+
+ case G_VARIANT_CLASS_INT64:
+- return g_variant_new_int64 (g_variant_get_int64 (value));
++ if (byteswap)
++ return g_variant_new_int64 (GUINT64_SWAP_LE_BE (g_variant_get_int64 (value)));
++ else
++ return g_variant_new_int64 (g_variant_get_int64 (value));
+
+ case G_VARIANT_CLASS_UINT64:
+- return g_variant_new_uint64 (g_variant_get_uint64 (value));
++ if (byteswap)
++ return g_variant_new_uint64 (GUINT64_SWAP_LE_BE (g_variant_get_uint64 (value)));
++ else
++ return g_variant_new_uint64 (g_variant_get_uint64 (value));
+
+ case G_VARIANT_CLASS_HANDLE:
+- return g_variant_new_handle (g_variant_get_handle (value));
++ if (byteswap)
++ return g_variant_new_handle (GUINT32_SWAP_LE_BE (g_variant_get_handle (value)));
++ else
++ return g_variant_new_handle (g_variant_get_handle (value));
+
+ case G_VARIANT_CLASS_DOUBLE:
+- return g_variant_new_double (g_variant_get_double (value));
++ if (byteswap)
++ {
++ /* We have to convert the double to a uint64 here using a union,
++ * because a cast will round it numerically. */
++ union
++ {
++ guint64 u64;
++ gdouble dbl;
++ } u1, u2;
++ u1.dbl = g_variant_get_double (value);
++ u2.u64 = GUINT64_SWAP_LE_BE (u1.u64);
++ return g_variant_new_double (u2.dbl);
++ }
++ else
++ return g_variant_new_double (g_variant_get_double (value));
+
+ case G_VARIANT_CLASS_STRING:
+ return g_variant_new_string (g_variant_get_string (value, NULL));
+@@ -5947,7 +5983,7 @@ g_variant_get_normal_form (GVariant *value)
+ if (g_variant_is_normal_form (value))
+ return g_variant_ref (value);
+
+- trusted = g_variant_deep_copy (value);
++ trusted = g_variant_deep_copy (value, FALSE);
+ g_assert (g_variant_is_trusted (trusted));
+
+ return g_variant_ref_sink (trusted);
+@@ -5967,6 +6003,11 @@ g_variant_get_normal_form (GVariant *value)
+ * contain multi-byte numeric data. That include strings, booleans,
+ * bytes and containers containing only these things (recursively).
+ *
++ * While this function can safely handle untrusted, non-normal data, it is
++ * recommended to check whether the input is in normal form beforehand, using
++ * g_variant_is_normal_form(), and to reject non-normal inputs if your
++ * application can be strict about what inputs it rejects.
++ *
+ * The returned value is always in normal form and is marked as trusted.
+ *
+ * Returns: (transfer full): the byteswapped form of @value
+@@ -5984,22 +6025,21 @@ g_variant_byteswap (GVariant *value)
+
+ g_variant_type_info_query (type_info, &alignment, NULL);
+
+- if (alignment)
+- /* (potentially) contains multi-byte numeric data */
++ if (alignment && g_variant_is_normal_form (value))
+ {
++ /* (potentially) contains multi-byte numeric data, but is also already in
++ * normal form so we can use a faster byteswapping codepath on the
++ * serialised data */
+ GVariantSerialised serialised = { 0, };
+- GVariant *trusted;
+ GBytes *bytes;
+
+- trusted = g_variant_get_normal_form (value);
+- serialised.type_info = g_variant_get_type_info (trusted);
+- serialised.size = g_variant_get_size (trusted);
++ serialised.type_info = g_variant_get_type_info (value);
++ serialised.size = g_variant_get_size (value);
+ serialised.data = g_malloc (serialised.size);
+- serialised.depth = g_variant_get_depth (trusted);
++ serialised.depth = g_variant_get_depth (value);
+ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
+ serialised.checked_offsets_up_to = G_MAXSIZE;
+- g_variant_store (trusted, serialised.data);
+- g_variant_unref (trusted);
++ g_variant_store (value, serialised.data);
+
+ g_variant_serialised_byteswap (serialised);
+
+@@ -6007,6 +6047,9 @@ g_variant_byteswap (GVariant *value)
+ new = g_variant_ref_sink (g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE));
+ g_bytes_unref (bytes);
+ }
++ else if (alignment)
++ /* (potentially) contains multi-byte numeric data */
++ new = g_variant_ref_sink (g_variant_deep_copy (value, TRUE));
+ else
+ /* contains no multi-byte data */
+ new = g_variant_get_normal_form (value);
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 36c86c2..43091f2 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -2280,24 +2280,67 @@ serialise_tree (TreeInstance *tree,
+ static void
+ test_byteswap (void)
+ {
+- GVariantSerialised one = { 0, }, two = { 0, };
++ GVariantSerialised one = { 0, }, two = { 0, }, three = { 0, };
+ TreeInstance *tree;
+-
++ GVariant *one_variant = NULL;
++ GVariant *two_variant = NULL;
++ GVariant *two_byteswapped = NULL;
++ GVariant *three_variant = NULL;
++ GVariant *three_byteswapped = NULL;
++ guint8 *three_data_copy = NULL;
++ gsize three_size_copy = 0;
++
++ /* Write a tree out twice, once normally and once byteswapped. */
+ tree = tree_instance_new (NULL, 3);
+ serialise_tree (tree, &one);
+
++ one_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (one.type_info)),
++ one.data, one.size, FALSE, NULL, NULL);
++
+ i_am_writing_byteswapped = TRUE;
+ serialise_tree (tree, &two);
++ serialise_tree (tree, &three);
+ i_am_writing_byteswapped = FALSE;
+
+- g_variant_serialised_byteswap (two);
+-
+- g_assert_cmpmem (one.data, one.size, two.data, two.size);
+- g_assert_cmpuint (one.depth, ==, two.depth);
+-
++ /* Swap the first byteswapped one back using the function we want to test. */
++ two_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (two.type_info)),
++ two.data, two.size, FALSE, NULL, NULL);
++ two_byteswapped = g_variant_byteswap (two_variant);
++
++ /* Make the second byteswapped one non-normal (hopefully), and then byteswap
++ * it back using the function we want to test in its non-normal mode.
++ * This might not work because it’s not necessarily possible to make an
++ * arbitrary random variant non-normal. Adding a single zero byte to the end
++ * often makes something non-normal but still readable. */
++ three_size_copy = three.size + 1;
++ three_data_copy = g_malloc (three_size_copy);
++ memcpy (three_data_copy, three.data, three.size);
++ three_data_copy[three.size] = '\0';
++
++ three_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (three.type_info)),
++ three_data_copy, three_size_copy, FALSE, NULL, NULL);
++ three_byteswapped = g_variant_byteswap (three_variant);
++
++ /* Check they’re the same. We can always compare @one_variant and
++ * @two_byteswapped. We can only compare @two_byteswapped and
++ * @three_byteswapped if @two_variant and @three_variant are equal: in that
++ * case, the corruption to @three_variant was enough to make it non-normal but
++ * not enough to change its value. */
++ g_assert_cmpvariant (one_variant, two_byteswapped);
++
++ if (g_variant_equal (two_variant, three_variant))
++ g_assert_cmpvariant (two_byteswapped, three_byteswapped);
++
++ g_variant_unref (three_byteswapped);
++ g_variant_unref (three_variant);
++ g_variant_unref (two_byteswapped);
++ g_variant_unref (two_variant);
++ g_variant_unref (one_variant);
+ tree_instance_free (tree);
+ g_free (one.data);
+ g_free (two.data);
++ g_free (three.data);
++ g_free (three_data_copy);
+ }
+
+ static void
+--
+2.40.0
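One detail worth noting in the tree-form byteswapping code added above is how doubles are handled: a numeric cast to guint64 would round the value, so the patch reinterprets the bits through a union before swapping. A minimal stand-alone restatement of that idea (hypothetical helper name, not code from the patch) looks like this:

#include <glib.h>

/* Swap the byte order of a double without changing its bit pattern. */
static gdouble
byteswap_double (gdouble in)
{
  union { guint64 u64; gdouble dbl; } u1, u2;

  u1.dbl = in;
  u2.u64 = GUINT64_SWAP_LE_BE (u1.u64);
  return u2.dbl;
}

Applying byteswap_double() twice returns the original value unchanged, whereas a cast-based version would first truncate the double to an integer and lose the original bit pattern.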
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch
new file mode 100644
index 0000000000..311993625a
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch
@@ -0,0 +1,50 @@
+From 21a204147b16539b3eda3143b32844c49e29f4d4 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 11:33:49 +0000
+Subject: [PATCH] gvariant: Propagate trust when getting a child of a
+ serialised variant
+
+If a variant is trusted, that means all its children are trusted, so
+ensure that their checked offsets are set as such.
+
+This allows a lot of the offset table checks to be avoided when getting
+children from trusted serialised tuples, which speeds things up.
+
+No unit test is included because this is just a performance fix. If
+there are other slownesses, or regressions, in serialised `GVariant`
+performance, the fuzzing setup will catch them like it did this one.
+
+This change does reduce the time to run the oss-fuzz reproducer from 80s
+to about 0.7s on my machine.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2841
+oss-fuzz#54314
+
+CVE: CVE-2023-32636
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/21a204147b16539b3eda3143b32844c49e29f4d4]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant-core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 7b71efc..a2c7d2d 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -1195,8 +1195,8 @@ g_variant_get_child_value (GVariant *value,
+ child->contents.serialised.bytes =
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
+- child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
+- child->contents.serialised.checked_offsets_up_to = s_child.checked_offsets_up_to;
++ child->contents.serialised.ordered_offsets_up_to = (value->state & STATE_TRUSTED) ? G_MAXSIZE : s_child.ordered_offsets_up_to;
++ child->contents.serialised.checked_offsets_up_to = (value->state & STATE_TRUSTED) ? G_MAXSIZE : s_child.checked_offsets_up_to;
+
+ return child;
+ }
+--
+2.40.0
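For context on what the two changed lines above buy in practice: when serialised data is loaded as trusted, children fetched from it now inherit the "offsets already checked" state instead of re-validating their offset tables on every access. A hedged sketch of a calling pattern that benefits (using data we serialised ourselves, so marking it trusted is safe):

#include <glib.h>

int
main (void)
{
  GVariant *built = g_variant_new_parsed ("('hi', 'there')");
  GBytes *bytes = g_variant_get_data_as_bytes (built);

  /* trusted = TRUE: with this patch, children of @v skip the offset table
   * checks because the parent is already known to be in normal form. */
  GVariant *v = g_variant_new_from_bytes (G_VARIANT_TYPE ("(ss)"), bytes, TRUE);
  GVariant *child = g_variant_get_child_value (v, 0);

  g_print ("%s\n", g_variant_get_string (child, NULL));

  g_variant_unref (child);
  g_variant_unref (v);
  g_bytes_unref (bytes);
  g_variant_unref (built);
  return 0;
}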
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch
new file mode 100644
index 0000000000..b5cb4273b6
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch
@@ -0,0 +1,155 @@
+From 78da5faccb3e065116b75b3ff87ff55381da6c76 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 11:24:43 +0000
+Subject: [PATCH] gvariant: Check offset table doesn't fall outside variant
+ bounds
+
+When dereferencing the first entry in the offset table for a tuple,
+check that it doesn’t fall outside the bounds of the variant first.
+
+This prevents an out-of-bounds read from some non-normal tuples.
+
+This bug was introduced in commit 73d0aa81c2575a5c9ae77d.
+
+Includes a unit test, although the test will likely only catch the
+original bug if run with asan enabled.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2840
+oss-fuzz#54302
+
+CVE: CVE-2023-32643
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/78da5faccb3e065116b75b3ff87ff55381da6c76]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant-serialiser.c | 12 ++++++--
+ glib/tests/gvariant.c | 63 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 72 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 3d6e7b8..5abb87e 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -979,7 +979,8 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+
+ member_info = g_variant_type_info_member_info (value.type_info, index_);
+
+- if (member_info->i + 1)
++ if (member_info->i + 1 &&
++ offset_size * (member_info->i + 1) <= value.size)
+ member_start = gvs_read_unaligned_le (value.data + value.size -
+ offset_size * (member_info->i + 1),
+ offset_size);
+@@ -990,7 +991,8 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ member_start &= member_info->b;
+ member_start |= member_info->c;
+
+- if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST &&
++ offset_size * (member_info->i + 1) <= value.size)
+ member_end = value.size - offset_size * (member_info->i + 1);
+
+ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
+@@ -1001,11 +1003,15 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ member_end = member_start + fixed_size;
+ }
+
+- else /* G_VARIANT_MEMBER_ENDING_OFFSET */
++ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_OFFSET &&
++ offset_size * (member_info->i + 2) <= value.size)
+ member_end = gvs_read_unaligned_le (value.data + value.size -
+ offset_size * (member_info->i + 2),
+ offset_size);
+
++ else /* invalid */
++ member_end = G_MAXSIZE;
++
+ if (out_member_start != NULL)
+ *out_member_start = member_start;
+ if (out_member_end != NULL)
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 43091f2..ab0361a 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -5416,6 +5416,67 @@ test_normal_checking_tuple_offsets4 (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that dereferencing the first element in the offset
++ * table doesn’t dereference memory before the start of the GVariant. The first
++ * element in the offset table gives the offset of the final member in the
++ * tuple (the offset table is stored in reverse), and the position of this final
++ * member is needed to check that none of the tuple members overlap with the
++ * offset table
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2840 */
++static void
++test_normal_checking_tuple_offsets5 (void)
++{
++ /* A tuple of type (sss) in normal form would have an offset table with two
++ * entries:
++ * - The first entry (lowest index in the table) gives the offset of the
++ * third `s` in the tuple, as the offset table is reversed compared to the
++ * tuple members.
++ * - The second entry (highest index in the table) gives the offset of the
++ * second `s` in the tuple.
++ * - The offset of the first `s` in the tuple is always 0.
++ *
++ * See §2.5.4 (Structures) of the GVariant specification for details, noting
++ * that the table is only laid out this way because all three members of the
++ * tuple have non-fixed sizes.
++ *
++ * It’s not clear whether the 0xaa data of this variant is part of the strings
++ * in the tuple, or part of the offset table. It doesn’t really matter. This
++ * is a regression test to check that the code to validate the offset table
++ * doesn’t unconditionally try to access the first entry in the offset table
++ * by subtracting the table size from the end of the GVariant data.
++ *
++ * In this non-normal case, that would result in an address off the start of
++ * the GVariant data, and an out-of-bounds read, because the GVariant is one
++ * byte long, but the offset table is calculated as two bytes long (with 1B
++ * sized entries) from the tuple’s type.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(sss)");
++ const guint8 data[] = { 0xaa };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ g_test_bug ("https://gitlab.gnome.org/GNOME/glib/-/issues/2840");
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++
++ expected = g_variant_new_parsed ("('', '', '')");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that an otherwise-valid serialised GVariant is considered non-normal if
+ * its offset table entries are too wide.
+ *
+@@ -5663,6 +5724,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuple_offsets3);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
+ test_normal_checking_tuple_offsets4);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets5",
++ test_normal_checking_tuple_offsets5);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets/minimal-sized",
+ test_normal_checking_tuple_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+--
+2.40.0
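The regression test added above exercises the exact out-of-bounds case; at the usage level, the same defensive pattern applies to any untrusted serialised input: load it untrusted and take the normal form before using it. A minimal sketch (assuming a GLib build with this fix, and reusing the one-byte input from the test):

#include <glib.h>

int
main (void)
{
  /* One arbitrary byte presented as a (sss) tuple: far too small to contain a
   * real offset table, which is exactly the case the patch bounds-checks. */
  const guint8 buf[] = { 0xaa };
  GVariant *untrusted = g_variant_new_from_data (G_VARIANT_TYPE ("(sss)"),
                                                 buf, sizeof buf,
                                                 FALSE, NULL, NULL);
  GVariant *safe = g_variant_get_normal_form (untrusted);
  gchar *printed = g_variant_print (safe, FALSE);

  /* Normalises to ('', '', '') instead of reading past the one-byte buffer. */
  g_print ("%s\n", printed);

  g_free (printed);
  g_variant_unref (safe);
  g_variant_unref (untrusted);
  return 0;
}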
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch
new file mode 100644
index 0000000000..2b7536c42d
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch
@@ -0,0 +1,104 @@
+From 1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:04:49 +0000
+Subject: [PATCH] gvariant-core: Consolidate construction of
+ `GVariantSerialised`
+
+So I only need to change it in one place.
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1]
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ glib/gvariant-core.c | 49 ++++++++++++++++++++++----------------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index a31d396..496f2e2 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -349,6 +349,27 @@ g_variant_ensure_size (GVariant *value)
+ }
+ }
+
++/* < private >
++ * g_variant_to_serialised:
++ * @value: a #GVariant
++ *
++ * Gets a GVariantSerialised for a GVariant in state STATE_SERIALISED.
++ */
++inline static GVariantSerialised
++g_variant_to_serialised (GVariant *value)
++{
++ g_assert (value->state & STATE_SERIALISED);
++ {
++ GVariantSerialised serialised = {
++ value->type_info,
++ (gpointer) value->contents.serialised.data,
++ value->size,
++ value->depth,
++ };
++ return serialised;
++ }
++}
++
+ /* < private >
+ * g_variant_serialise:
+ * @value: a #GVariant
+@@ -1007,16 +1028,8 @@ g_variant_n_children (GVariant *value)
+ g_variant_lock (value);
+
+ if (value->state & STATE_SERIALISED)
+- {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth,
+- };
+-
+- n_children = g_variant_serialised_n_children (serialised);
+- }
++ n_children = g_variant_serialised_n_children (
++ g_variant_to_serialised (value));
+ else
+ n_children = value->contents.tree.n_children;
+
+@@ -1083,12 +1096,7 @@ g_variant_get_child_value (GVariant *value,
+ }
+
+ {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth,
+- };
++ GVariantSerialised serialised = g_variant_to_serialised (value);
+ GVariantSerialised s_child;
+ GVariant *child;
+
+@@ -1201,14 +1209,7 @@ g_variant_is_normal_form (GVariant *value)
+
+ if (value->state & STATE_SERIALISED)
+ {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth
+- };
+-
+- if (g_variant_serialised_is_normal (serialised))
++ if (g_variant_serialised_is_normal (g_variant_to_serialised (value)))
+ value->state |= STATE_TRUSTED;
+ }
+ else
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch
new file mode 100644
index 0000000000..4eff85a5f3
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch
@@ -0,0 +1,211 @@
+From 446e69f5edd72deb2196dee36bbaf8056caf6948 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:39:34 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out functions for dealing with
+ framing offsets
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/446e69f5edd72deb2196dee36bbaf8056caf6948]
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ glib/gvariant-serialiser.c | 108 +++++++++++++++++++------------------
+ 1 file changed, 57 insertions(+), 51 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 7b13381..e71248e 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -633,30 +633,62 @@ gvs_calculate_total_size (gsize body_size,
+ return body_size + 8 * offsets;
+ }
+
++struct Offsets
++{
++ gsize data_size;
++
++ guchar *array;
++ gsize length;
++ guint offset_size;
++
++ gboolean is_normal;
++};
++
+ static gsize
+-gvs_variable_sized_array_n_children (GVariantSerialised value)
++gvs_offsets_get_offset_n (struct Offsets *offsets,
++ gsize n)
++{
++ return gvs_read_unaligned_le (
++ offsets->array + (offsets->offset_size * n), offsets->offset_size);
++}
++
++static struct Offsets
++gvs_variable_sized_array_get_frame_offsets (GVariantSerialised value)
+ {
++ struct Offsets out = { 0, };
+ gsize offsets_array_size;
+- gsize offset_size;
+ gsize last_end;
+
+ if (value.size == 0)
+- return 0;
+-
+- offset_size = gvs_get_offset_size (value.size);
++ {
++ out.is_normal = TRUE;
++ return out;
++ }
+
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
++ out.offset_size = gvs_get_offset_size (value.size);
++ last_end = gvs_read_unaligned_le (value.data + value.size - out.offset_size,
++ out.offset_size);
+
+ if (last_end > value.size)
+- return 0;
++ return out; /* offsets not normal */
+
+ offsets_array_size = value.size - last_end;
+
+- if (offsets_array_size % offset_size)
+- return 0;
++ if (offsets_array_size % out.offset_size)
++ return out; /* offsets not normal */
++
++ out.data_size = last_end;
++ out.array = value.data + last_end;
++ out.length = offsets_array_size / out.offset_size;
++ out.is_normal = TRUE;
+
+- return offsets_array_size / offset_size;
++ return out;
++}
++
++static gsize
++gvs_variable_sized_array_n_children (GVariantSerialised value)
++{
++ return gvs_variable_sized_array_get_frame_offsets (value).length;
+ }
+
+ static GVariantSerialised
+@@ -664,8 +696,9 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ gsize index_)
+ {
+ GVariantSerialised child = { 0, };
+- gsize offset_size;
+- gsize last_end;
++
++ struct Offsets offsets = gvs_variable_sized_array_get_frame_offsets (value);
++
+ gsize start;
+ gsize end;
+
+@@ -673,18 +706,11 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (child.type_info);
+ child.depth = value.depth + 1;
+
+- offset_size = gvs_get_offset_size (value.size);
+-
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
+-
+ if (index_ > 0)
+ {
+ guint alignment;
+
+- start = gvs_read_unaligned_le (value.data + last_end +
+- (offset_size * (index_ - 1)),
+- offset_size);
++ start = gvs_offsets_get_offset_n (&offsets, index_ - 1);
+
+ g_variant_type_info_query (child.type_info, &alignment, NULL);
+ start += (-start) & alignment;
+@@ -692,11 +718,9 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ else
+ start = 0;
+
+- end = gvs_read_unaligned_le (value.data + last_end +
+- (offset_size * index_),
+- offset_size);
++ end = gvs_offsets_get_offset_n (&offsets, index_);
+
+- if (start < end && end <= value.size && end <= last_end)
++ if (start < end && end <= value.size && end <= offsets.data_size)
+ {
+ child.data = value.data + start;
+ child.size = end - start;
+@@ -768,34 +792,16 @@ static gboolean
+ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ {
+ GVariantSerialised child = { 0, };
+- gsize offsets_array_size;
+- guchar *offsets_array;
+- guint offset_size;
+ guint alignment;
+- gsize last_end;
+- gsize length;
+ gsize offset;
+ gsize i;
+
+- if (value.size == 0)
+- return TRUE;
+-
+- offset_size = gvs_get_offset_size (value.size);
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
++ struct Offsets offsets = gvs_variable_sized_array_get_frame_offsets (value);
+
+- if (last_end > value.size)
++ if (!offsets.is_normal)
+ return FALSE;
+
+- offsets_array_size = value.size - last_end;
+-
+- if (offsets_array_size % offset_size)
+- return FALSE;
+-
+- offsets_array = value.data + value.size - offsets_array_size;
+- length = offsets_array_size / offset_size;
+-
+- if (length == 0)
++ if (value.size != 0 && offsets.length == 0)
+ return FALSE;
+
+ child.type_info = g_variant_type_info_element (value.type_info);
+@@ -803,14 +809,14 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ child.depth = value.depth + 1;
+ offset = 0;
+
+- for (i = 0; i < length; i++)
++ for (i = 0; i < offsets.length; i++)
+ {
+ gsize this_end;
+
+- this_end = gvs_read_unaligned_le (offsets_array + offset_size * i,
+- offset_size);
++ this_end = gvs_read_unaligned_le (offsets.array + offsets.offset_size * i,
++ offsets.offset_size);
+
+- if (this_end < offset || this_end > last_end)
++ if (this_end < offset || this_end > offsets.data_size)
+ return FALSE;
+
+ while (offset & alignment)
+@@ -832,7 +838,7 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ offset = this_end;
+ }
+
+- g_assert (offset == last_end);
++ g_assert (offset == offsets.data_size);
+
+ return TRUE;
+ }
+--
+2.40.0
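The struct Offsets refactor above centralises the parsing of the frame-offset table that trails every serialised variable-sized array. To make that layout concrete, here is a small sketch (not from the patch) that prints the serialised bytes of a two-element string array; the final bytes are the frame offsets that the new gvs_variable_sized_array_get_frame_offsets() parses:

#include <glib.h>

int
main (void)
{
  GVariant *v = g_variant_new_parsed ("['hi', 'bye']");
  const guint8 *data = g_variant_get_data (v);
  gsize size = g_variant_get_size (v);
  gsize i;

  for (i = 0; i < size; i++)
    g_print ("%02x ", data[i]);
  g_print ("\n");
  /* Expected output (per the GVariant serialisation format):
   *   68 69 00 62 79 65 00 03 07
   * i.e. "hi\0", "bye\0", then the 1-byte frame offsets 3 and 7 marking where
   * each element ends. */

  g_variant_unref (v);
  return 0;
}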
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch
new file mode 100644
index 0000000000..183c6b20e7
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch
@@ -0,0 +1,418 @@
+From ade71fb544391b2e33e1859645726bfee0d5eaaf Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 16 Aug 2023 03:12:21 +0000
+Subject: [PATCH] gvariant: Don't allow child elements to overlap with each
+ other
+
+If different elements of a variable sized array can overlap with each
+other then we can cause a `GVariant` to normalise to a much larger type.
+
+This commit changes the behaviour of `GVariant` with non-normal form data. If
+an invalid frame offset is found all subsequent elements are given their
+default value.
+
+When retrieving an element at index `n` we scan the frame offsets up to index
+`n` and if they are not in order we return an element with the default value
+for that type. This guarantees that elements don't overlap with each
+other. We remember the offset we've scanned up to so we don't need to
+repeat this work on subsequent accesses. We skip these checks for trusted
+data.
+
+Unfortunately this makes random access of untrusted data O(n) — at least
+on first access. It doesn't affect the algorithmic complexity of accessing
+elements in order, such as when using the `GVariantIter` interface. Also:
+the cost of validation will be amortised as the `GVariant` instance is
+continued to be used.
+
+I've implemented this with 4 different functions, 1 for each element size,
+rather than looping calling `gvs_read_unaligned_le` in the hope that the
+compiler will find it easy to optimise and should produce fairly tight
+code.
+
+Fixes: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/ade71fb544391b2e33e1859645726bfee0d5eaaf]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant-core.c | 35 ++++++++++++++++
+ glib/gvariant-serialiser.c | 86 ++++++++++++++++++++++++++++++++++++--
+ glib/gvariant-serialiser.h | 8 ++++
+ glib/tests/gvariant.c | 45 ++++++++++++++++++++
+ 4 files changed, 171 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 496f2e2..4e0b2b5 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -65,6 +65,7 @@ struct _GVariant
+ {
+ GBytes *bytes;
+ gconstpointer data;
++ gsize ordered_offsets_up_to;
+ } serialised;
+
+ struct
+@@ -162,6 +163,24 @@ struct _GVariant
+ * if .data pointed to the appropriate number of nul
+ * bytes.
+ *
++ * .ordered_offsets_up_to: If ordered_offsets_up_to == n this means that all
++ * the frame offsets up to and including the frame
++ * offset determining the end of element n are in
++ * order. This guarantees that the bytes of element
++ * n don't overlap with any previous element.
++ *
++ * For trusted data this is set to G_MAXSIZE and we
++ * don't check that the frame offsets are in order.
++ *
++ * Note: This doesn't imply the offsets are good in
++ * any way apart from their ordering. In particular
++ * offsets may be out of bounds for this value or
++ * may imply that the data overlaps the frame
++ * offsets themselves.
++ *
++ * This field is only relevant for arrays of non
++ * fixed width types.
++ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+ * Note that accesses from other threads could result in
+@@ -365,6 +384,7 @@ g_variant_to_serialised (GVariant *value)
+ (gpointer) value->contents.serialised.data,
+ value->size,
+ value->depth,
++ value->contents.serialised.ordered_offsets_up_to,
+ };
+ return serialised;
+ }
+@@ -396,6 +416,7 @@ g_variant_serialise (GVariant *value,
+ serialised.size = value->size;
+ serialised.data = data;
+ serialised.depth = value->depth;
++ serialised.ordered_offsets_up_to = 0;
+
+ children = (gpointer *) value->contents.tree.children;
+ n_children = value->contents.tree.n_children;
+@@ -439,6 +460,15 @@ g_variant_fill_gvs (GVariantSerialised *serialised,
+ g_assert (serialised->size == value->size);
+ serialised->depth = value->depth;
+
++ if (value->state & STATE_SERIALISED)
++ {
++ serialised->ordered_offsets_up_to = value->contents.serialised.ordered_offsets_up_to;
++ }
++ else
++ {
++ serialised->ordered_offsets_up_to = 0;
++ }
++
+ if (serialised->data)
+ /* g_variant_store() is a public API, so it
+ * it will reacquire the lock if it needs to.
+@@ -481,6 +511,7 @@ g_variant_ensure_serialised (GVariant *value)
+ bytes = g_bytes_new_take (data, value->size);
+ value->contents.serialised.data = g_bytes_get_data (bytes, NULL);
+ value->contents.serialised.bytes = bytes;
++ value->contents.serialised.ordered_offsets_up_to = G_MAXSIZE;
+ value->state |= STATE_SERIALISED;
+ }
+ }
+@@ -561,6 +592,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ serialised.type_info = value->type_info;
+ serialised.data = (guchar *) g_bytes_get_data (bytes, &serialised.size);
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ if (!g_variant_serialised_check (serialised))
+ {
+@@ -611,6 +643,8 @@ g_variant_new_from_bytes (const GVariantType *type,
+ value->contents.serialised.data = g_bytes_get_data (bytes, &value->size);
+ }
+
++ value->contents.serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++
+ g_clear_pointer (&owned_bytes, g_bytes_unref);
+
+ return value;
+@@ -1130,6 +1164,7 @@ g_variant_get_child_value (GVariant *value,
+ child->contents.serialised.bytes =
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
++ child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
+
+ return child;
+ }
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index e71248e..6fb3f2f 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2007, 2008 Ryan Lortie
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2020 William Manley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -264,6 +265,7 @@ gvs_fixed_sized_maybe_get_child (GVariantSerialised value,
+ value.type_info = g_variant_type_info_element (value.type_info);
+ g_variant_type_info_ref (value.type_info);
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -295,7 +297,7 @@ gvs_fixed_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1 };
++ GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0 };
+
+ gvs_filler (&child, children[0]);
+ }
+@@ -317,6 +319,7 @@ gvs_fixed_sized_maybe_is_normal (GVariantSerialised value)
+ /* proper element size: "Just". recurse to the child. */
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -358,6 +361,7 @@ gvs_variable_sized_maybe_get_child (GVariantSerialised value,
+ value.data = NULL;
+
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -388,7 +392,7 @@ gvs_variable_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1 };
++ GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0 };
+
+ /* write the data for the child. */
+ gvs_filler (&child, children[0]);
+@@ -408,6 +412,7 @@ gvs_variable_sized_maybe_is_normal (GVariantSerialised value)
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.size--;
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -691,6 +696,32 @@ gvs_variable_sized_array_n_children (GVariantSerialised value)
+ return gvs_variable_sized_array_get_frame_offsets (value).length;
+ }
+
++/* Find the index of the first out-of-order element in @data, assuming that
++ * @data is an array of elements of given @type, starting at index @start and
++ * containing a further @len-@start elements. */
++#define DEFINE_FIND_UNORDERED(type) \
++ static gsize \
++ find_unordered_##type (const guint8 *data, gsize start, gsize len) \
++ { \
++ gsize off; \
++ type current, previous; \
++ \
++ memcpy (&previous, data + start * sizeof (current), sizeof (current)); \
++ for (off = (start + 1) * sizeof (current); off < len * sizeof (current); off += sizeof (current)) \
++ { \
++ memcpy (&current, data + off, sizeof (current)); \
++ if (current < previous) \
++ break; \
++ previous = current; \
++ } \
++ return off / sizeof (current) - 1; \
++ }
++
++DEFINE_FIND_UNORDERED (guint8);
++DEFINE_FIND_UNORDERED (guint16);
++DEFINE_FIND_UNORDERED (guint32);
++DEFINE_FIND_UNORDERED (guint64);
++
+ static GVariantSerialised
+ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ gsize index_)
+@@ -706,6 +737,49 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (child.type_info);
+ child.depth = value.depth + 1;
+
++ /* If the requested @index_ is beyond the set of indices whose framing offsets
++ * have been checked, check the remaining offsets to see whether they’re
++ * normal (in order, no overlapping array elements). */
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ switch (offsets.offset_size)
++ {
++ case 1:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint8 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 2:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint16 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 4:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint32 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 8:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint64 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ default:
++ /* gvs_get_offset_size() only returns maximum 8 */
++ g_assert_not_reached ();
++ }
++ }
++
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ /* Offsets are invalid somewhere, so return an empty child. */
++ return child;
++ }
++
+ if (index_ > 0)
+ {
+ guint alignment;
+@@ -840,6 +914,9 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+
+ g_assert (offset == offsets.data_size);
+
++ /* All offsets have now been checked. */
++ value.ordered_offsets_up_to = G_MAXSIZE;
++
+ return TRUE;
+ }
+
+@@ -1072,7 +1149,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ for (i = 0; i < length; i++)
+ {
+ const GVariantMemberInfo *member_info;
+- GVariantSerialised child;
++ GVariantSerialised child = { 0, };
+ gsize fixed_size;
+ guint alignment;
+ gsize end;
+@@ -1132,6 +1209,9 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ offset = end;
+ }
+
++ /* All element bounds have been checked above. */
++ value.ordered_offsets_up_to = G_MAXSIZE;
++
+ {
+ gsize fixed_size;
+ guint alignment;
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 859cb7b..3ab83b3 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -29,6 +29,14 @@ typedef struct
+ guchar *data;
+ gsize size;
+ gsize depth; /* same semantics as GVariant.depth */
++ /* If ordered_offsets_up_to == n this means that all the frame offsets up to and
++ * including the frame offset determining the end of element n are in order.
++ * This guarantees that the bytes of element n don't overlap with any previous
++ * element.
++ *
++ * This is both read and set by g_variant_serialised_get_child for arrays of
++ * non-fixed-width types */
++ gsize ordered_offsets_up_to;
+ } GVariantSerialised;
+
+ /* deserialization */
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 0110f26..291f796 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1,5 +1,6 @@
+ /*
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2020 William Manley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -1279,6 +1280,7 @@ random_instance_filler (GVariantSerialised *serialised,
+ serialised->size = instance->size;
+
+ serialised->depth = 0;
++ serialised->ordered_offsets_up_to = 0;
+
+ g_assert_true (serialised->type_info == instance->type_info);
+ g_assert_cmpuint (serialised->size, ==, instance->size);
+@@ -5023,6 +5025,47 @@ test_normal_checking_array_offsets (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that we can't have non-normal values that take up
++ * significantly more space than the normal equivalent, by specifying the
++ * offset table entries so that array elements overlap.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_832242 */
++static void
++test_normal_checking_array_offsets2 (void)
++{
++ const guint8 data[] = {
++ 'h', 'i', '\0',
++ 0x03, 0x00, 0x03,
++ 0x06, 0x00, 0x06,
++ 0x09, 0x00, 0x09,
++ 0x0c, 0x00, 0x0c,
++ 0x0f, 0x00, 0x0f,
++ 0x12, 0x00, 0x12,
++ 0x15, 0x00, 0x15,
++ };
++ gsize size = sizeof (data);
++ const GVariantType *aaaaaaas = G_VARIANT_TYPE ("aaaaaaas");
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (aaaaaaas, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 2);
++
++ expected = g_variant_new_parsed (
++ "[[[[[[['hi', '', ''], [], []], [], []], [], []], [], []], [], []], [], []]");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that a tuple with invalidly large values in its offset table is
+ * normalised successfully without looping infinitely. */
+ static void
+@@ -5189,6 +5232,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuples);
+ g_test_add_func ("/gvariant/normal-checking/array-offsets",
+ test_normal_checking_array_offsets);
++ g_test_add_func ("/gvariant/normal-checking/array-offsets2",
++ test_normal_checking_array_offsets2);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+--
+2.40.0
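The DEFINE_FIND_UNORDERED macro in the patch above expands to one scanner per offset width (1, 2, 4 and 8 bytes, read via memcpy to cope with unaligned data). Stripped of that width handling, the check it performs amounts to the following simplified restatement for the 1-byte case (a sketch, not the patch code):

#include <glib.h>

/* Return the index of the last element whose end offset is still in
 * non-decreasing order, scanning from @start up to (but excluding) @len. */
static gsize
find_unordered_u8 (const guint8 *offsets, gsize start, gsize len)
{
  gsize i;

  for (i = start + 1; i < len; i++)
    if (offsets[i] < offsets[i - 1])
      break;

  return i - 1;
}

With an offset table of { 3, 2, 9 } and start = 0 this returns 0, so ordered_offsets_up_to stays at 0 and any element at index 1 or later is handed back as its default value rather than as data overlapping an earlier element.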
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch
new file mode 100644
index 0000000000..791efdee87
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch
@@ -0,0 +1,114 @@
+From 345cae9c1aa7bf6752039225ef4c8d8d69fa8d76 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Fri, 11 Aug 2023 04:09:12 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out code to get bounds of a tuple
+ member
+
+This introduces no functional changes.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/345cae9c1aa7bf6752039225ef4c8d8d69fa8d76]
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ glib/gvariant-serialiser.c | 73 ++++++++++++++++++++++++--------------
+ 1 file changed, 46 insertions(+), 27 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 2932427..1c23eab 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -946,6 +946,51 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ * for the tuple. See the notes in gvarianttypeinfo.h.
+ */
+
++static void
++gvs_tuple_get_member_bounds (GVariantSerialised value,
++ gsize index_,
++ gsize offset_size,
++ gsize *out_member_start,
++ gsize *out_member_end)
++{
++ const GVariantMemberInfo *member_info;
++ gsize member_start, member_end;
++
++ member_info = g_variant_type_info_member_info (value.type_info, index_);
++
++ if (member_info->i + 1)
++ member_start = gvs_read_unaligned_le (value.data + value.size -
++ offset_size * (member_info->i + 1),
++ offset_size);
++ else
++ member_start = 0;
++
++ member_start += member_info->a;
++ member_start &= member_info->b;
++ member_start |= member_info->c;
++
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
++ member_end = value.size - offset_size * (member_info->i + 1);
++
++ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
++ {
++ gsize fixed_size;
++
++ g_variant_type_info_query (member_info->type_info, NULL, &fixed_size);
++ member_end = member_start + fixed_size;
++ }
++
++ else /* G_VARIANT_MEMBER_ENDING_OFFSET */
++ member_end = gvs_read_unaligned_le (value.data + value.size -
++ offset_size * (member_info->i + 2),
++ offset_size);
++
++ if (out_member_start != NULL)
++ *out_member_start = member_start;
++ if (out_member_end != NULL)
++ *out_member_end = member_end;
++}
++
+ static gsize
+ gvs_tuple_n_children (GVariantSerialised value)
+ {
+@@ -1001,33 +1046,7 @@ gvs_tuple_get_child (GVariantSerialised value,
+ }
+ }
+
+- if (member_info->i + 1)
+- start = gvs_read_unaligned_le (value.data + value.size -
+- offset_size * (member_info->i + 1),
+- offset_size);
+- else
+- start = 0;
+-
+- start += member_info->a;
+- start &= member_info->b;
+- start |= member_info->c;
+-
+- if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
+- end = value.size - offset_size * (member_info->i + 1);
+-
+- else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
+- {
+- gsize fixed_size;
+-
+- g_variant_type_info_query (child.type_info, NULL, &fixed_size);
+- end = start + fixed_size;
+- child.size = fixed_size;
+- }
+-
+- else /* G_VARIANT_MEMBER_ENDING_OFFSET */
+- end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size * (member_info->i + 2),
+- offset_size);
++ gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
+
+ /* The child should not extend into the offset table. */
+ if (index_ != g_variant_type_info_n_members (value.type_info) - 1)
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch
new file mode 100644
index 0000000000..6c71c20819
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch
@@ -0,0 +1,81 @@
+From 73d0aa81c2575a5c9ae77dcb94da919579014fc0 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Fri, 11 Aug 2023 04:13:02 +0000
+Subject: [PATCH] gvariant-serialiser: Rework child size calculation
+
+This reduces a few duplicate calls to `g_variant_type_info_query()` and
+explains why they’re needed.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/73d0aa81c2575a5c9ae77dcb94da919579014fc0]
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ glib/gvariant-serialiser.c | 31 +++++++++----------------------
+ 1 file changed, 9 insertions(+), 22 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 1c23eab..b63e99f 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -1011,14 +1011,18 @@ gvs_tuple_get_child (GVariantSerialised value,
+ child.depth = value.depth + 1;
+ offset_size = gvs_get_offset_size (value.size);
+
++ /* Ensure the size is set for fixed-sized children, or
++ * g_variant_serialised_check() will fail, even if we return
++ * (child.data == NULL) to indicate an error. */
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
++ g_variant_type_info_query (child.type_info, NULL, &child.size);
++
+ /* tuples are the only (potentially) fixed-sized containers, so the
+ * only ones that have to deal with the possibility of having %NULL
+ * data with a non-zero %size if errors occurred elsewhere.
+ */
+ if G_UNLIKELY (value.data == NULL && value.size != 0)
+ {
+- g_variant_type_info_query (child.type_info, NULL, &child.size);
+-
+ /* this can only happen in fixed-sized tuples,
+ * so the child must also be fixed sized.
+ */
+@@ -1036,29 +1040,12 @@ gvs_tuple_get_child (GVariantSerialised value,
+ else
+ {
+ if (offset_size * (member_info->i + 1) > value.size)
+- {
+- /* if the child is fixed size, return its size.
+- * if child is not fixed-sized, return size = 0.
+- */
+- g_variant_type_info_query (child.type_info, NULL, &child.size);
+-
+- return child;
+- }
++ return child;
+ }
+
+- gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
+-
+ /* The child should not extend into the offset table. */
+- if (index_ != g_variant_type_info_n_members (value.type_info) - 1)
+- {
+- GVariantSerialised last_child;
+- last_child = gvs_tuple_get_child (value,
+- g_variant_type_info_n_members (value.type_info) - 1);
+- last_end = last_child.data + last_child.size - value.data;
+- g_variant_type_info_unref (last_child.type_info);
+- }
+- else
+- last_end = end;
++ gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
++ gvs_tuple_get_member_bounds (value, g_variant_type_info_n_members (value.type_info) - 1, offset_size, NULL, &last_end);
+
+ if (start < end && end <= value.size && end <= last_end)
+ {
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch
new file mode 100644
index 0000000000..194ce15287
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch
@@ -0,0 +1,397 @@
+From 7cf6f5b69146d20948d42f0c476688fe17fef787 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 16 Aug 2023 12:09:06 +0000
+Subject: [PATCH] gvariant: Don't allow child elements of a tuple to overlap
+ each other
+
+This is similar to the earlier commit which prevents child elements of a
+variable-sized array from overlapping each other, but this time for
+tuples. It is based heavily on ideas by William Manley.
+
+Tuples are slightly different from variable-sized arrays in that they
+contain a mixture of fixed and variable sized elements. All but one of
+the variable sized elements have an entry in the frame offsets table.
+This means that if we were to just check the ordering of the frame
+offsets table, the variable sized elements could still overlap
+interleaving fixed sized elements, which would be bad.
+
+Therefore we have to check the elements rather than the frame offsets.
+
+The logic of checking the elements up to the index currently being
+requested, and caching the result in `ordered_offsets_up_to`, means that
+the algorithmic cost implications are the same for this commit as for
+variable-sized arrays: an O(N) cost for these checks is amortised out
+over N accesses to O(1) per access.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/7cf6f5b69146d20948d42f0c476688fe17fef787]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant-core.c | 6 +-
+ glib/gvariant-serialiser.c | 40 ++++++++
+ glib/gvariant-serialiser.h | 7 +-
+ glib/gvariant.c | 1 +
+ glib/tests/gvariant.c | 181 +++++++++++++++++++++++++++++++++++++
+ 5 files changed, 232 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 4e0b2b5..c57ee77 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2007, 2008 Ryan Lortie
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2022 Endless OS Foundation, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -179,7 +180,7 @@ struct _GVariant
+ * offsets themselves.
+ *
+ * This field is only relevant for arrays of non
+- * fixed width types.
++ * fixed width types and for tuples.
+ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+@@ -1139,6 +1140,9 @@ g_variant_get_child_value (GVariant *value,
+ */
+ s_child = g_variant_serialised_get_child (serialised, index_);
+
++ /* Update the cached ordered_offsets_up_to, since @serialised will be thrown away when this function exits */
++ value->contents.serialised.ordered_offsets_up_to = MAX (value->contents.serialised.ordered_offsets_up_to, serialised.ordered_offsets_up_to);
++
+ /* Check whether this would cause nesting too deep. If so, return a fake
+ * child. The only situation we expect this to happen in is with a variant,
+ * as all other deeply-nested types have a static type, and hence should
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 2493e76..d46d05c 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -942,6 +942,10 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ * for the tuple. See the notes in gvarianttypeinfo.h.
+ */
+
++/* Note: This doesn’t guarantee that @out_member_end >= @out_member_start; that
++ * condition may not hold true for invalid serialised variants. The caller is
++ * responsible for checking the returned values and handling invalid ones
++ * appropriately. */
+ static void
+ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ gsize index_,
+@@ -1028,6 +1032,42 @@ gvs_tuple_get_child (GVariantSerialised value,
+ return child;
+ }
+
++ /* If the requested @index_ is beyond the set of indices whose framing offsets
++ * have been checked, check the remaining offsets to see whether they’re
++ * normal (in order, no overlapping tuple elements).
++ *
++ * Unlike the checks in gvs_variable_sized_array_get_child(), we have to check
++ * all the tuple *elements* here, not just all the framing offsets, since
++ * tuples contain a mix of elements which use framing offsets and ones which
++ * don’t. None of them are allowed to overlap. */
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ gsize i, prev_i_end = 0;
++
++ if (value.ordered_offsets_up_to > 0)
++ gvs_tuple_get_member_bounds (value, value.ordered_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
++
++ for (i = value.ordered_offsets_up_to; i <= index_; i++)
++ {
++ gsize i_start, i_end;
++
++ gvs_tuple_get_member_bounds (value, i, offset_size, &i_start, &i_end);
++
++ if (i_start > i_end || i_start < prev_i_end || i_end > value.size)
++ break;
++
++ prev_i_end = i_end;
++ }
++
++ value.ordered_offsets_up_to = i - 1;
++ }
++
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ /* Offsets are invalid somewhere, so return an empty child. */
++ return child;
++ }
++
+ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_OFFSET)
+ {
+ if (offset_size * (member_info->i + 2) > value.size)
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 3ab83b3..03826f9 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -34,8 +34,11 @@ typedef struct
+ * This guarantees that the bytes of element n don't overlap with any previous
+ * element.
+ *
+- * This is both read and set by g_variant_serialised_get_child for arrays of
+- * non-fixed-width types */
++ * This is both read and set by g_variant_serialised_get_child() for arrays of
++ * non-fixed-width types, and for tuples.
++ *
++ * Even when dealing with tuples, @ordered_offsets_up_to is an element index,
++ * rather than an index into the frame offsets. */
+ gsize ordered_offsets_up_to;
+ } GVariantSerialised;
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 42ffc9a..f645e05 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5997,6 +5997,7 @@ g_variant_byteswap (GVariant *value)
+ serialised.size = g_variant_get_size (trusted);
+ serialised.data = g_malloc (serialised.size);
+ serialised.depth = g_variant_get_depth (trusted);
++ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
+ g_variant_store (trusted, serialised.data);
+ g_variant_unref (trusted);
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 291f796..3ddff96 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2010 Codethink Limited
+ * Copyright © 2020 William Manley
++ * Copyright © 2022 Endless OS Foundation, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -1447,6 +1448,7 @@ test_maybe (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised,
+ random_instance_filler,
+@@ -1570,6 +1572,7 @@ test_array (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1734,6 +1737,7 @@ test_tuple (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1830,6 +1834,7 @@ test_variant (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) &instance, 1);
+@@ -5090,6 +5095,176 @@ test_normal_checking_tuple_offsets (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that we can't have non-normal values that take up
++ * significantly more space than the normal equivalent, by specifying the
++ * offset table entries so that tuple elements overlap.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_838503 and
++ * https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_838513 */
++static void
++test_normal_checking_tuple_offsets2 (void)
++{
++ const GVariantType *data_type = G_VARIANT_TYPE ("(yyaiyyaiyy)");
++ const guint8 data[] = {
++ 0x12, 0x34, 0x56, 0x78, 0x01,
++ /*
++ ^───────────────────┘
++
++ ^^^^^^^^^^ 1st yy
++ ^^^^^^^^^^ 2nd yy
++ ^^^^^^^^^^ 3rd yy
++ ^^^^ Framing offsets
++ */
++
++ /* If this variant was encoded normally, it would be something like this:
++ * 0x12, 0x34, pad, pad, [array bytes], 0x56, 0x78, pad, pad, [array bytes], 0x9A, 0xBC, 0xXX
++ * ^─────────────────────────────────────────────────────┘
++ *
++ * ^^^^^^^^^^ 1st yy
++ * ^^^^^^^^^^ 2nd yy
++ * ^^^^^^^^^^ 3rd yy
++ * ^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed (
++ "@(yyaiyyaiyy) (0x12, 0x34, [], 0x00, 0x00, [], 0x00, 0x00)");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
++/* This is a regression test that overlapping entries in the offset table are
++ * decoded consistently, even though they’re non-normal.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_910935 */
++static void
++test_normal_checking_tuple_offsets3 (void)
++{
++ /* The expected decoding of this non-normal byte stream is complex. See
++ * section 2.7.3 (Handling Non-Normal Serialised Data) of the GVariant
++ * specification.
++ *
++ * The rule “Child Values Overlapping Framing Offsets” from the specification
++ * says that the first `ay` must be decoded as `[0x01]` even though it
++ * overlaps the first byte of the offset table. However, since commit
++ * 7eedcd76f7d5b8c98fa60013e1fe6e960bf19df3, GLib explicitly doesn’t allow
++ * this as it’s exploitable. So the first `ay` must be given a default value.
++ *
++ * The second and third `ay`s must be given default values because of rule
++ * “End Boundary Precedes Start Boundary”.
++ *
++ * The `i` must be given a default value because of rule “Start or End
++ * Boundary of a Child Falls Outside the Container”.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(ayayiay)");
++ const guint8 data[] = {
++ 0x01, 0x00, 0x02,
++ /*
++ ^──┘
++
++ ^^^^^^^^^^ 1st ay, bytes 0-2 (but given a default value anyway, see above)
++ 2nd ay, bytes 2-0
++ i, bytes 0-4
++ 3rd ay, bytes 4-1
++ ^^^^^^^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed ("@(ayayiay) ([], [], 0, [])");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
++/* This is a regression test that overlapping entries in the offset table are
++ * decoded consistently, even though they’re non-normal.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_910935 */
++static void
++test_normal_checking_tuple_offsets4 (void)
++{
++ /* The expected decoding of this non-normal byte stream is complex. See
++ * section 2.7.3 (Handling Non-Normal Serialised Data) of the GVariant
++ * specification.
++ *
++ * The rule “Child Values Overlapping Framing Offsets” from the specification
++ * says that the first `ay` must be decoded as `[0x01]` even though it
++ * overlaps the first byte of the offset table. However, since commit
++ * 7eedcd76f7d5b8c98fa60013e1fe6e960bf19df3, GLib explicitly doesn’t allow
++ * this as it’s exploitable. So the first `ay` must be given a default value.
++ *
++ * The second `ay` must be given a default value because of rule “End Boundary
++ * Precedes Start Boundary”.
++ *
++ * The third `ay` must be given a default value because its framing offsets
++ * overlap that of the first `ay`.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(ayayay)");
++ const guint8 data[] = {
++ 0x01, 0x00, 0x02,
++ /*
++ ^──┘
++
++ ^^^^^^^^^^ 1st ay, bytes 0-2 (but given a default value anyway, see above)
++ 2nd ay, bytes 2-0
++ 3rd ay, bytes 0-1
++ ^^^^^^^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed ("@(ayayay) ([], [], [])");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that an empty object path is normalised successfully to the base object
+ * path, ‘/’. */
+ static void
+@@ -5236,6 +5411,12 @@ main (int argc, char **argv)
+ test_normal_checking_array_offsets2);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets2",
++ test_normal_checking_tuple_offsets2);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets3",
++ test_normal_checking_tuple_offsets3);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
++ test_normal_checking_tuple_offsets4);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+ test_normal_checking_empty_object_path);
+
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch
new file mode 100644
index 0000000000..8a408ab030
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch
@@ -0,0 +1,50 @@
+From e6490c84e84ba9f182fbd83b51ff4f9f5a0a1793 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 16 Aug 2023 03:42:47 +0000
+Subject: [PATCH] gvariant: Port g_variant_deep_copy() to count its iterations
+ directly
+
+This is equivalent to what `GVariantIter` does, but it means that
+`g_variant_deep_copy()` is making its own `g_variant_get_child_value()`
+calls.
+
+This will be useful in an upcoming commit, where those child values will
+be inspected a little more deeply.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/e6490c84e84ba9f182fbd83b51ff4f9f5a0a1793]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 42ffc9a..ca13cc1 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5850,14 +5850,13 @@ g_variant_deep_copy (GVariant *value)
+ case G_VARIANT_CLASS_VARIANT:
+ {
+ GVariantBuilder builder;
+- GVariantIter iter;
+- GVariant *child;
++ gsize i, n_children;
+
+ g_variant_builder_init (&builder, g_variant_get_type (value));
+- g_variant_iter_init (&iter, value);
+
+- while ((child = g_variant_iter_next_value (&iter)))
++ for (i = 0, n_children = g_variant_n_children (value); i < n_children; i++)
+ {
++ GVariant *child = g_variant_get_child_value (value, i);
+ g_variant_builder_add_value (&builder, g_variant_deep_copy (child));
+ g_variant_unref (child);
+ }
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch
new file mode 100644
index 0000000000..9b074a543d
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch
@@ -0,0 +1,395 @@
+From d1a293c4e29880b8d17bb826c9a426a440ca4a91 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 01:30:38 +0000
+Subject: [PATCH] gvariant: Track checked and ordered offsets independently
+
+The past few commits introduced the concept of known-good offsets in the
+offset table (which is used for variable-width arrays and tuples).
+Good offsets are ones which are non-overlapping with all the previous
+offsets in the table.
+
+If a bad offset is encountered when indexing into the array or tuple,
+the cached known-good offset index will not be increased. In this way,
+all child variants at and beyond the first bad offset can be returned as
+default values rather than dereferencing potentially invalid data.
+
+In this case, there was no information about the fact that the indexes
+between the highest known-good index and the requested one had been
+checked already. That could lead to a pathological case where an offset
+table with an invalid first offset is repeatedly checked in full when
+trying to access higher-indexed children.
+
+Avoid that by storing the index of the highest checked offset in the
+table, as well as the index of the highest good/ordered offset.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/d1a293c4e29880b8d17bb826c9a426a440ca4a91]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant-core.c | 28 ++++++++++++++++++++++++
+ glib/gvariant-serialiser.c | 44 +++++++++++++++++++++++++++-----------
+ glib/gvariant-serialiser.h | 9 ++++++++
+ glib/gvariant.c | 1 +
+ glib/tests/gvariant.c | 5 +++++
+ 5 files changed, 75 insertions(+), 12 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index c57ee77..7b71efc 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -67,6 +67,7 @@ struct _GVariant
+ GBytes *bytes;
+ gconstpointer data;
+ gsize ordered_offsets_up_to;
++ gsize checked_offsets_up_to;
+ } serialised;
+
+ struct
+@@ -182,6 +183,24 @@ struct _GVariant
+ * This field is only relevant for arrays of non
+ * fixed width types and for tuples.
+ *
++ * .checked_offsets_up_to: Similarly to .ordered_offsets_up_to, this stores
++ * the index of the highest element, n, whose frame
++ * offsets (and all the preceding frame offsets)
++ * have been checked for validity.
++ *
++ * It is always the case that
++ * .checked_offsets_up_to ≥ .ordered_offsets_up_to.
++ *
++ * If .checked_offsets_up_to == .ordered_offsets_up_to,
++ * then a bad offset has not been found so far.
++ *
++ * If .checked_offsets_up_to > .ordered_offsets_up_to,
++ * then a bad offset has been found at
++ * (.ordered_offsets_up_to + 1).
++ *
++ * This field is only relevant for arrays of non
++ * fixed width types and for tuples.
++ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+ * Note that accesses from other threads could result in
+@@ -386,6 +405,7 @@ g_variant_to_serialised (GVariant *value)
+ value->size,
+ value->depth,
+ value->contents.serialised.ordered_offsets_up_to,
++ value->contents.serialised.checked_offsets_up_to,
+ };
+ return serialised;
+ }
+@@ -418,6 +438,7 @@ g_variant_serialise (GVariant *value,
+ serialised.data = data;
+ serialised.depth = value->depth;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ children = (gpointer *) value->contents.tree.children;
+ n_children = value->contents.tree.n_children;
+@@ -464,10 +485,12 @@ g_variant_fill_gvs (GVariantSerialised *serialised,
+ if (value->state & STATE_SERIALISED)
+ {
+ serialised->ordered_offsets_up_to = value->contents.serialised.ordered_offsets_up_to;
++ serialised->checked_offsets_up_to = value->contents.serialised.checked_offsets_up_to;
+ }
+ else
+ {
+ serialised->ordered_offsets_up_to = 0;
++ serialised->checked_offsets_up_to = 0;
+ }
+
+ if (serialised->data)
+@@ -513,6 +536,7 @@ g_variant_ensure_serialised (GVariant *value)
+ value->contents.serialised.data = g_bytes_get_data (bytes, NULL);
+ value->contents.serialised.bytes = bytes;
+ value->contents.serialised.ordered_offsets_up_to = G_MAXSIZE;
++ value->contents.serialised.checked_offsets_up_to = G_MAXSIZE;
+ value->state |= STATE_SERIALISED;
+ }
+ }
+@@ -594,6 +618,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ serialised.data = (guchar *) g_bytes_get_data (bytes, &serialised.size);
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++ serialised.checked_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ if (!g_variant_serialised_check (serialised))
+ {
+@@ -645,6 +670,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ }
+
+ value->contents.serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++ value->contents.serialised.checked_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ g_clear_pointer (&owned_bytes, g_bytes_unref);
+
+@@ -1142,6 +1168,7 @@ g_variant_get_child_value (GVariant *value,
+
+ /* Update the cached ordered_offsets_up_to, since @serialised will be thrown away when this function exits */
+ value->contents.serialised.ordered_offsets_up_to = MAX (value->contents.serialised.ordered_offsets_up_to, serialised.ordered_offsets_up_to);
++ value->contents.serialised.checked_offsets_up_to = MAX (value->contents.serialised.checked_offsets_up_to, serialised.checked_offsets_up_to);
+
+ /* Check whether this would cause nesting too deep. If so, return a fake
+ * child. The only situation we expect this to happen in is with a variant,
+@@ -1169,6 +1196,7 @@ g_variant_get_child_value (GVariant *value,
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
+ child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
++ child->contents.serialised.checked_offsets_up_to = s_child.checked_offsets_up_to;
+
+ return child;
+ }
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index d46d05c..9c7f12a 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -120,6 +120,8 @@
+ *
+ * @depth has no restrictions; the depth of a top-level serialized #GVariant is
+ * zero, and it increases for each level of nested child.
++ *
++ * @checked_offsets_up_to is always ≥ @ordered_offsets_up_to
+ */
+
+ /* < private >
+@@ -147,6 +149,9 @@ g_variant_serialised_check (GVariantSerialised serialised)
+ !(serialised.size == 0 || serialised.data != NULL))
+ return FALSE;
+
++ if (serialised.ordered_offsets_up_to > serialised.checked_offsets_up_to)
++ return FALSE;
++
+ /* Depending on the native alignment requirements of the machine, the
+ * compiler will insert either 3 or 7 padding bytes after the char.
+ * This will result in the sizeof() the struct being 12 or 16.
+@@ -266,6 +271,7 @@ gvs_fixed_sized_maybe_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (value.type_info);
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -297,7 +303,7 @@ gvs_fixed_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0 };
++ GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0, 0 };
+
+ gvs_filler (&child, children[0]);
+ }
+@@ -320,6 +326,7 @@ gvs_fixed_sized_maybe_is_normal (GVariantSerialised value)
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -362,6 +369,7 @@ gvs_variable_sized_maybe_get_child (GVariantSerialised value,
+
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -392,7 +400,7 @@ gvs_variable_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0 };
++ GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0, 0 };
+
+ /* write the data for the child. */
+ gvs_filler (&child, children[0]);
+@@ -413,6 +421,7 @@ gvs_variable_sized_maybe_is_normal (GVariantSerialised value)
+ value.size--;
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -739,39 +748,46 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+
+ /* If the requested @index_ is beyond the set of indices whose framing offsets
+ * have been checked, check the remaining offsets to see whether they’re
+- * normal (in order, no overlapping array elements). */
+- if (index_ > value.ordered_offsets_up_to)
++ * normal (in order, no overlapping array elements).
++ *
++ * Don’t bother checking if the highest known-good offset is lower than the
++ * highest checked offset, as that means there’s an invalid element at that
++ * index, so there’s no need to check further. */
++ if (index_ > value.checked_offsets_up_to &&
++ value.ordered_offsets_up_to == value.checked_offsets_up_to)
+ {
+ switch (offsets.offset_size)
+ {
+ case 1:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint8 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 2:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint16 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 4:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint32 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 8:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint64 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ default:
+ /* gvs_get_offset_size() only returns maximum 8 */
+ g_assert_not_reached ();
+ }
++
++ value.checked_offsets_up_to = index_;
+ }
+
+ if (index_ > value.ordered_offsets_up_to)
+@@ -916,6 +932,7 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+
+ /* All offsets have now been checked. */
+ value.ordered_offsets_up_to = G_MAXSIZE;
++ value.checked_offsets_up_to = G_MAXSIZE;
+
+ return TRUE;
+ }
+@@ -1040,14 +1057,15 @@ gvs_tuple_get_child (GVariantSerialised value,
+ * all the tuple *elements* here, not just all the framing offsets, since
+ * tuples contain a mix of elements which use framing offsets and ones which
+ * don’t. None of them are allowed to overlap. */
+- if (index_ > value.ordered_offsets_up_to)
++ if (index_ > value.checked_offsets_up_to &&
++ value.ordered_offsets_up_to == value.checked_offsets_up_to)
+ {
+ gsize i, prev_i_end = 0;
+
+- if (value.ordered_offsets_up_to > 0)
+- gvs_tuple_get_member_bounds (value, value.ordered_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
++ if (value.checked_offsets_up_to > 0)
++ gvs_tuple_get_member_bounds (value, value.checked_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
+
+- for (i = value.ordered_offsets_up_to; i <= index_; i++)
++ for (i = value.checked_offsets_up_to; i <= index_; i++)
+ {
+ gsize i_start, i_end;
+
+@@ -1060,6 +1078,7 @@ gvs_tuple_get_child (GVariantSerialised value,
+ }
+
+ value.ordered_offsets_up_to = i - 1;
++ value.checked_offsets_up_to = index_;
+ }
+
+ if (index_ > value.ordered_offsets_up_to)
+@@ -1257,6 +1276,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+
+ /* All element bounds have been checked above. */
+ value.ordered_offsets_up_to = G_MAXSIZE;
++ value.checked_offsets_up_to = G_MAXSIZE;
+
+ {
+ gsize fixed_size;
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 03826f9..2423e01 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -40,6 +40,15 @@ typedef struct
+ * Even when dealing with tuples, @ordered_offsets_up_to is an element index,
+ * rather than an index into the frame offsets. */
+ gsize ordered_offsets_up_to;
++
++ /* Similar to @ordered_offsets_up_to. This gives the index of the child element
++ * whose frame offset is the highest in the offset table which has been
++ * checked so far.
++ *
++ * This is always ≥ @ordered_offsets_up_to. It is always an element index.
++ *
++ * See documentation in gvariant-core.c for `struct GVariant` for details. */
++ gsize checked_offsets_up_to;
+ } GVariantSerialised;
+
+ /* deserialization */
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 1b1cbdc..2e288af 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5997,6 +5997,7 @@ g_variant_byteswap (GVariant *value)
+ serialised.data = g_malloc (serialised.size);
+ serialised.depth = g_variant_get_depth (trusted);
+ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
++ serialised.checked_offsets_up_to = G_MAXSIZE;
+ g_variant_store (trusted, serialised.data);
+ g_variant_unref (trusted);
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 3ddff96..31a7dde 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1282,6 +1282,7 @@ random_instance_filler (GVariantSerialised *serialised,
+
+ serialised->depth = 0;
+ serialised->ordered_offsets_up_to = 0;
++ serialised->checked_offsets_up_to = 0;
+
+ g_assert_true (serialised->type_info == instance->type_info);
+ g_assert_cmpuint (serialised->size, ==, instance->size);
+@@ -1449,6 +1450,7 @@ test_maybe (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised,
+ random_instance_filler,
+@@ -1573,6 +1575,7 @@ test_array (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1738,6 +1741,7 @@ test_tuple (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1835,6 +1839,7 @@ test_variant (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) &instance, 1);
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch
new file mode 100644
index 0000000000..7a43b138f3
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch
@@ -0,0 +1,98 @@
+From 298a537d5f6783e55d87e40011ee3fd3b22b72f9 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 01:39:01 +0000
+Subject: [PATCH] gvariant: Zero-initialise various GVariantSerialised objects
+
+The following few commits will add a couple of new fields to
+`GVariantSerialised`, and they should be zero-filled by default.
+
+Try and pre-empt that a bit by zero-filling `GVariantSerialised` by
+default in a few places.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/298a537d5f6783e55d87e40011ee3fd3b22b72f9]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ glib/gvariant.c | 2 +-
+ glib/tests/gvariant.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 2e288af..30a3280 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5987,7 +5987,7 @@ g_variant_byteswap (GVariant *value)
+ if (alignment)
+ /* (potentially) contains multi-byte numeric data */
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariant *trusted;
+ GBytes *bytes;
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 31a7dde..2f33a3e 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1442,7 +1442,7 @@ test_maybe (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariantSerialised child;
+
+ serialised.type_info = type_info;
+@@ -1568,7 +1568,7 @@ test_array (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+
+ serialised.type_info = array_info;
+ serialised.data = flavoured_malloc (needed_size, flavour);
+@@ -1734,7 +1734,7 @@ test_tuple (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+
+ serialised.type_info = type_info;
+ serialised.data = flavoured_malloc (needed_size, flavour);
+@@ -1831,7 +1831,7 @@ test_variant (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariantSerialised child;
+
+ serialised.type_info = type_info;
+@@ -2280,7 +2280,7 @@ serialise_tree (TreeInstance *tree,
+ static void
+ test_byteswap (void)
+ {
+- GVariantSerialised one, two;
++ GVariantSerialised one = { 0, }, two = { 0, };
+ TreeInstance *tree;
+
+ tree = tree_instance_new (NULL, 3);
+@@ -2354,7 +2354,7 @@ test_serialiser_children (void)
+ static void
+ test_fuzz (gdouble *fuzziness)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ TreeInstance *tree;
+
+ /* make an instance */
+--
+2.40.0
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch b/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch
index fb50fff6a5..c0114397d8 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch
+++ b/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch
@@ -1,4 +1,4 @@
-From a30eb17c20e070124b55523d86729348f2929f95 Mon Sep 17 00:00:00 2001
+From 9a66887179d28d696562dcac43ad05d67580cfdb Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Fri, 11 Mar 2016 15:35:55 +0000
Subject: [PATCH] glib-2.0: relocate the GIO module directory for native builds
diff --git a/meta/recipes-core/glib-2.0/glib-2.0_2.72.2.bb b/meta/recipes-core/glib-2.0/glib-2.0_2.72.3.bb
index c4a8a4b8ad..24c590a714 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0_2.72.2.bb
+++ b/meta/recipes-core/glib-2.0/glib-2.0_2.72.3.bb
@@ -16,10 +16,25 @@ SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \
file://0001-Do-not-write-bindir-into-pkg-config-files.patch \
file://0001-meson-Run-atomics-test-on-clang-as-well.patch \
file://0001-gio-tests-resources.c-comment-out-a-build-host-only-.patch \
+ file://0001-gio-tests-g-file-info-don-t-assume-million-in-one-ev.patch \
+ file://CVE-2023-32665-0001.patch \
+ file://CVE-2023-32665-0002.patch \
+ file://CVE-2023-32665-0003.patch \
+ file://CVE-2023-32665-0004.patch \
+ file://CVE-2023-32665-0005.patch \
+ file://CVE-2023-32665-0006.patch \
+ file://CVE-2023-32665-0007.patch \
+ file://CVE-2023-32665-0008.patch \
+ file://CVE-2023-32665-0009.patch \
+ file://CVE-2023-29499.patch \
+ file://CVE-2023-32611-0001.patch \
+ file://CVE-2023-32611-0002.patch \
+ file://CVE-2023-32643.patch \
+ file://CVE-2023-32636.patch \
"
SRC_URI:append:class-native = " file://relocate-modules.patch"
-SRC_URI[sha256sum] = "78d599a133dba7fe2036dfa8db8fb6131ab9642783fc9578b07a20995252d2de"
+SRC_URI[sha256sum] = "4a39a2f624b8512d500d5840173eda7fa85f51c109052eae806acece85d345f0"
# Find any meson cross files in FILESPATH that are relevant for the current
# build (using siteinfo) and add them to EXTRA_OEMESON.
diff --git a/meta/recipes-core/glib-networking/glib-networking_2.72.0.bb b/meta/recipes-core/glib-networking/glib-networking_2.72.2.bb
index d578f17aa5..746d1bc39c 100644
--- a/meta/recipes-core/glib-networking/glib-networking_2.72.0.bb
+++ b/meta/recipes-core/glib-networking/glib-networking_2.72.2.bb
@@ -9,7 +9,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
SECTION = "libs"
DEPENDS = "glib-2.0"
-SRC_URI[archive.sha256sum] = "100aaebb369285041de52da422b6b716789d5e4d7549a3a71ba587b932e0823b"
+SRC_URI[archive.sha256sum] = "cd2a084c7bb91d78e849fb55d40e472f6d8f6862cddc9f12c39149359ba18268"
PACKAGECONFIG ??= "openssl ${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)}"
diff --git a/meta/recipes-core/glibc/glibc-locale.inc b/meta/recipes-core/glibc/glibc-locale.inc
index b8de7d3192..69b29c836c 100644
--- a/meta/recipes-core/glibc/glibc-locale.inc
+++ b/meta/recipes-core/glibc/glibc-locale.inc
@@ -5,14 +5,9 @@ SUMMARY = "Locale data from glibc"
BPN = "glibc"
LOCALEBASEPN = "${MLPREFIX}glibc"
-# glibc-collateral.inc inhibits all default deps, but do_package needs objcopy
-# ERROR: objcopy failed with exit code 127 (cmd was 'i586-webos-linux-objcopy' --only-keep-debug 'glibc-locale/2.17-r0/package/usr/lib/gconv/IBM1166.so' 'glibc-locale/2.17-r0/package/usr/lib/gconv/.debug/IBM1166.so')
-# ERROR: Function failed: split_and_strip_files
-BINUTILSDEP = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
-BINUTILSDEP:class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
-do_package[depends] += "${BINUTILSDEP}"
-
-DEPENDS += "virtual/libc"
+# Do not inhibit default deps, do_package requires binutils/gcc for
+# objcopy/gcc-nm and glibc-locale depends on virtual/libc directly.
+INHIBIT_DEFAULT_DEPS = ""
# Binary locales are generated at build time if ENABLE_BINARY_LOCALE_GENERATION
# is set. The idea is to avoid running localedef on the target (at first boot)
@@ -42,22 +37,22 @@ PACKAGES_DYNAMIC = "^locale-base-.* \
# Create a glibc-binaries package
ALLOW_EMPTY:${BPN}-binaries = "1"
PACKAGES += "${BPN}-binaries"
-RRECOMMENDS:${BPN}-binaries = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-binary") != -1])}"
+RRECOMMENDS:${BPN}-binaries = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-binary-") != -1])}"
# Create a glibc-charmaps package
ALLOW_EMPTY:${BPN}-charmaps = "1"
PACKAGES += "${BPN}-charmaps"
-RRECOMMENDS:${BPN}-charmaps = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-charmap") != -1])}"
+RRECOMMENDS:${BPN}-charmaps = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-charmap-") != -1])}"
# Create a glibc-gconvs package
ALLOW_EMPTY:${BPN}-gconvs = "1"
PACKAGES += "${BPN}-gconvs"
-RRECOMMENDS:${BPN}-gconvs = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-gconv") != -1])}"
+RRECOMMENDS:${BPN}-gconvs = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-gconv-") != -1])}"
# Create a glibc-localedatas package
ALLOW_EMPTY:${BPN}-localedatas = "1"
PACKAGES += "${BPN}-localedatas"
-RRECOMMENDS:${BPN}-localedatas = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-localedata") != -1])}"
+RRECOMMENDS:${BPN}-localedatas = "${@" ".join([p for p in d.getVar('PACKAGES').split() if p.find("glibc-localedata-") != -1])}"
DESCRIPTION:localedef = "glibc: compile locale definition files"
@@ -87,10 +82,9 @@ do_install() {
if [ ${PACKAGE_NO_GCONV} -eq 0 ]; then
copy_locale_files ${libdir}/gconv 0755
copy_locale_files ${datadir}/i18n 0644
- else
- # Remove the libdir if it is empty when gconv is not copied
- find ${D}${libdir} -type d -empty -delete
fi
+ # Remove empty dirs in libdir when gconv or locales are not copied
+ find ${D}${libdir} -type d -empty -delete
copy_locale_files ${datadir}/locale 0644
install -m 0644 ${LOCALETREESRC}/SUPPORTED ${WORKDIR}/SUPPORTED
}
diff --git a/meta/recipes-core/glibc/glibc-tests_2.35.bb b/meta/recipes-core/glibc/glibc-tests_2.35.bb
index 414f8660de..97d5dc29a3 100644
--- a/meta/recipes-core/glibc/glibc-tests_2.35.bb
+++ b/meta/recipes-core/glibc/glibc-tests_2.35.bb
@@ -4,7 +4,8 @@ require glibc-tests.inc
inherit ptest features_check
REQUIRED_DISTRO_FEATURES = "ptest"
-SRC_URI:append = " \
+SRC_URI += " \
+ file://reproducible-paths.patch \
file://run-ptest \
"
@@ -18,7 +19,8 @@ python __anonymous() {
d.setVar("PROVIDES", "${PN} ${PN}-ptest")
d.setVar("RPROVIDES", "${PN} ${PN}-ptest")
- d.setVar("BBCLASSEXTEND", "")
+ bbclassextend = d.getVar("BBCLASSEXTEND").replace("nativesdk", "").strip()
+ d.setVar("BBCLASSEXTEND", bbclassextend)
d.setVar("RRECOMMENDS", "")
d.setVar("SYSTEMD_SERVICE:nscd", "")
d.setVar("SYSTEMD_PACKAGES", "")
@@ -28,7 +30,9 @@ python __anonymous() {
RPROVIDES:${PN} = "${PN}"
RRECOMMENDS:${PN} = ""
RDEPENDS:${PN} = " glibc sed"
-DEPENDS:append = " sed"
+DEPENDS += "sed"
+
+export oe_srcdir="${exec_prefix}/src/debug/glibc/${PV}/"
# Just build tests for target - do not run them
do_check:append () {
@@ -95,7 +99,7 @@ python populate_packages:prepend () {
d.setVar('DEBIAN_NAMES', '')
}
-FILES:${PN} = "${PTEST_PATH}/* /usr/src/debug/glibc-tests/*"
+FILES:${PN} = "${PTEST_PATH}/* /usr/src/debug/${PN}/*"
EXCLUDE_FROM_SHLIBS = "1"
diff --git a/meta/recipes-core/glibc/glibc-version.inc b/meta/recipes-core/glibc/glibc-version.inc
index 080e905b6e..e0d47f283b 100644
--- a/meta/recipes-core/glibc/glibc-version.inc
+++ b/meta/recipes-core/glibc/glibc-version.inc
@@ -1,6 +1,6 @@
SRCBRANCH ?= "release/2.35/master"
PV = "2.35"
-SRCREV_glibc ?= "24962427071fa532c3c48c918e9d64d719cc8a6c"
+SRCREV_glibc ?= "c84018a05aec80f5ee6f682db0da1130b0196aef"
SRCREV_localedef ?= "794da69788cbf9bf57b59a852f9f11307663fa87"
GLIBC_GIT_URI ?= "git://sourceware.org/git/glibc.git"
diff --git a/meta/recipes-core/glibc/glibc.inc b/meta/recipes-core/glibc/glibc.inc
index fdd241d973..3b940b8ab2 100644
--- a/meta/recipes-core/glibc/glibc.inc
+++ b/meta/recipes-core/glibc/glibc.inc
@@ -1,7 +1,9 @@
require glibc-common.inc
require glibc-ld.inc
-DEPENDS = "virtual/${TARGET_PREFIX}gcc libgcc-initial linux-libc-headers"
+DEPENDS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}binutils${BUSUFFIX} libgcc-initial linux-libc-headers"
+BUSUFFIX= ""
+BUSUFFIX:class-nativesdk = "-crosssdk"
PROVIDES = "virtual/libc"
PROVIDES += "virtual/libintl virtual/libiconv"
diff --git a/meta/recipes-core/glibc/glibc/0001-Revert-Linux-Implement-a-useful-version-of-_startup_.patch b/meta/recipes-core/glibc/glibc/0001-Revert-Linux-Implement-a-useful-version-of-_startup_.patch
new file mode 100644
index 0000000000..2421a63605
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0001-Revert-Linux-Implement-a-useful-version-of-_startup_.patch
@@ -0,0 +1,128 @@
+From 6b8959add09e425df262bf9178b39ca35bc4003c Mon Sep 17 00:00:00 2001
+From: Martin Jansa <Martin.Jansa@gmail.com>
+Date: Sun, 24 Jul 2022 19:41:41 +0200
+Subject: [PATCH] Revert "Linux: Implement a useful version of _startup_fatal"
+
+This reverts commit 2d05ba7f8ef979947e910a37ae8115a816eb4d08.
+Upstream-Status: Inappropriate [temporary work around]
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+
+---
+ sysdeps/unix/sysv/linux/i386/startup.h | 23 ++++++++++++---
+ sysdeps/unix/sysv/linux/ia64/startup.h | 22 ---------------
+ sysdeps/unix/sysv/linux/startup.h | 39 --------------------------
+ 3 files changed, 19 insertions(+), 65 deletions(-)
+ delete mode 100644 sysdeps/unix/sysv/linux/ia64/startup.h
+ delete mode 100644 sysdeps/unix/sysv/linux/startup.h
+
+diff --git a/sysdeps/unix/sysv/linux/i386/startup.h b/sysdeps/unix/sysv/linux/i386/startup.h
+index 213805d7d2..67c9310f3a 100644
+--- a/sysdeps/unix/sysv/linux/i386/startup.h
++++ b/sysdeps/unix/sysv/linux/i386/startup.h
+@@ -1,5 +1,5 @@
+ /* Linux/i386 definitions of functions used by static libc main startup.
+- Copyright (C) 2022 Free Software Foundation, Inc.
++ Copyright (C) 2017-2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+@@ -16,7 +16,22 @@
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+-/* Can't use "call *%gs:SYSINFO_OFFSET" during startup. */
+-#define I386_USE_SYSENTER 0
++#if BUILD_PIE_DEFAULT
++/* Can't use "call *%gs:SYSINFO_OFFSET" during statup in static PIE. */
++# define I386_USE_SYSENTER 0
+
+-#include_next <startup.h>
++# include <sysdep.h>
++# include <abort-instr.h>
++
++__attribute__ ((__noreturn__))
++static inline void
++_startup_fatal (const char *message __attribute__ ((unused)))
++{
++ /* This is only called very early during startup in static PIE.
++ FIXME: How can it be improved? */
++ ABORT_INSTRUCTION;
++ __builtin_unreachable ();
++}
++#else
++# include_next <startup.h>
++#endif
+diff --git a/sysdeps/unix/sysv/linux/ia64/startup.h b/sysdeps/unix/sysv/linux/ia64/startup.h
+deleted file mode 100644
+index 77f29f15a2..0000000000
+--- a/sysdeps/unix/sysv/linux/ia64/startup.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/* Linux/ia64 definitions of functions used by static libc main startup.
+- Copyright (C) 2022 Free Software Foundation, Inc.
+- This file is part of the GNU C Library.
+-
+- The GNU C Library is free software; you can redistribute it and/or
+- modify it under the terms of the GNU Lesser General Public
+- License as published by the Free Software Foundation; either
+- version 2.1 of the License, or (at your option) any later version.
+-
+- The GNU C Library is distributed in the hope that it will be useful,
+- but WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- Lesser General Public License for more details.
+-
+- You should have received a copy of the GNU Lesser General Public
+- License along with the GNU C Library; if not, see
+- <https://www.gnu.org/licenses/>. */
+-
+-/* This code is used before the TCB is set up. */
+-#define IA64_USE_NEW_STUB 0
+-
+-#include_next <startup.h>
+diff --git a/sysdeps/unix/sysv/linux/startup.h b/sysdeps/unix/sysv/linux/startup.h
+deleted file mode 100644
+index 39859b404a..0000000000
+--- a/sysdeps/unix/sysv/linux/startup.h
++++ /dev/null
+@@ -1,39 +0,0 @@
+-/* Linux definitions of functions used by static libc main startup.
+- Copyright (C) 2017-2022 Free Software Foundation, Inc.
+- This file is part of the GNU C Library.
+-
+- The GNU C Library is free software; you can redistribute it and/or
+- modify it under the terms of the GNU Lesser General Public
+- License as published by the Free Software Foundation; either
+- version 2.1 of the License, or (at your option) any later version.
+-
+- The GNU C Library is distributed in the hope that it will be useful,
+- but WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- Lesser General Public License for more details.
+-
+- You should have received a copy of the GNU Lesser General Public
+- License along with the GNU C Library; if not, see
+- <https://www.gnu.org/licenses/>. */
+-
+-#ifdef SHARED
+-# include_next <startup.h>
+-#else
+-# include <sysdep.h>
+-
+-/* Avoid a run-time invocation of strlen. */
+-#define _startup_fatal(message) \
+- do \
+- { \
+- size_t __message_length = __builtin_strlen (message); \
+- if (! __builtin_constant_p (__message_length)) \
+- { \
+- extern void _startup_fatal_not_constant (void); \
+- _startup_fatal_not_constant (); \
+- } \
+- INTERNAL_SYSCALL_CALL (write, STDERR_FILENO, (message), \
+- __message_length); \
+- INTERNAL_SYSCALL_CALL (exit_group, 127); \
+- } \
+- while (0)
+-#endif /* !SHARED */
diff --git a/meta/recipes-core/glibc/glibc/0002-get_nscd_addresses-Fix-subscript-typos-BZ-29605.patch b/meta/recipes-core/glibc/glibc/0002-get_nscd_addresses-Fix-subscript-typos-BZ-29605.patch
new file mode 100644
index 0000000000..629298c23e
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0002-get_nscd_addresses-Fix-subscript-typos-BZ-29605.patch
@@ -0,0 +1,40 @@
+From 707a878b655395f41b954bbed78008d1d9252f1a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=B6rg=20Sonnenberger?= <joerg@bec.de>
+Date: Mon, 26 Sep 2022 13:59:16 -0400
+Subject: [PATCH] get_nscd_addresses: Fix subscript typos [BZ #29605]
+
+Fix the subscript on air->family, which was accidentally set to COUNT
+when it should have remained as I.
+
+Resolves: BZ #29605
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commit;h=c9226c03da0276593a0918eaa9a14835183343e8]
+
+Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+Signed-off-by: Haitao Liu <haitao.liu@windriver.com>
+---
+ sysdeps/posix/getaddrinfo.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sysdeps/posix/getaddrinfo.c b/sysdeps/posix/getaddrinfo.c
+index f4c08d6e3b..fa333ad6ec 100644
+--- a/sysdeps/posix/getaddrinfo.c
++++ b/sysdeps/posix/getaddrinfo.c
+@@ -549,11 +549,11 @@ get_nscd_addresses (const char *name, const struct addrinfo *req,
+ at[count].addr[2] = htonl (0xffff);
+ }
+ else if (req->ai_family == AF_UNSPEC
+- || air->family[count] == req->ai_family)
++ || air->family[i] == req->ai_family)
+ {
+- at[count].family = air->family[count];
++ at[count].family = air->family[i];
+ memcpy (at[count].addr, addrs, size);
+- if (air->family[count] == AF_INET6)
++ if (air->family[i] == AF_INET6)
+ res->got_ipv6 = true;
+ }
+ at[count].next = at + count + 1;
+--
+2.35.5
+
diff --git a/meta/recipes-core/glibc/glibc/check-test-wrapper b/meta/recipes-core/glibc/glibc/check-test-wrapper
index 6ec9b9b29e..5cc993f718 100644
--- a/meta/recipes-core/glibc/glibc/check-test-wrapper
+++ b/meta/recipes-core/glibc/glibc/check-test-wrapper
@@ -58,7 +58,7 @@ elif targettype == "ssh":
user = os.environ.get("SSH_HOST_USER", None)
port = os.environ.get("SSH_HOST_PORT", None)
- command = ["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no"]
+ command = ["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=quiet"]
if port:
command += ["-p", str(port)]
if not host:
diff --git a/meta/recipes-core/glibc/glibc/reproducible-paths.patch b/meta/recipes-core/glibc/glibc/reproducible-paths.patch
new file mode 100644
index 0000000000..0754dca62b
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/reproducible-paths.patch
@@ -0,0 +1,23 @@
+Avoid hardcoded build time paths in the output binaries by replacing the compile
+definitions with the output locations.
+
+Upstream-Status: Inappropriate [would need reworking somehow to be acceptable upstream]
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: git/support/Makefile
+===================================================================
+--- git.orig/support/Makefile
++++ git/support/Makefile
+@@ -216,9 +216,9 @@ libsupport-inhibit-o += .o
+ endif
+
+ CFLAGS-support_paths.c = \
+- -DSRCDIR_PATH=\"`cd .. ; pwd`\" \
+- -DOBJDIR_PATH=\"`cd $(objpfx)/..; pwd`\" \
+- -DOBJDIR_ELF_LDSO_PATH=\"`cd $(objpfx)/..; pwd`/elf/$(rtld-installed-name)\" \
++ -DSRCDIR_PATH=\"$(oe_srcdir)\" \
++ -DOBJDIR_PATH=\"$(libdir)/glibc-tests/ptest/tests/glibc-ptest\" \
++ -DOBJDIR_ELF_LDSO_PATH=\"$(slibdir)/$(rtld-installed-name)\" \
+ -DINSTDIR_PATH=\"$(prefix)\" \
+ -DLIBDIR_PATH=\"$(libdir)\" \
+ -DBINDIR_PATH=\"$(bindir)\" \
diff --git a/meta/recipes-core/glibc/glibc_2.35.bb b/meta/recipes-core/glibc/glibc_2.35.bb
index 96fe39c548..751427517f 100644
--- a/meta/recipes-core/glibc/glibc_2.35.bb
+++ b/meta/recipes-core/glibc/glibc_2.35.bb
@@ -16,6 +16,16 @@ CVE_CHECK_IGNORE += "CVE-2019-1010022 CVE-2019-1010023 CVE-2019-1010024"
# Potential patch at https://sourceware.org/bugzilla/show_bug.cgi?id=22853
CVE_CHECK_IGNORE += "CVE-2019-1010025"
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2023-4527
+# This vulnerability was introduced in 2.36 by commit
+# f282cdbe7f436c75864e5640a409a10485e9abb2 resolv: Implement no-aaaa stub resolver option
+# so our version is not yet vulnerable
+# See https://sourceware.org/bugzilla/show_bug.cgi?id=30842
+CVE_CHECK_IGNORE += "CVE-2023-4527"
+
+# To avoid these in cve-check reports since the recipe version did not change
+CVE_CHECK_IGNORE += "CVE-2023-0687 CVE-2023-4813 CVE-2023-4806 CVE-2023-4911 CVE-2023-5156"
+
DEPENDS += "gperf-native bison-native"
NATIVESDKFIXES ?= ""
@@ -48,6 +58,9 @@ SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
file://0022-sysdeps-gnu-configure.ac-Set-libc_cv_rootsbindir-onl.patch \
file://0023-timezone-Make-shell-interpreter-overridable-in-tzsel.patch \
file://0024-fix-create-thread-failed-in-unprivileged-process-BZ-.patch \
+ \
+ file://0001-Revert-Linux-Implement-a-useful-version-of-_startup_.patch \
+ file://0002-get_nscd_addresses-Fix-subscript-typos-BZ-29605.patch \
"
S = "${WORKDIR}/git"
B = "${WORKDIR}/build-${TARGET_SYS}"
diff --git a/meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig-handle-.dynstr-located-in-separate-segment.patch b/meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig-handle-.dynstr-located-in-separate-segment.patch
new file mode 100644
index 0000000000..36f04adfde
--- /dev/null
+++ b/meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig-handle-.dynstr-located-in-separate-segment.patch
@@ -0,0 +1,178 @@
+From 864054a6cb971688a181316b8227ae0361b4d69e Mon Sep 17 00:00:00 2001
+From: Andreas Schwab <schwab@suse.de>
+Date: Wed, 9 Oct 2019 17:46:47 +0200
+Subject: [PATCH] ldconfig: handle .dynstr located in separate segment (bug
+ 25087)
+
+To determine the load offset of the DT_STRTAB section search for the
+segment containing it, instead of using the load offset of the first
+segment.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=58e8f5fd2ba47b6dc47fd4d0a35e4175c7c87aaa]
+
+Backported: ported to support endianness and 32/64 bits.
+Signed-off-by: Fabien Mahot <fabien.mahot@external.desouttertools.com>
+---
+ readelflib.c | 86 +++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 52 insertions(+), 34 deletions(-)
+
+diff --git a/readelflib.c b/readelflib.c
+index a01e1cede3..380aed563d 100644
+--- a/readelflib.c
++++ b/readelflib.c
+@@ -80,7 +80,6 @@ process_elf_file32 (const char *file_name, const char *lib, int *flag,
+ {
+ int i;
+ unsigned int j;
+- Elf32_Addr loadaddr;
+ unsigned int dynamic_addr;
+ size_t dynamic_size;
+ char *program_interpreter;
+@@ -110,7 +109,6 @@ process_elf_file32 (const char *file_name, const char *lib, int *flag,
+ libc5/libc6. */
+ *flag = FLAG_ELF;
+
+- loadaddr = -1;
+ dynamic_addr = 0;
+ dynamic_size = 0;
+ program_interpreter = NULL;
+@@ -121,11 +119,6 @@ process_elf_file32 (const char *file_name, const char *lib, int *flag,
+
+ switch (read32(segment->p_type, be))
+ {
+- case PT_LOAD:
+- if (loadaddr == (Elf32_Addr) -1)
+- loadaddr = read32(segment->p_vaddr, be) - read32(segment->p_offset, be);
+- break;
+-
+ case PT_DYNAMIC:
+ if (dynamic_addr)
+ error (0, 0, _("more than one dynamic segment\n"));
+@@ -188,11 +181,6 @@ process_elf_file32 (const char *file_name, const char *lib, int *flag,
+ }
+
+ }
+- if (loadaddr == (Elf32_Addr) -1)
+- {
+- /* Very strange. */
+- loadaddr = 0;
+- }
+
+ /* Now we can read the dynamic sections. */
+ if (dynamic_size == 0)
+@@ -208,11 +196,32 @@ process_elf_file32 (const char *file_name, const char *lib, int *flag,
+ {
+ check_ptr (dyn_entry);
+ if (read32(dyn_entry->d_tag, be) == DT_STRTAB)
+- {
+- dynamic_strings = (char *) (file_contents + read32(dyn_entry->d_un.d_val, be) - loadaddr);
+- check_ptr (dynamic_strings);
+- break;
+- }
++ {
++ /* Find the file offset of the segment containing the dynamic
++ string table. */
++ Elf32_Off loadoff = -1;
++ for (i = 0, segment = elf_pheader;
++ i < read16(elf_header->e_phnum, be); i++, segment++)
++ {
++ if (read32(segment->p_type, be) == PT_LOAD
++ && read32(dyn_entry->d_un.d_val, be) >= read32(segment->p_vaddr, be)
++ && (read32(dyn_entry->d_un.d_val, be) - read32(segment->p_vaddr, be)
++ < read32(segment->p_filesz, be)))
++ {
++ loadoff = read32(segment->p_vaddr, be) - read32(segment->p_offset, be);
++ break;
++ }
++ }
++ if (loadoff == (Elf32_Off) -1)
++ {
++ /* Very strange. */
++ loadoff = 0;
++ }
++
++ dynamic_strings = (char *) (file_contents + read32(dyn_entry->d_un.d_val, be) - loadoff);
++ check_ptr (dynamic_strings);
++ break;
++ }
+ }
+
+ if (dynamic_strings == NULL)
+@@ -269,7 +278,6 @@ process_elf_file64 (const char *file_name, const char *lib, int *flag,
+ {
+ int i;
+ unsigned int j;
+- Elf64_Addr loadaddr;
+ Elf64_Addr dynamic_addr;
+ Elf64_Xword dynamic_size;
+ char *program_interpreter;
+@@ -347,7 +355,6 @@ process_elf_file64 (const char *file_name, const char *lib, int *flag,
+ break;
+ }
+
+- loadaddr = -1;
+ dynamic_addr = 0;
+ dynamic_size = 0;
+ program_interpreter = NULL;
+@@ -358,11 +365,6 @@ process_elf_file64 (const char *file_name, const char *lib, int *flag,
+
+ switch (read32(segment->p_type, be))
+ {
+- case PT_LOAD:
+- if (loadaddr == (Elf64_Addr) -1)
+- loadaddr = read64(segment->p_vaddr, be) - read64(segment->p_offset, be);
+- break;
+-
+ case PT_DYNAMIC:
+ if (dynamic_addr)
+ error (0, 0, _("more than one dynamic segment\n"));
+@@ -426,11 +428,6 @@ process_elf_file64 (const char *file_name, const char *lib, int *flag,
+ }
+
+ }
+- if (loadaddr == (Elf64_Addr) -1)
+- {
+- /* Very strange. */
+- loadaddr = 0;
+- }
+
+ /* Now we can read the dynamic sections. */
+ if (dynamic_size == 0)
+@@ -446,11 +443,32 @@ process_elf_file64 (const char *file_name, const char *lib, int *flag,
+ {
+ check_ptr (dyn_entry);
+ if (read64(dyn_entry->d_tag, be) == DT_STRTAB)
+- {
+- dynamic_strings = (char *) (file_contents + read64(dyn_entry->d_un.d_val, be) - loadaddr);
+- check_ptr (dynamic_strings);
+- break;
+- }
++ {
++ /* Find the file offset of the segment containing the dynamic
++ string table. */
++ Elf64_Off loadoff = -1;
++ for (i = 0, segment = elf_pheader;
++ i < read16(elf_header->e_phnum, be); i++, segment++)
++ {
++ if (read64(segment->p_type, be) == PT_LOAD
++ && read64(dyn_entry->d_un.d_val, be) >= read64(segment->p_vaddr, be)
++ && (read64(dyn_entry->d_un.d_val, be) - read64(segment->p_vaddr, be)
++ < read64(segment->p_filesz, be)))
++ {
++ loadoff = read64(segment->p_vaddr, be) - read64(segment->p_offset, be);
++ break;
++ }
++ }
++ if (loadoff == (Elf64_Off) -1)
++ {
++ /* Very strange. */
++ loadoff = 0;
++ }
++
++ dynamic_strings = (char *) (file_contents + read64(dyn_entry->d_un.d_val, be) - loadoff);
++ check_ptr (dynamic_strings);
++ break;
++ }
+ }
+
+ if (dynamic_strings == NULL)
diff --git a/meta/recipes-core/glibc/ldconfig-native_2.12.1.bb b/meta/recipes-core/glibc/ldconfig-native_2.12.1.bb
index e867ceb3ec..665a3d324c 100644
--- a/meta/recipes-core/glibc/ldconfig-native_2.12.1.bb
+++ b/meta/recipes-core/glibc/ldconfig-native_2.12.1.bb
@@ -16,6 +16,7 @@ SRC_URI = "file://ldconfig-native-2.12.1.tar.bz2 \
file://add-64-bit-flag-for-ELF64-entries.patch \
file://no-aux-cache.patch \
file://add-riscv-support.patch \
+ file://ldconfig-handle-.dynstr-located-in-separate-segment.patch \
"
PR = "r2"
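
The ldconfig-native patch added above fixes glibc bug 25087 by resolving the DT_STRTAB virtual address through the PT_LOAD segment that actually contains it, instead of reusing the load offset of the first segment. The following is a minimal C sketch of that address-to-file-offset lookup, not the patched readelflib.c itself: it assumes native-endian program headers (the real patch reads every field through read32()/read64() to honour the target endianness), and the two-entry header table in main() is invented for illustration.

#include <elf.h>
#include <stdio.h>

/* Return the file offset backing 'vaddr', or (Elf32_Off) -1 when no PT_LOAD
   segment contains it; the real patch handles that case by falling back to
   offset 0 ("very strange"). */
static Elf32_Off vaddr_to_file_offset(const Elf32_Phdr *phdrs,
                                      unsigned int phnum, Elf32_Addr vaddr)
{
    for (unsigned int i = 0; i < phnum; i++) {
        const Elf32_Phdr *ph = &phdrs[i];
        if (ph->p_type == PT_LOAD
            && vaddr >= ph->p_vaddr
            && vaddr - ph->p_vaddr < ph->p_filesz)
            return ph->p_offset + (vaddr - ph->p_vaddr);
    }
    return (Elf32_Off) -1;
}

int main(void)
{
    /* Hypothetical layout: the dynamic string table sits in the second
       PT_LOAD segment, so the first segment's offset would be wrong. */
    Elf32_Phdr phdrs[] = {
        { .p_type = PT_LOAD, .p_offset = 0x0000, .p_vaddr = 0x00000000, .p_filesz = 0x1000 },
        { .p_type = PT_LOAD, .p_offset = 0x1000, .p_vaddr = 0x00402000, .p_filesz = 0x0800 },
    };
    Elf32_Off off = vaddr_to_file_offset(phdrs, 2, 0x00402100); /* DT_STRTAB d_val */
    printf("file offset: %#lx\n", (unsigned long) off);         /* prints 0x1100 */
    return 0;
}
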
diff --git a/meta/recipes-core/ifupdown/ifupdown_0.8.37.bb b/meta/recipes-core/ifupdown/ifupdown_0.8.39.bb
index 57d4152a39..7096bc94d7 100644
--- a/meta/recipes-core/ifupdown/ifupdown_0.8.37.bb
+++ b/meta/recipes-core/ifupdown/ifupdown_0.8.39.bb
@@ -16,7 +16,7 @@ SRC_URI = "git://salsa.debian.org/debian/ifupdown.git;protocol=https;branch=mast
file://0001-ifupdown-skip-wrong-test-case.patch \
${@bb.utils.contains('DISTRO_FEATURES', 'ptest', 'file://tweak-ptest-script.patch', '', d)} \
"
-SRCREV = "2b4138f36ce3ba37186aa01b502273e0c39ab518"
+SRCREV = "be91dd267b4a8db502a6bbf5758563f7048b8078"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
index d4a3afd0f6..57274262ba 100644
--- a/meta/recipes-core/images/build-appliance-image_15.0.0.bb
+++ b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
@@ -24,7 +24,7 @@ IMAGE_FSTYPES = "wic.vmdk wic.vhd wic.vhdx"
inherit core-image setuptools3
-SRCREV ?= "93491db3bfe95da71518a52b6190f912c2c2fe9b"
+SRCREV ?= "700eac59a68baaba3361ed40ab14fe55e66f8211"
SRC_URI = "git://git.yoctoproject.org/poky;branch=kirkstone \
file://Yocto_Build_Appliance.vmx \
file://Yocto_Build_Appliance.vmxf \
diff --git a/meta/recipes-core/initrdscripts/files/init-install-efi-testfs.sh b/meta/recipes-core/initrdscripts/files/init-install-efi-testfs.sh
index 1fcd29e54c..4bd6ace7b3 100644
--- a/meta/recipes-core/initrdscripts/files/init-install-efi-testfs.sh
+++ b/meta/recipes-core/initrdscripts/files/init-install-efi-testfs.sh
@@ -138,7 +138,7 @@ touch /ssd/etc/controllerimage
if [ -d /ssd/etc/ ] ; then
# We dont want udev to mount our root device while we're booting...
if [ -d /ssd/etc/udev/ ] ; then
- echo "/dev/${device}" >> /ssd/etc/udev/mount.blacklist
+ echo "/dev/${device}" >> /ssd/etc/udev/mount.ignorelist
fi
fi
diff --git a/meta/recipes-core/initrdscripts/files/init-install-efi.sh b/meta/recipes-core/initrdscripts/files/init-install-efi.sh
index f667518b89..ffd3870199 100644
--- a/meta/recipes-core/initrdscripts/files/init-install-efi.sh
+++ b/meta/recipes-core/initrdscripts/files/init-install-efi.sh
@@ -229,7 +229,7 @@ if [ -d /tgt_root/etc/ ] ; then
echo "UUID=$boot_uuid /boot vfat defaults 1 2" >> /tgt_root/etc/fstab
# We dont want udev to mount our root device while we're booting...
if [ -d /tgt_root/etc/udev/ ] ; then
- echo "${device}" >> /tgt_root/etc/udev/mount.blacklist
+ echo "${device}" >> /tgt_root/etc/udev/mount.ignorelist
fi
fi
diff --git a/meta/recipes-core/initrdscripts/files/init-install-testfs.sh b/meta/recipes-core/initrdscripts/files/init-install-testfs.sh
index 7b49001659..8ab74ddc5d 100644
--- a/meta/recipes-core/initrdscripts/files/init-install-testfs.sh
+++ b/meta/recipes-core/initrdscripts/files/init-install-testfs.sh
@@ -164,7 +164,7 @@ if [ -d /tgt_root/etc/ ] ; then
echo "$bootfs /boot ext3 defaults 1 2" >> /tgt_root/etc/fstab
# We dont want udev to mount our root device while we're booting...
if [ -d /tgt_root/etc/udev/ ] ; then
- echo "/dev/${device}" >> /tgt_root/etc/udev/mount.blacklist
+ echo "/dev/${device}" >> /tgt_root/etc/udev/mount.ignorelist
fi
fi
umount /tgt_root
diff --git a/meta/recipes-core/initrdscripts/files/init-install.sh b/meta/recipes-core/initrdscripts/files/init-install.sh
index e71579631b..df33791ec7 100644
--- a/meta/recipes-core/initrdscripts/files/init-install.sh
+++ b/meta/recipes-core/initrdscripts/files/init-install.sh
@@ -261,7 +261,7 @@ if [ -d /tgt_root/etc/ ] ; then
echo "$bootdev /boot ext3 defaults 1 2" >> /tgt_root/etc/fstab
# We dont want udev to mount our root device while we're booting...
if [ -d /tgt_root/etc/udev/ ] ; then
- echo "${device}" >> /tgt_root/etc/udev/mount.blacklist
+ echo "${device}" >> /tgt_root/etc/udev/mount.ignorelist
fi
fi
umount /tgt_root
diff --git a/meta/recipes-core/initrdscripts/initramfs-framework/finish b/meta/recipes-core/initrdscripts/initramfs-framework/finish
index f08a920867..ac0de9f996 100755
--- a/meta/recipes-core/initrdscripts/initramfs-framework/finish
+++ b/meta/recipes-core/initrdscripts/initramfs-framework/finish
@@ -26,6 +26,15 @@ finish_run() {
info "Switching root to '$ROOTFS_DIR'..."
+ debug "Moving basic mounts onto rootfs"
+ for dir in `awk '/\/dev.* \/run\/media/{print $2}' /proc/mounts`; do
+ # Parse any OCT or HEX encoded chars such as spaces
+ # in the mount points to actual ASCII chars
+ dir=`printf $dir`
+ mkdir -p "${ROOTFS_DIR}/media/${dir##*/}"
+ mount -n --move "$dir" "${ROOTFS_DIR}/media/${dir##*/}"
+ done
+
debug "Moving /dev, /proc and /sys onto rootfs..."
mount --move /dev $ROOTFS_DIR/dev
mount --move /proc $ROOTFS_DIR/proc
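
A note on the initramfs-framework finish change above: the kernel escapes awkward characters in /proc/mounts mount points as octal sequences (a space shows up as \040), which is why the new loop passes each path through printf before creating the matching directory under the root filesystem and moving the mount there. The C sketch below reproduces those two steps in isolation; the path is made up, and the actual move (mount(2) with MS_MOVE) is only shown as a comment because it needs root and a live mount.

#include <stdio.h>

/* Decode "\nnn" octal escapes in place, the way printf(1) does in the
   script above; /proc/mounts records "USB DISK" as "USB\040DISK". */
static void decode_octal_escapes(char *s)
{
    char *out = s;
    while (*s) {
        if (s[0] == '\\'
            && s[1] >= '0' && s[1] <= '7'
            && s[2] >= '0' && s[2] <= '7'
            && s[3] >= '0' && s[3] <= '7') {
            *out++ = (char) (((s[1] - '0') << 6) | ((s[2] - '0') << 3) | (s[3] - '0'));
            s += 4;
        } else {
            *out++ = *s++;
        }
    }
    *out = '\0';
}

int main(void)
{
    char dir[] = "/run/media/USB\\040DISK";    /* hypothetical /proc/mounts field */
    decode_octal_escapes(dir);
    printf("decoded mount point: %s\n", dir);  /* "/run/media/USB DISK" */

    /* The script then effectively does:
     *   mkdir -p "$ROOTFS_DIR/media/USB DISK"
     *   mount("/run/media/USB DISK", "$ROOTFS_DIR/media/USB DISK", NULL, MS_MOVE, NULL);
     * omitted here because it requires privileges and an existing mount. */
    return 0;
}
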
diff --git a/meta/recipes-core/initscripts/initscripts_1.0.bb b/meta/recipes-core/initscripts/initscripts_1.0.bb
index 2244d1b292..7c9d9ca4f1 100644
--- a/meta/recipes-core/initscripts/initscripts_1.0.bb
+++ b/meta/recipes-core/initscripts/initscripts_1.0.bb
@@ -130,7 +130,7 @@ do_install () {
update-rc.d -r ${D} rmnologin.sh start 99 2 3 4 5 .
update-rc.d -r ${D} sendsigs start 20 0 6 .
update-rc.d -r ${D} urandom start 38 S 0 6 .
- update-rc.d -r ${D} umountnfs.sh start 31 0 1 6 .
+ update-rc.d -r ${D} umountnfs.sh stop 31 0 1 6 .
update-rc.d -r ${D} umountfs start 40 0 6 .
update-rc.d -r ${D} reboot start 90 6 .
update-rc.d -r ${D} halt start 90 0 .
diff --git a/meta/recipes-core/libxcrypt/files/0001-Make-BuildCommon.pm-compatible-with-latest-perl.patch b/meta/recipes-core/libxcrypt/files/0001-Make-BuildCommon.pm-compatible-with-latest-perl.patch
new file mode 100644
index 0000000000..b3e43d5815
--- /dev/null
+++ b/meta/recipes-core/libxcrypt/files/0001-Make-BuildCommon.pm-compatible-with-latest-perl.patch
@@ -0,0 +1,50 @@
+From c3ec04f1aee68970b82e4b033bee1477e76798f9 Mon Sep 17 00:00:00 2001
+From: Leon Timmermans <fawaka@gmail.com>
+Date: Tue, 6 Jun 2023 17:03:57 +0200
+Subject: [PATCH] Make BuildCommon.pm compatible with latest perl
+
+It was previously using an experimental feature that has since been dropped.
+This removes the use of that feature.
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+Upstream-Status: Backport [v4.4.35 https://github.com/besser82/libxcrypt/commit/ce562f4d33dc090fcd8f6ea1af3ba32cdc2b3c9c]
+---
+ build-aux/scripts/BuildCommon.pm | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/build-aux/scripts/BuildCommon.pm b/build-aux/scripts/BuildCommon.pm
+index 0e6f2a3..c38ba21 100644
+--- a/build-aux/scripts/BuildCommon.pm
++++ b/build-aux/scripts/BuildCommon.pm
+@@ -11,7 +11,6 @@ use v5.14; # implicit use strict, use feature ':5.14'
+ use warnings FATAL => 'all';
+ use utf8;
+ use open qw(:utf8);
+-no if $] >= 5.018, warnings => 'experimental::smartmatch';
+ no if $] >= 5.022, warnings => 'experimental::re_strict';
+ use if $] >= 5.022, re => 'strict';
+
+@@ -519,19 +518,19 @@ sub parse_symver_args {
+ my $COMPAT_ABI;
+ local $_;
+ for (@args) {
+- when (/^SYMVER_MIN=(.+)$/) {
++ if (/^SYMVER_MIN=(.+)$/) {
+ $usage_error->() if defined $SYMVER_MIN;
+ $SYMVER_MIN = $1;
+ }
+- when (/^SYMVER_FLOOR=(.+)$/) {
++ elsif (/^SYMVER_FLOOR=(.+)$/) {
+ $usage_error->() if defined $SYMVER_FLOOR;
+ $SYMVER_FLOOR = $1;
+ }
+- when (/^COMPAT_ABI=(.+)$/) {
++ elsif (/^COMPAT_ABI=(.+)$/) {
+ $usage_error->() if defined $COMPAT_ABI;
+ $COMPAT_ABI = $1;
+ }
+- default {
++ else {
+ $usage_error->() if defined $map_in;
+ $map_in = $_;
+ }
diff --git a/meta/recipes-core/libxcrypt/files/0002-Remove-smartmatch-usage-from-gen-crypt-h.patch b/meta/recipes-core/libxcrypt/files/0002-Remove-smartmatch-usage-from-gen-crypt-h.patch
new file mode 100644
index 0000000000..603f52f792
--- /dev/null
+++ b/meta/recipes-core/libxcrypt/files/0002-Remove-smartmatch-usage-from-gen-crypt-h.patch
@@ -0,0 +1,62 @@
+From 95d6e03ae37f4ec948474d111105bbdd2938aba2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Andreas=20K=2E=20H=C3=BCttel?= <dilfridge@gentoo.org>
+Date: Sun, 25 Jun 2023 01:35:08 +0200
+Subject: [PATCH] Remove smartmatch usage from gen-crypt-h
+
+Needed for Perl 5.38
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+Upstream-Status: Backport [v4.4.36 https://github.com/besser82/libxcrypt/commit/95d6e03ae37f4ec948474d111105bbdd2938aba2]
+---
+ build-aux/scripts/gen-crypt-h | 31 ++++++++++++++-----------------
+ 1 file changed, 14 insertions(+), 17 deletions(-)
+
+diff --git a/build-aux/scripts/gen-crypt-h b/build-aux/scripts/gen-crypt-h
+index 12aecf6..b113b79 100644
+--- a/build-aux/scripts/gen-crypt-h
++++ b/build-aux/scripts/gen-crypt-h
+@@ -12,7 +12,6 @@ use v5.14; # implicit use strict, use feature ':5.14'
+ use warnings FATAL => 'all';
+ use utf8;
+ use open qw(:std :utf8);
+-no if $] >= 5.018, warnings => 'experimental::smartmatch';
+ no if $] >= 5.022, warnings => 'experimental::re_strict';
+ use if $] >= 5.022, re => 'strict';
+
+@@ -37,22 +36,20 @@ sub process_config_h {
+ local $_;
+ while (<$fh>) {
+ chomp;
+- # Yes, 'given $_' is really required here.
+- given ($_) {
+- when ('#define HAVE_SYS_CDEFS_H 1') {
+- $have_sys_cdefs_h = 1;
+- }
+- when ('#define HAVE_SYS_CDEFS_BEGIN_END_DECLS 1') {
+- $have_sys_cdefs_begin_end_decls = 1;
+- }
+- when ('#define HAVE_SYS_CDEFS_THROW 1') {
+- $have_sys_cdefs_throw = 1;
+- }
+- when (/^#define PACKAGE_VERSION "((\d+)\.(\d+)\.\d+)"$/) {
+- $substs{XCRYPT_VERSION_STR} = $1;
+- $substs{XCRYPT_VERSION_MAJOR} = $2;
+- $substs{XCRYPT_VERSION_MINOR} = $3;
+- }
++
++ if ($_ eq '#define HAVE_SYS_CDEFS_H 1') {
++ $have_sys_cdefs_h = 1;
++ }
++ elsif ($_ eq '#define HAVE_SYS_CDEFS_BEGIN_END_DECLS 1') {
++ $have_sys_cdefs_begin_end_decls = 1;
++ }
++ elsif ($_ eq '#define HAVE_SYS_CDEFS_THROW 1') {
++ $have_sys_cdefs_throw = 1;
++ }
++ elsif (/^#define PACKAGE_VERSION "((\d+)\.(\d+)\.\d+)"$/) {
++ $substs{XCRYPT_VERSION_STR} = $1;
++ $substs{XCRYPT_VERSION_MAJOR} = $2;
++ $substs{XCRYPT_VERSION_MINOR} = $3;
+ }
+ }
+
diff --git a/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.28.bb b/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.33.bb
index ec9f9f4fa3..ec9f9f4fa3 100644
--- a/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.28.bb
+++ b/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.33.bb
diff --git a/meta/recipes-core/libxcrypt/libxcrypt.inc b/meta/recipes-core/libxcrypt/libxcrypt.inc
index 39ba2636ff..342cbd0d06 100644
--- a/meta/recipes-core/libxcrypt/libxcrypt.inc
+++ b/meta/recipes-core/libxcrypt/libxcrypt.inc
@@ -10,19 +10,16 @@ LIC_FILES_CHKSUM = "file://LICENSING;md5=c0a30e2b1502c55a7f37e412cd6c6a4b \
inherit autotools pkgconfig
SRC_URI = "git://github.com/besser82/libxcrypt.git;branch=${SRCBRANCH};protocol=https"
-SRCREV = "50cf2b6dd4fdf04309445f2eec8de7051d953abf"
-SRCBRANCH ?= "develop"
+SRCREV = "d7fe1ac04c326dba7e0440868889d1dccb41a175"
+SRCBRANCH ?= "master"
-SRC_URI += "file://fix_cflags_handling.patch"
+SRC_URI += "file://fix_cflags_handling.patch \
+ file://0001-Make-BuildCommon.pm-compatible-with-latest-perl.patch \
+ file://0002-Remove-smartmatch-usage-from-gen-crypt-h.patch \
+"
PROVIDES = "virtual/crypt"
-FILES:${PN} = "${libdir}/libcrypt*.so.* \
- ${libdir}/libcrypt-*.so \
- ${libdir}/libowcrypt*.so.* \
- ${libdir}/libowcrypt-*.so \
-"
-
S = "${WORKDIR}/git"
BUILD_CPPFLAGS = "-I${STAGING_INCDIR_NATIVE}"
diff --git a/meta/recipes-core/libxcrypt/libxcrypt_4.4.28.bb b/meta/recipes-core/libxcrypt/libxcrypt_4.4.33.bb
index 79dba2f6dc..79dba2f6dc 100644
--- a/meta/recipes-core/libxcrypt/libxcrypt_4.4.28.bb
+++ b/meta/recipes-core/libxcrypt/libxcrypt_4.4.33.bb
diff --git a/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch b/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch
new file mode 100644
index 0000000000..c6567ac878
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch
@@ -0,0 +1,814 @@
+From 2c20198b1ddb1bfb47269b8caf929ffb83748f78 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Thu, 21 Apr 2022 00:45:58 +0200
+Subject: [PATCH] Port gentest.py to Python 3
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/343fc1421cdae097fa6c4cffeb1a065a40be6bbb]
+
+* fixes:
+
+make[1]: 'testReader' is up to date.
+ File "../libxml2-2.9.10/gentest.py", line 11
+ print "libxml2 python bindings not available, skipping testapi.c generation"
+ ^
+SyntaxError: Missing parentheses in call to 'print'. Did you mean print("libxml2 python bindings not available, skipping testapi.c generation")?
+make[1]: [Makefile:2078: testapi.c] Error 1 (ignored)
+
+...
+
+make[1]: 'testReader' is up to date.
+ File "../libxml2-2.9.10/gentest.py", line 271
+ return 1
+ ^
+TabError: inconsistent use of tabs and spaces in indentation
+make[1]: [Makefile:2078: testapi.c] Error 1 (ignored)
+
+...
+
+aarch64-oe-linux-gcc: error: testapi.c: No such file or directory
+aarch64-oe-linux-gcc: fatal error: no input files
+compilation terminated.
+make[1]: *** [Makefile:1275: testapi.o] Error 1
+
+But there is still a bit of a mystery as to why it worked before, because check-am
+calls gentest.py with $(PYTHON), so it ignores the shebang in the script,
+and libxml2 is using python3native (through python3targetconfig.bbclass),
+so the call ends up being something like:
+
+libxml2/2.9.10-r0/recipe-sysroot-native/usr/bin/python3-native/python3 gentest.py
+
+But that still fails (now without SyntaxError) with:
+libxml2 python bindings not available, skipping testapi.c generation
+
+because we don't have a dependency on libxml2-native (to provide the libxml2
+python bindings from python3native) and the exported PYTHON_SITE_PACKAGES
+might be useless (e.g. /usr/lib/python3.8/site-packages on Ubuntu-22.10
+which uses python 3.10 and there is no site-packages with libxml2)
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+
+---
+ gentest.py | 421 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 209 insertions(+), 212 deletions(-)
+
+diff --git a/gentest.py b/gentest.py
+index b6cd866..af15a4f 100755
+--- a/gentest.py
++++ b/gentest.py
+@@ -8,7 +8,7 @@ import string
+ try:
+ import libxml2
+ except:
+- print "libxml2 python bindings not available, skipping testapi.c generation"
++ print("libxml2 python bindings not available, skipping testapi.c generation")
+ sys.exit(0)
+
+ if len(sys.argv) > 1:
+@@ -227,7 +227,7 @@ extra_post_call = {
+ if (old != NULL) {
+ xmlUnlinkNode(old);
+ xmlFreeNode(old) ; old = NULL ; }
+- ret_val = NULL;""",
++\t ret_val = NULL;""",
+ "xmlTextMerge":
+ """if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
+ xmlUnlinkNode(second);
+@@ -236,7 +236,7 @@ extra_post_call = {
+ """if ((ret_val != NULL) && (ret_val != ncname) &&
+ (ret_val != prefix) && (ret_val != memory))
+ xmlFree(ret_val);
+- ret_val = NULL;""",
++\t ret_val = NULL;""",
+ "xmlNewDocElementContent":
+ """xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
+ "xmlDictReference": "xmlDictFree(dict);",
+@@ -268,29 +268,29 @@ modules = []
+ def is_skipped_module(name):
+ for mod in skipped_modules:
+ if mod == name:
+- return 1
++ return 1
+ return 0
+
+ def is_skipped_function(name):
+ for fun in skipped_functions:
+ if fun == name:
+- return 1
++ return 1
+ # Do not test destructors
+- if string.find(name, 'Free') != -1:
++ if name.find('Free') != -1:
+ return 1
+ return 0
+
+ def is_skipped_memcheck(name):
+ for fun in skipped_memcheck:
+ if fun == name:
+- return 1
++ return 1
+ return 0
+
+ missing_types = {}
+ def add_missing_type(name, func):
+ try:
+ list = missing_types[name]
+- list.append(func)
++ list.append(func)
+ except:
+ missing_types[name] = [func]
+
+@@ -310,7 +310,7 @@ def add_missing_functions(name, module):
+ missing_functions_nr = missing_functions_nr + 1
+ try:
+ list = missing_functions[module]
+- list.append(name)
++ list.append(name)
+ except:
+ missing_functions[module] = [name]
+
+@@ -319,45 +319,45 @@ def add_missing_functions(name, module):
+ #
+
+ def type_convert(str, name, info, module, function, pos):
+-# res = string.replace(str, " ", " ")
+-# res = string.replace(str, " ", " ")
+-# res = string.replace(str, " ", " ")
+- res = string.replace(str, " *", "_ptr")
+-# res = string.replace(str, "*", "_ptr")
+- res = string.replace(res, " ", "_")
++# res = str.replace(" ", " ")
++# res = str.replace(" ", " ")
++# res = str.replace(" ", " ")
++ res = str.replace(" *", "_ptr")
++# res = str.replace("*", "_ptr")
++ res = res.replace(" ", "_")
+ if res == 'const_char_ptr':
+- if string.find(name, "file") != -1 or \
+- string.find(name, "uri") != -1 or \
+- string.find(name, "URI") != -1 or \
+- string.find(info, "filename") != -1 or \
+- string.find(info, "URI") != -1 or \
+- string.find(info, "URL") != -1:
+- if string.find(function, "Save") != -1 or \
+- string.find(function, "Create") != -1 or \
+- string.find(function, "Write") != -1 or \
+- string.find(function, "Fetch") != -1:
+- return('fileoutput')
+- return('filepath')
++ if name.find("file") != -1 or \
++ name.find("uri") != -1 or \
++ name.find("URI") != -1 or \
++ info.find("filename") != -1 or \
++ info.find("URI") != -1 or \
++ info.find("URL") != -1:
++ if function.find("Save") != -1 or \
++ function.find("Create") != -1 or \
++ function.find("Write") != -1 or \
++ function.find("Fetch") != -1:
++ return('fileoutput')
++ return('filepath')
+ if res == 'void_ptr':
+ if module == 'nanoftp' and name == 'ctx':
+- return('xmlNanoFTPCtxtPtr')
++ return('xmlNanoFTPCtxtPtr')
+ if function == 'xmlNanoFTPNewCtxt' or \
+- function == 'xmlNanoFTPConnectTo' or \
+- function == 'xmlNanoFTPOpen':
+- return('xmlNanoFTPCtxtPtr')
++ function == 'xmlNanoFTPConnectTo' or \
++ function == 'xmlNanoFTPOpen':
++ return('xmlNanoFTPCtxtPtr')
+ if module == 'nanohttp' and name == 'ctx':
+- return('xmlNanoHTTPCtxtPtr')
+- if function == 'xmlNanoHTTPMethod' or \
+- function == 'xmlNanoHTTPMethodRedir' or \
+- function == 'xmlNanoHTTPOpen' or \
+- function == 'xmlNanoHTTPOpenRedir':
+- return('xmlNanoHTTPCtxtPtr');
++ return('xmlNanoHTTPCtxtPtr')
++ if function == 'xmlNanoHTTPMethod' or \
++ function == 'xmlNanoHTTPMethodRedir' or \
++ function == 'xmlNanoHTTPOpen' or \
++ function == 'xmlNanoHTTPOpenRedir':
++ return('xmlNanoHTTPCtxtPtr');
+ if function == 'xmlIOHTTPOpen':
+- return('xmlNanoHTTPCtxtPtr')
+- if string.find(name, "data") != -1:
+- return('userdata')
+- if string.find(name, "user") != -1:
+- return('userdata')
++ return('xmlNanoHTTPCtxtPtr')
++ if name.find("data") != -1:
++ return('userdata')
++ if name.find("user") != -1:
++ return('userdata')
+ if res == 'xmlDoc_ptr':
+ res = 'xmlDocPtr'
+ if res == 'xmlNode_ptr':
+@@ -366,18 +366,18 @@ def type_convert(str, name, info, module, function, pos):
+ res = 'xmlDictPtr'
+ if res == 'xmlNodePtr' and pos != 0:
+ if (function == 'xmlAddChild' and pos == 2) or \
+- (function == 'xmlAddChildList' and pos == 2) or \
++ (function == 'xmlAddChildList' and pos == 2) or \
+ (function == 'xmlAddNextSibling' and pos == 2) or \
+ (function == 'xmlAddSibling' and pos == 2) or \
+ (function == 'xmlDocSetRootElement' and pos == 2) or \
+ (function == 'xmlReplaceNode' and pos == 2) or \
+ (function == 'xmlTextMerge') or \
+- (function == 'xmlAddPrevSibling' and pos == 2):
+- return('xmlNodePtr_in');
++ (function == 'xmlAddPrevSibling' and pos == 2):
++ return('xmlNodePtr_in');
+ if res == 'const xmlBufferPtr':
+ res = 'xmlBufferPtr'
+ if res == 'xmlChar_ptr' and name == 'name' and \
+- string.find(function, "EatName") != -1:
++ function.find("EatName") != -1:
+ return('eaten_name')
+ if res == 'void_ptr*':
+ res = 'void_ptr_ptr'
+@@ -393,7 +393,7 @@ def type_convert(str, name, info, module, function, pos):
+ res = 'debug_FILE_ptr';
+ if res == 'int' and name == 'options':
+ if module == 'parser' or module == 'xmlreader':
+- res = 'parseroptions'
++ res = 'parseroptions'
+
+ return res
+
+@@ -402,28 +402,28 @@ known_param_types = []
+ def is_known_param_type(name):
+ for type in known_param_types:
+ if type == name:
+- return 1
++ return 1
+ return name[-3:] == 'Ptr' or name[-4:] == '_ptr'
+
+ def generate_param_type(name, rtype):
+ global test
+ for type in known_param_types:
+ if type == name:
+- return
++ return
+ for type in generated_param_types:
+ if type == name:
+- return
++ return
+
+ if name[-3:] == 'Ptr' or name[-4:] == '_ptr':
+ if rtype[0:6] == 'const ':
+- crtype = rtype[6:]
+- else:
+- crtype = rtype
++ crtype = rtype[6:]
++ else:
++ crtype = rtype
+
+ define = 0
+- if modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
++ if module in modules_defines:
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
+ test.write("""
+ #define gen_nb_%s 1
+ static %s gen_%s(int no ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
+@@ -433,7 +433,7 @@ static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTR
+ }
+ """ % (name, crtype, name, name, rtype))
+ if define == 1:
+- test.write("#endif\n\n")
++ test.write("#endif\n\n")
+ add_generated_param_type(name)
+
+ #
+@@ -445,7 +445,7 @@ known_return_types = []
+ def is_known_return_type(name):
+ for type in known_return_types:
+ if type == name:
+- return 1
++ return 1
+ return 0
+
+ #
+@@ -471,7 +471,7 @@ def compare_and_save():
+ try:
+ os.system("rm testapi.c; mv testapi.c.new testapi.c")
+ except:
+- os.system("mv testapi.c.new testapi.c")
++ os.system("mv testapi.c.new testapi.c")
+ print("Updated testapi.c")
+ else:
+ print("Generated testapi.c is identical")
+@@ -481,17 +481,17 @@ while line != "":
+ if line == "/* CUT HERE: everything below that line is generated */\n":
+ break;
+ if line[0:15] == "#define gen_nb_":
+- type = string.split(line[15:])[0]
+- known_param_types.append(type)
++ type = line[15:].split()[0]
++ known_param_types.append(type)
+ if line[0:19] == "static void desret_":
+- type = string.split(line[19:], '(')[0]
+- known_return_types.append(type)
++ type = line[19:].split('(')[0]
++ known_return_types.append(type)
+ test.write(line)
+ line = input.readline()
+ input.close()
+
+ if line == "":
+- print "Could not find the CUT marker in testapi.c skipping generation"
++ print("Could not find the CUT marker in testapi.c skipping generation")
+ test.close()
+ sys.exit(0)
+
+@@ -505,7 +505,7 @@ test.write("/* CUT HERE: everything below that line is generated */\n")
+ #
+ doc = libxml2.readFile(srcPref + 'doc/libxml2-api.xml', None, 0)
+ if doc == None:
+- print "Failed to load doc/libxml2-api.xml"
++ print("Failed to load doc/libxml2-api.xml")
+ sys.exit(1)
+ ctxt = doc.xpathNewContext()
+
+@@ -519,9 +519,9 @@ for arg in args:
+ mod = arg.xpathEval('string(../@file)')
+ func = arg.xpathEval('string(../@name)')
+ if (mod not in skipped_modules) and (func not in skipped_functions):
+- type = arg.xpathEval('string(@type)')
+- if not argtypes.has_key(type):
+- argtypes[type] = func
++ type = arg.xpathEval('string(@type)')
++ if type not in argtypes:
++ argtypes[type] = func
+
+ # similarly for return types
+ rettypes = {}
+@@ -531,8 +531,8 @@ for ret in rets:
+ func = ret.xpathEval('string(../@name)')
+ if (mod not in skipped_modules) and (func not in skipped_functions):
+ type = ret.xpathEval('string(@type)')
+- if not rettypes.has_key(type):
+- rettypes[type] = func
++ if type not in rettypes:
++ rettypes[type] = func
+
+ #
+ # Generate constructors and return type handling for all enums
+@@ -549,49 +549,49 @@ for enum in enums:
+ continue;
+ define = 0
+
+- if argtypes.has_key(name) and is_known_param_type(name) == 0:
+- values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
+- i = 0
+- vals = []
+- for value in values:
+- vname = value.xpathEval('string(@name)')
+- if vname == None:
+- continue;
+- i = i + 1
+- if i >= 5:
+- break;
+- vals.append(vname)
+- if vals == []:
+- print "Didn't find any value for enum %s" % (name)
+- continue
+- if modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
+- test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
+- test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
+- (name, name))
+- i = 1
+- for value in vals:
+- test.write(" if (no == %d) return(%s);\n" % (i, value))
+- i = i + 1
+- test.write(""" return(0);
++ if (name in argtypes) and is_known_param_type(name) == 0:
++ values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
++ i = 0
++ vals = []
++ for value in values:
++ vname = value.xpathEval('string(@name)')
++ if vname == None:
++ continue;
++ i = i + 1
++ if i >= 5:
++ break;
++ vals.append(vname)
++ if vals == []:
++ print("Didn't find any value for enum %s" % (name))
++ continue
++ if module in modules_defines:
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
++ test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
++ test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
++ (name, name))
++ i = 1
++ for value in vals:
++ test.write(" if (no == %d) return(%s);\n" % (i, value))
++ i = i + 1
++ test.write(""" return(0);
+ }
+
+ static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
+ }
+
+ """ % (name, name));
+- known_param_types.append(name)
++ known_param_types.append(name)
+
+ if (is_known_return_type(name) == 0) and (name in rettypes):
+- if define == 0 and modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
++ if define == 0 and (module in modules_defines):
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
+ test.write("""static void desret_%s(%s val ATTRIBUTE_UNUSED) {
+ }
+
+ """ % (name, name))
+- known_return_types.append(name)
++ known_return_types.append(name)
+ if define == 1:
+ test.write("#endif\n\n")
+
+@@ -615,9 +615,9 @@ for file in headers:
+ # do not test deprecated APIs
+ #
+ desc = file.xpathEval('string(description)')
+- if string.find(desc, 'DEPRECATED') != -1:
+- print "Skipping deprecated interface %s" % name
+- continue;
++ if desc.find('DEPRECATED') != -1:
++ print("Skipping deprecated interface %s" % name)
++ continue;
+
+ test.write("#include <libxml/%s.h>\n" % name)
+ modules.append(name)
+@@ -679,7 +679,7 @@ def generate_test(module, node):
+ # and store the information for the generation
+ #
+ try:
+- args = node.xpathEval("arg")
++ args = node.xpathEval("arg")
+ except:
+ args = []
+ t_args = []
+@@ -687,37 +687,37 @@ def generate_test(module, node):
+ for arg in args:
+ n = n + 1
+ rtype = arg.xpathEval("string(@type)")
+- if rtype == 'void':
+- break;
+- info = arg.xpathEval("string(@info)")
+- nam = arg.xpathEval("string(@name)")
++ if rtype == 'void':
++ break;
++ info = arg.xpathEval("string(@info)")
++ nam = arg.xpathEval("string(@name)")
+ type = type_convert(rtype, nam, info, module, name, n)
+- if is_known_param_type(type) == 0:
+- add_missing_type(type, name);
+- no_gen = 1
++ if is_known_param_type(type) == 0:
++ add_missing_type(type, name);
++ no_gen = 1
+ if (type[-3:] == 'Ptr' or type[-4:] == '_ptr') and \
+- rtype[0:6] == 'const ':
+- crtype = rtype[6:]
+- else:
+- crtype = rtype
+- t_args.append((nam, type, rtype, crtype, info))
++ rtype[0:6] == 'const ':
++ crtype = rtype[6:]
++ else:
++ crtype = rtype
++ t_args.append((nam, type, rtype, crtype, info))
+
+ try:
+- rets = node.xpathEval("return")
++ rets = node.xpathEval("return")
+ except:
+ rets = []
+ t_ret = None
+ for ret in rets:
+ rtype = ret.xpathEval("string(@type)")
+- info = ret.xpathEval("string(@info)")
++ info = ret.xpathEval("string(@info)")
+ type = type_convert(rtype, 'return', info, module, name, 0)
+- if rtype == 'void':
+- break
+- if is_known_return_type(type) == 0:
+- add_missing_type(type, name);
+- no_gen = 1
+- t_ret = (type, rtype, info)
+- break
++ if rtype == 'void':
++ break
++ if is_known_return_type(type) == 0:
++ add_missing_type(type, name);
++ no_gen = 1
++ t_ret = (type, rtype, info)
++ break
+
+ if no_gen == 0:
+ for t_arg in t_args:
+@@ -733,7 +733,7 @@ test_%s(void) {
+
+ if no_gen == 1:
+ add_missing_functions(name, module)
+- test.write("""
++ test.write("""
+ /* missing type support */
+ return(test_ret);
+ }
+@@ -742,22 +742,22 @@ test_%s(void) {
+ return
+
+ try:
+- conds = node.xpathEval("cond")
+- for cond in conds:
+- test.write("#if %s\n" % (cond.get_content()))
+- nb_cond = nb_cond + 1
++ conds = node.xpathEval("cond")
++ for cond in conds:
++ test.write("#if %s\n" % (cond.get_content()))
++ nb_cond = nb_cond + 1
+ except:
+ pass
+
+ define = 0
+- if function_defines.has_key(name):
++ if name in function_defines:
+ test.write("#ifdef %s\n" % (function_defines[name]))
+- define = 1
++ define = 1
+
+ # Declare the memory usage counter
+ no_mem = is_skipped_memcheck(name)
+ if no_mem == 0:
+- test.write(" int mem_base;\n");
++ test.write(" int mem_base;\n");
+
+ # Declare the return value
+ if t_ret != None:
+@@ -766,29 +766,29 @@ test_%s(void) {
+ # Declare the arguments
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- # add declaration
+- test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
+- test.write(" int n_%s;\n" % (nam))
++ # add declaration
++ test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
++ test.write(" int n_%s;\n" % (nam))
+ test.write("\n")
+
+ # Cascade loop on of each argument list of values
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- #
+- test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
+- nam, nam, type, nam))
++ #
++ test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
++ nam, nam, type, nam))
+
+ # log the memory usage
+ if no_mem == 0:
+- test.write(" mem_base = xmlMemBlocks();\n");
++ test.write(" mem_base = xmlMemBlocks();\n");
+
+ # prepare the call
+ i = 0;
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- #
+- test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
+- i = i + 1;
++ #
++ test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
++ i = i + 1;
+
+ # add checks to avoid out-of-bounds array access
+ i = 0;
+@@ -797,7 +797,7 @@ test_%s(void) {
+ # assume that "size", "len", and "start" parameters apply to either
+ # the nearest preceding or following char pointer
+ if type == "int" and (nam == "size" or nam == "len" or nam == "start"):
+- for j in range(i - 1, -1, -1) + range(i + 1, len(t_args)):
++ for j in (*range(i - 1, -1, -1), *range(i + 1, len(t_args))):
+ (bnam, btype) = t_args[j][:2]
+ if btype == "const_char_ptr" or btype == "const_xmlChar_ptr":
+ test.write(
+@@ -806,42 +806,42 @@ test_%s(void) {
+ " continue;\n"
+ % (bnam, nam, bnam))
+ break
+- i = i + 1;
++ i = i + 1;
+
+ # do the call, and clanup the result
+- if extra_pre_call.has_key(name):
+- test.write(" %s\n"% (extra_pre_call[name]))
++ if name in extra_pre_call:
++ test.write(" %s\n"% (extra_pre_call[name]))
+ if t_ret != None:
+- test.write("\n ret_val = %s(" % (name))
+- need = 0
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg
+- if need:
+- test.write(", ")
+- else:
+- need = 1
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s" % nam);
+- test.write(");\n")
+- if extra_post_call.has_key(name):
+- test.write(" %s\n"% (extra_post_call[name]))
+- test.write(" desret_%s(ret_val);\n" % t_ret[0])
++ test.write("\n ret_val = %s(" % (name))
++ need = 0
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg
++ if need:
++ test.write(", ")
++ else:
++ need = 1
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s" % nam);
++ test.write(");\n")
++ if name in extra_post_call:
++ test.write(" %s\n"% (extra_post_call[name]))
++ test.write(" desret_%s(ret_val);\n" % t_ret[0])
+ else:
+- test.write("\n %s(" % (name));
+- need = 0;
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg;
+- if need:
+- test.write(", ")
+- else:
+- need = 1
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s" % nam)
+- test.write(");\n")
+- if extra_post_call.has_key(name):
+- test.write(" %s\n"% (extra_post_call[name]))
++ test.write("\n %s(" % (name));
++ need = 0;
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg;
++ if need:
++ test.write(", ")
++ else:
++ need = 1
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s" % nam)
++ test.write(");\n")
++ if name in extra_post_call:
++ test.write(" %s\n"% (extra_post_call[name]))
+
+ test.write(" call_tests++;\n");
+
+@@ -849,32 +849,32 @@ test_%s(void) {
+ i = 0;
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- # This is a hack to prevent generating a destructor for the
+- # 'input' argument in xmlTextReaderSetup. There should be
+- # a better, more generic way to do this!
+- if string.find(info, 'destroy') == -1:
+- test.write(" des_%s(n_%s, " % (type, nam))
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s, %d);\n" % (nam, i))
+- i = i + 1;
++ # This is a hack to prevent generating a destructor for the
++ # 'input' argument in xmlTextReaderSetup. There should be
++ # a better, more generic way to do this!
++ if info.find('destroy') == -1:
++ test.write(" des_%s(n_%s, " % (type, nam))
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s, %d);\n" % (nam, i))
++ i = i + 1;
+
+ test.write(" xmlResetLastError();\n");
+ # Check the memory usage
+ if no_mem == 0:
+- test.write(""" if (mem_base != xmlMemBlocks()) {
++ test.write(""" if (mem_base != xmlMemBlocks()) {
+ printf("Leak of %%d blocks found in %s",
+- xmlMemBlocks() - mem_base);
+- test_ret++;
++\t xmlMemBlocks() - mem_base);
++\t test_ret++;
+ """ % (name));
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg;
+- test.write(""" printf(" %%d", n_%s);\n""" % (nam))
+- test.write(""" printf("\\n");\n""")
+- test.write(" }\n")
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg;
++ test.write(""" printf(" %%d", n_%s);\n""" % (nam))
++ test.write(""" printf("\\n");\n""")
++ test.write(" }\n")
+
+ for arg in t_args:
+- test.write(" }\n")
++ test.write(" }\n")
+
+ test.write(" function_tests++;\n")
+ #
+@@ -882,7 +882,7 @@ test_%s(void) {
+ #
+ while nb_cond > 0:
+ test.write("#endif\n")
+- nb_cond = nb_cond -1
++ nb_cond = nb_cond -1
+ if define == 1:
+ test.write("#endif\n")
+
+@@ -900,10 +900,10 @@ test_%s(void) {
+ for module in modules:
+ # gather all the functions exported by that module
+ try:
+- functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
++ functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
+ except:
+- print "Failed to gather functions from module %s" % (module)
+- continue;
++ print("Failed to gather functions from module %s" % (module))
++ continue;
+
+ # iterate over all functions in the module generating the test
+ i = 0
+@@ -923,14 +923,14 @@ test_%s(void) {
+ # iterate over all functions in the module generating the call
+ for function in functions:
+ name = function.xpathEval('string(@name)')
+- if is_skipped_function(name):
+- continue
+- test.write(" test_ret += test_%s();\n" % (name))
++ if is_skipped_function(name):
++ continue
++ test.write(" test_ret += test_%s();\n" % (name))
+
+ # footer
+ test.write("""
+ if (test_ret != 0)
+- printf("Module %s: %%d errors\\n", test_ret);
++\tprintf("Module %s: %%d errors\\n", test_ret);
+ return(test_ret);
+ }
+ """ % (module))
+@@ -948,7 +948,7 @@ test.write(""" return(0);
+ }
+ """);
+
+-print "Generated test for %d modules and %d functions" %(len(modules), nb_tests)
++print("Generated test for %d modules and %d functions" %(len(modules), nb_tests))
+
+ compare_and_save()
+
+@@ -960,11 +960,8 @@ for missing in missing_types.keys():
+ n = len(missing_types[missing])
+ missing_list.append((n, missing))
+
+-def compare_missing(a, b):
+- return b[0] - a[0]
+-
+-missing_list.sort(compare_missing)
+-print "Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list))
++missing_list.sort(key=lambda a: a[0])
++print("Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list)))
+ lst = open("missing.lst", "w")
+ lst.write("Missing support for %d types" % (len(missing_list)))
+ lst.write("\n")
+@@ -974,9 +971,9 @@ for miss in missing_list:
+ for n in missing_types[miss[1]]:
+ i = i + 1
+ if i > 5:
+- lst.write(" ...")
+- break
+- lst.write(" %s" % (n))
++ lst.write(" ...")
++ break
++ lst.write(" %s" % (n))
+ lst.write("\n")
+ lst.write("\n")
+ lst.write("\n")
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch
new file mode 100644
index 0000000000..346ec37a9f
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch
@@ -0,0 +1,624 @@
+From 15050f59d2a62b97b34e9cab8b8076a68ef003bd Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Thu, 25 Aug 2022 17:43:08 +0200
+Subject: [PATCH] CVE-2022-40303
+
+Fix integer overflows with XML_PARSE_HUGE
+
+Also impose size limits when XML_PARSE_HUGE is set. Limit size of names
+to XML_MAX_TEXT_LENGTH (10 million bytes) and other content to
+XML_MAX_HUGE_LENGTH (1 billion bytes).
+
+Move some of the length checks to the end of the respective loop to make
+them strict.
+
+xmlParseEntityValue didn't have a length limitation at all. But without
+XML_PARSE_HUGE, this should eventually trigger an error in xmlGROW.
+
+Thanks to Maddie Stone working with Google Project Zero for the report!
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/c846986356fc149915a74972bf198abc266bc2c0]
+CVE: CVE-2022-40303
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ parser.c | 233 +++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 121 insertions(+), 112 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index 1bc3713..0f76577 100644
+--- a/parser.c
++++ b/parser.c
+@@ -115,6 +115,8 @@ xmlParseElementEnd(xmlParserCtxtPtr ctxt);
+ * *
+ ************************************************************************/
+
++#define XML_MAX_HUGE_LENGTH 1000000000
++
+ #define XML_PARSER_BIG_ENTITY 1000
+ #define XML_PARSER_LOT_ENTITY 5000
+
+@@ -565,7 +567,7 @@ xmlFatalErr(xmlParserCtxtPtr ctxt, xmlParserErrors error, const char *info)
+ errmsg = "Malformed declaration expecting version";
+ break;
+ case XML_ERR_NAME_TOO_LONG:
+- errmsg = "Name too long use XML_PARSE_HUGE option";
++ errmsg = "Name too long";
+ break;
+ #if 0
+ case:
+@@ -3210,6 +3212,9 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNameComplex++;
+@@ -3275,7 +3280,8 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ }
+@@ -3301,13 +3307,13 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Name");
+ return(NULL);
+ }
+@@ -3346,7 +3352,10 @@ const xmlChar *
+ xmlParseName(xmlParserCtxtPtr ctxt) {
+ const xmlChar *in;
+ const xmlChar *ret;
+- int count = 0;
++ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ GROW;
+
+@@ -3370,8 +3379,7 @@ xmlParseName(xmlParserCtxtPtr ctxt) {
+ in++;
+ if ((*in > 0) && (*in < 0x80)) {
+ count = in - ctxt->input->cur;
+- if ((count > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (count > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Name");
+ return(NULL);
+ }
+@@ -3392,6 +3400,9 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ size_t startPosition = 0;
+
+ #ifdef DEBUG
+@@ -3412,17 +3423,13 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ while ((c != ' ') && (c != '>') && (c != '/') && /* test bigname.xml */
+ (xmlIsNameChar(ctxt, c) && (c != ':'))) {
+ if (count++ > XML_PARSER_CHUNK_SIZE) {
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+- return(NULL);
+- }
+ count = 0;
+ GROW;
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ if (c == 0) {
+@@ -3440,8 +3447,7 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ c = CUR_CHAR(l);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3467,7 +3473,10 @@ static const xmlChar *
+ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ const xmlChar *in, *e;
+ const xmlChar *ret;
+- int count = 0;
++ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNCName++;
+@@ -3492,8 +3501,7 @@ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ goto complex;
+ if ((*in > 0) && (*in < 0x80)) {
+ count = in - ctxt->input->cur;
+- if ((count > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (count > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3575,6 +3583,9 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ const xmlChar *cur = *str;
+ int len = 0, l;
+ int c;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseStringName++;
+@@ -3610,12 +3621,6 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ if (len + 10 > max) {
+ xmlChar *tmp;
+
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+- xmlFree(buffer);
+- return(NULL);
+- }
+ max *= 2;
+ tmp = (xmlChar *) xmlRealloc(buffer,
+ max * sizeof(xmlChar));
+@@ -3629,14 +3634,18 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ COPY_BUF(l,buffer,len,c);
+ cur += l;
+ c = CUR_SCHAR(cur, l);
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
++ xmlFree(buffer);
++ return(NULL);
++ }
+ }
+ buffer[len] = 0;
+ *str = cur;
+ return(buffer);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3663,6 +3672,9 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNmToken++;
+@@ -3714,12 +3726,6 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ if (len + 10 > max) {
+ xmlChar *tmp;
+
+- if ((max > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
+- xmlFree(buffer);
+- return(NULL);
+- }
+ max *= 2;
+ tmp = (xmlChar *) xmlRealloc(buffer,
+ max * sizeof(xmlChar));
+@@ -3733,6 +3739,11 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ COPY_BUF(l,buffer,len,c);
+ NEXTL(l);
+ c = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
++ xmlFree(buffer);
++ return(NULL);
++ }
+ }
+ buffer[len] = 0;
+ return(buffer);
+@@ -3740,8 +3751,7 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ }
+ if (len == 0)
+ return(NULL);
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
+ return(NULL);
+ }
+@@ -3767,6 +3777,9 @@ xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
+ int c, l;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ xmlChar stop;
+ xmlChar *ret = NULL;
+ const xmlChar *cur = NULL;
+@@ -3826,6 +3839,12 @@ xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
+ GROW;
+ c = CUR_CHAR(l);
+ }
++
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_NOT_FINISHED,
++ "entity value too long\n");
++ goto error;
++ }
+ }
+ buf[len] = 0;
+ if (ctxt->instate == XML_PARSER_EOF)
+@@ -3913,6 +3932,9 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ xmlChar *rep = NULL;
+ size_t len = 0;
+ size_t buf_size = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int c, l, in_space = 0;
+ xmlChar *current = NULL;
+ xmlEntityPtr ent;
+@@ -3944,16 +3966,6 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ while (((NXT(0) != limit) && /* checked */
+ (IS_CHAR(c)) && (c != '<')) &&
+ (ctxt->instate != XML_PARSER_EOF)) {
+- /*
+- * Impose a reasonable limit on attribute size, unless XML_PARSE_HUGE
+- * special option is given
+- */
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+- "AttValue length too long\n");
+- goto mem_error;
+- }
+ if (c == '&') {
+ in_space = 0;
+ if (NXT(1) == '#') {
+@@ -4101,6 +4113,11 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ }
+ GROW;
+ c = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
++ "AttValue length too long\n");
++ goto mem_error;
++ }
+ }
+ if (ctxt->instate == XML_PARSER_EOF)
+ goto error;
+@@ -4122,16 +4139,6 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ } else
+ NEXT;
+
+- /*
+- * There we potentially risk an overflow, don't allow attribute value of
+- * length more than INT_MAX it is a very reasonable assumption !
+- */
+- if (len >= INT_MAX) {
+- xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+- "AttValue length too long\n");
+- goto mem_error;
+- }
+-
+ if (attlen != NULL) *attlen = (int) len;
+ return(buf);
+
+@@ -4202,6 +4209,9 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
+ int cur, l;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ xmlChar stop;
+ int state = ctxt->instate;
+ int count = 0;
+@@ -4229,13 +4239,6 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ if (len + 5 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "SystemLiteral");
+- xmlFree(buf);
+- ctxt->instate = (xmlParserInputState) state;
+- return(NULL);
+- }
+ size *= 2;
+ tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
+ if (tmp == NULL) {
+@@ -4264,6 +4267,12 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ SHRINK;
+ cur = CUR_CHAR(l);
+ }
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "SystemLiteral");
++ xmlFree(buf);
++ ctxt->instate = (xmlParserInputState) state;
++ return(NULL);
++ }
+ }
+ buf[len] = 0;
+ ctxt->instate = (xmlParserInputState) state;
+@@ -4291,6 +4300,9 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ xmlChar cur;
+ xmlChar stop;
+ int count = 0;
+@@ -4318,12 +4330,6 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ if (len + 1 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Public ID");
+- xmlFree(buf);
+- return(NULL);
+- }
+ size *= 2;
+ tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
+ if (tmp == NULL) {
+@@ -4351,6 +4357,11 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ SHRINK;
+ cur = CUR;
+ }
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Public ID");
++ xmlFree(buf);
++ return(NULL);
++ }
+ }
+ buf[len] = 0;
+ if (cur != stop) {
+@@ -4750,6 +4761,9 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ int r, rl;
+ int cur, l;
+ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int inputid;
+
+ inputid = ctxt->input->id;
+@@ -4795,13 +4809,6 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ if ((r == '-') && (q == '-')) {
+ xmlFatalErr(ctxt, XML_ERR_HYPHEN_IN_COMMENT, NULL);
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
+- "Comment too big found", NULL);
+- xmlFree (buf);
+- return;
+- }
+ if (len + 5 >= size) {
+ xmlChar *new_buf;
+ size_t new_size;
+@@ -4839,6 +4846,13 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ GROW;
+ cur = CUR_CHAR(l);
+ }
++
++ if (len > maxLength) {
++ xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
++ "Comment too big found", NULL);
++ xmlFree (buf);
++ return;
++ }
+ }
+ buf[len] = 0;
+ if (cur == 0) {
+@@ -4883,6 +4897,9 @@ xmlParseComment(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ size_t size = XML_PARSER_BUFFER_SIZE;
+ size_t len = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ xmlParserInputState state;
+ const xmlChar *in;
+ size_t nbchar = 0;
+@@ -4966,8 +4983,7 @@ get_more:
+ buf[len] = 0;
+ }
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
+ "Comment too big found", NULL);
+ xmlFree (buf);
+@@ -5167,6 +5183,9 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ size_t len = 0;
+ size_t size = XML_PARSER_BUFFER_SIZE;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int cur, l;
+ const xmlChar *target;
+ xmlParserInputState state;
+@@ -5242,14 +5261,6 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ return;
+ }
+ count = 0;
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+- "PI %s too big found", target);
+- xmlFree(buf);
+- ctxt->instate = state;
+- return;
+- }
+ }
+ COPY_BUF(l,buf,len,cur);
+ NEXTL(l);
+@@ -5259,15 +5270,14 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ GROW;
+ cur = CUR_CHAR(l);
+ }
++ if (len > maxLength) {
++ xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
++ "PI %s too big found", target);
++ xmlFree(buf);
++ ctxt->instate = state;
++ return;
++ }
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+- "PI %s too big found", target);
+- xmlFree(buf);
+- ctxt->instate = state;
+- return;
+- }
+ buf[len] = 0;
+ if (cur != '?') {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+@@ -8959,6 +8969,9 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ const xmlChar *in = NULL, *start, *end, *last;
+ xmlChar *ret = NULL;
+ int line, col;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+
+ GROW;
+ in = (xmlChar *) CUR_PTR;
+@@ -8998,8 +9011,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ start = in;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9012,8 +9024,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ if ((*in++ == 0x20) && (*in == 0x20)) break;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9046,16 +9057,14 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ last = last + delta;
+ }
+ end = ctxt->input->end;
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+ }
+ }
+ }
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9068,8 +9077,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ col++;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9077,8 +9085,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ }
+ }
+ last = in;
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9768,6 +9775,9 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ int s, sl;
+ int cur, l;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+
+ /* Check 2.6.0 was NXT(0) not RAW */
+ if (CMP9(CUR_PTR, '<', '!', '[', 'C', 'D', 'A', 'T', 'A', '[')) {
+@@ -9801,13 +9811,6 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ if (len + 5 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_CDATA_NOT_FINISHED,
+- "CData section too big found", NULL);
+- xmlFree (buf);
+- return;
+- }
+ tmp = (xmlChar *) xmlRealloc(buf, size * 2 * sizeof(xmlChar));
+ if (tmp == NULL) {
+ xmlFree(buf);
+@@ -9834,6 +9837,12 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ }
+ NEXTL(l);
+ cur = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_CDATA_NOT_FINISHED,
++ "CData section too big found\n");
++ xmlFree(buf);
++ return;
++ }
+ }
+ buf[len] = 0;
+ ctxt->instate = XML_PARSER_CONTENT;
+--
+2.25.1
+
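
The CVE-2022-40303 backport above applies one recurring pattern across the parser routines: choose the applicable limit once at the top of the function (XML_MAX_HUGE_LENGTH, XML_MAX_TEXT_LENGTH or XML_MAX_NAME_LENGTH depending on XML_PARSE_HUGE, instead of skipping the check entirely when the option is set), guard the length accumulation against integer overflow, and test the limit at the end of every loop iteration so it stays strict. A schematic C sketch of that pattern follows; it is not libxml2 code, and the DEMO_* constants merely mirror the limits quoted in the commit message.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_MAX_TEXT_LENGTH 10000000    /* stands in for XML_MAX_TEXT_LENGTH */
#define DEMO_MAX_HUGE_LENGTH 1000000000  /* stands in for XML_MAX_HUGE_LENGTH */

/* Accumulate chunk lengths; return -1 as soon as the applicable limit is
   exceeded, checking at the end of each iteration rather than mid-loop. */
static int accumulate_checked(const int *chunks, size_t nchunks, int huge_option)
{
    int len = 0;
    int maxLength = huge_option ? DEMO_MAX_HUGE_LENGTH : DEMO_MAX_TEXT_LENGTH;

    for (size_t i = 0; i < nchunks; i++) {
        int l = chunks[i];
        if (len <= INT_MAX - l)   /* guard the running sum against signed overflow */
            len += l;
        if (len > maxLength)      /* strict: evaluated on every iteration */
            return -1;
    }
    return len;
}

int main(void)
{
    int chunks[] = { 4096, 4096, 4096 };
    printf("total: %d\n", accumulate_checked(chunks, 3, 0)); /* prints 12288 */
    return 0;
}
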
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch
new file mode 100644
index 0000000000..b24be03315
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch
@@ -0,0 +1,106 @@
+From cde95d801abc9405ca821ad814c7730333328d96 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 31 Aug 2022 22:11:25 +0200
+Subject: [PATCH] CVE-2022-40304
+
+Fix dict corruption caused by entity reference cycles
+
+When an entity reference cycle is detected, the entity content is
+cleared by setting its first byte to zero. But the entity content might
+be allocated from a dict. In this case, the dict entry becomes corrupted
+leading to all kinds of logic errors, including memory errors like
+double-frees.
+
+Stop storing entity content, orig, ExternalID and SystemID in a dict.
+These values are unlikely to occur multiple times in a document, so they
+shouldn't have been stored in a dict in the first place.
+
+Thanks to Ned Williamson and Nathan Wachholz working with Google Project
+Zero for the report!
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/1b41ec4e9433b05bb0376be4725804c54ef1d80b]
+CVE: CVE-2022-40304
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ entities.c | 55 ++++++++++++++++--------------------------------------
+ 1 file changed, 16 insertions(+), 39 deletions(-)
+
+diff --git a/entities.c b/entities.c
+index 1a8f86f..ec1b9a7 100644
+--- a/entities.c
++++ b/entities.c
+@@ -112,36 +112,19 @@ xmlFreeEntity(xmlEntityPtr entity)
+ if ((entity->children) && (entity->owner == 1) &&
+ (entity == (xmlEntityPtr) entity->children->parent))
+ xmlFreeNodeList(entity->children);
+- if (dict != NULL) {
+- if ((entity->name != NULL) && (!xmlDictOwns(dict, entity->name)))
+- xmlFree((char *) entity->name);
+- if ((entity->ExternalID != NULL) &&
+- (!xmlDictOwns(dict, entity->ExternalID)))
+- xmlFree((char *) entity->ExternalID);
+- if ((entity->SystemID != NULL) &&
+- (!xmlDictOwns(dict, entity->SystemID)))
+- xmlFree((char *) entity->SystemID);
+- if ((entity->URI != NULL) && (!xmlDictOwns(dict, entity->URI)))
+- xmlFree((char *) entity->URI);
+- if ((entity->content != NULL)
+- && (!xmlDictOwns(dict, entity->content)))
+- xmlFree((char *) entity->content);
+- if ((entity->orig != NULL) && (!xmlDictOwns(dict, entity->orig)))
+- xmlFree((char *) entity->orig);
+- } else {
+- if (entity->name != NULL)
+- xmlFree((char *) entity->name);
+- if (entity->ExternalID != NULL)
+- xmlFree((char *) entity->ExternalID);
+- if (entity->SystemID != NULL)
+- xmlFree((char *) entity->SystemID);
+- if (entity->URI != NULL)
+- xmlFree((char *) entity->URI);
+- if (entity->content != NULL)
+- xmlFree((char *) entity->content);
+- if (entity->orig != NULL)
+- xmlFree((char *) entity->orig);
+- }
++ if ((entity->name != NULL) &&
++ ((dict == NULL) || (!xmlDictOwns(dict, entity->name))))
++ xmlFree((char *) entity->name);
++ if (entity->ExternalID != NULL)
++ xmlFree((char *) entity->ExternalID);
++ if (entity->SystemID != NULL)
++ xmlFree((char *) entity->SystemID);
++ if (entity->URI != NULL)
++ xmlFree((char *) entity->URI);
++ if (entity->content != NULL)
++ xmlFree((char *) entity->content);
++ if (entity->orig != NULL)
++ xmlFree((char *) entity->orig);
+ xmlFree(entity);
+ }
+
+@@ -177,18 +160,12 @@ xmlCreateEntity(xmlDictPtr dict, const xmlChar *name, int type,
+ ret->SystemID = xmlStrdup(SystemID);
+ } else {
+ ret->name = xmlDictLookup(dict, name, -1);
+- if (ExternalID != NULL)
+- ret->ExternalID = xmlDictLookup(dict, ExternalID, -1);
+- if (SystemID != NULL)
+- ret->SystemID = xmlDictLookup(dict, SystemID, -1);
++ ret->ExternalID = xmlStrdup(ExternalID);
++ ret->SystemID = xmlStrdup(SystemID);
+ }
+ if (content != NULL) {
+ ret->length = xmlStrlen(content);
+- if ((dict != NULL) && (ret->length < 5))
+- ret->content = (xmlChar *)
+- xmlDictLookup(dict, content, ret->length);
+- else
+- ret->content = xmlStrndup(content, ret->length);
++ ret->content = xmlStrndup(content, ret->length);
+ } else {
+ ret->length = 0;
+ ret->content = NULL;
+--
+2.25.1
+
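Editor's note: the failure mode described in the commit message above is easy to model outside C. Once a value is interned in a shared dictionary, mutating it in place through one reference corrupts it for every other holder. The toy Python sketch below is not libxml2 code and its names are invented; it only illustrates why the fix stops interning entity content, orig, ExternalID and SystemID and gives each entity a private copy (xmlStrdup/xmlStrndup) instead.

    # Toy model of string interning; not libxml2's xmlDict API.
    pool = {}

    def intern(value):
        # Return one shared object per distinct value, roughly like xmlDictLookup().
        return pool.setdefault(bytes(value), value)

    content_a = intern(bytearray(b"&loop;"))
    content_b = intern(bytearray(b"&loop;"))   # same shared object as content_a

    content_a[0:1] = b"\x00"   # "clear" the content in place on an entity cycle
    print(content_b)           # every other holder of the dict entry sees the damage

    safe_copy = bytes(b"&loop;")   # the fix: a per-entity copy, like xmlStrdup()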
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch
new file mode 100644
index 0000000000..907f2c4d47
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch
@@ -0,0 +1,79 @@
+From e4f85f1bd2eb34d9b49da9154a4cc3a1bc284f68 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 7 Apr 2023 11:46:35 +0200
+Subject: [PATCH] [CVE-2023-28484] Fix null deref in xmlSchemaFixupComplexType
+
+Fix a null pointer dereference when parsing (invalid) XML schemas.
+
+Thanks to Robby Simpson for the report!
+
+Fixes #491.
+
+CVE: CVE-2023-28484
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/e4f85f1bd2eb34d9b49da9154a4cc3a1bc284f68]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ result/schemas/issue491_0_0.err | 1 +
+ test/schemas/issue491_0.xml | 1 +
+ test/schemas/issue491_0.xsd | 18 ++++++++++++++++++
+ xmlschemas.c | 2 +-
+ 4 files changed, 21 insertions(+), 1 deletion(-)
+ create mode 100644 result/schemas/issue491_0_0.err
+ create mode 100644 test/schemas/issue491_0.xml
+ create mode 100644 test/schemas/issue491_0.xsd
+
+diff --git a/result/schemas/issue491_0_0.err b/result/schemas/issue491_0_0.err
+new file mode 100644
+index 00000000..9b2bb969
+--- /dev/null
++++ b/result/schemas/issue491_0_0.err
+@@ -0,0 +1 @@
++./test/schemas/issue491_0.xsd:8: element complexType: Schemas parser error : complex type 'ChildType': The content type of both, the type and its base type, must either 'mixed' or 'element-only'.
+diff --git a/test/schemas/issue491_0.xml b/test/schemas/issue491_0.xml
+new file mode 100644
+index 00000000..e2b2fc2e
+--- /dev/null
++++ b/test/schemas/issue491_0.xml
+@@ -0,0 +1 @@
++<Child xmlns="http://www.test.com">5</Child>
+diff --git a/test/schemas/issue491_0.xsd b/test/schemas/issue491_0.xsd
+new file mode 100644
+index 00000000..81702649
+--- /dev/null
++++ b/test/schemas/issue491_0.xsd
+@@ -0,0 +1,18 @@
++<?xml version='1.0' encoding='UTF-8'?>
++<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://www.test.com" targetNamespace="http://www.test.com" elementFormDefault="qualified" attributeFormDefault="unqualified">
++ <xs:complexType name="BaseType">
++ <xs:simpleContent>
++ <xs:extension base="xs:int" />
++ </xs:simpleContent>
++ </xs:complexType>
++ <xs:complexType name="ChildType">
++ <xs:complexContent>
++ <xs:extension base="BaseType">
++ <xs:sequence>
++ <xs:element name="bad" type="xs:int" minOccurs="0" maxOccurs="1"/>
++ </xs:sequence>
++ </xs:extension>
++ </xs:complexContent>
++ </xs:complexType>
++ <xs:element name="Child" type="ChildType" />
++</xs:schema>
+diff --git a/xmlschemas.c b/xmlschemas.c
+index 6a353858..a4eaf591 100644
+--- a/xmlschemas.c
++++ b/xmlschemas.c
+@@ -18632,7 +18632,7 @@ xmlSchemaFixupComplexType(xmlSchemaParserCtxtPtr pctxt,
+ "allowed to appear inside other model groups",
+ NULL, NULL);
+
+- } else if (! dummySequence) {
++ } else if ((!dummySequence) && (baseType->subtypes != NULL)) {
+ xmlSchemaTreeItemPtr effectiveContent =
+ (xmlSchemaTreeItemPtr) type->subtypes;
+ /*
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch
new file mode 100644
index 0000000000..f60d160c49
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch
@@ -0,0 +1,42 @@
+From 547edbf1cbdccd46b2e8ff322a456eaa5931c5df Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 7 Apr 2023 11:49:27 +0200
+Subject: [PATCH] [CVE-2023-29469] Hashing of empty dict strings isn't
+ deterministic
+
+When hashing empty strings which aren't null-terminated,
+xmlDictComputeFastKey could produce inconsistent results. This could
+lead to various logic or memory errors, including double frees.
+
+For consistency the seed is also taken into account, but this shouldn't
+have an impact on security.
+
+Found by OSS-Fuzz.
+
+Fixes #510.
+
+CVE: CVE-2023-29469
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/547edbf1cbdccd46b2e8ff322a456eaa5931c5df]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ dict.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/dict.c b/dict.c
+index 86c3f6d7..d7fd1a06 100644
+--- a/dict.c
++++ b/dict.c
+@@ -433,7 +433,8 @@ static unsigned long
+ xmlDictComputeFastKey(const xmlChar *name, int namelen, int seed) {
+ unsigned long value = seed;
+
+- if (name == NULL) return(0);
++ if ((name == NULL) || (namelen <= 0))
++ return(value);
+ value += *name;
+ value <<= 5;
+ if (namelen > 10) {
+--
+GitLab
+
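Editor's note: as a concrete reading of the hunk above, here is a small Python transcription of the start of the patched xmlDictComputeFastKey(). It is only a sketch of the lines shown (the real function goes on to mix in more characters), but it captures the behavioural change: NULL or zero-length names now hash to the seed instead of reading a byte that may lie past the string.

    def fast_key_fixed(name, namelen, seed):
        # Sketch of the patched prologue of xmlDictComputeFastKey().
        value = seed
        if name is None or namelen <= 0:
            # Previously only 'name is None' was handled (returning 0); an empty,
            # non-NUL-terminated string then read one byte past its length, so
            # equal (empty) strings could land in different buckets.
            return value
        value += name[0]
        value = (value << 5) & 0xFFFFFFFF   # toy bound to keep the key small
        # ... the real function also folds in the tail of longer names ...
        return value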
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch
new file mode 100644
index 0000000000..3506779c4c
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch
@@ -0,0 +1,37 @@
+From d0c3f01e110d54415611c5fa0040cdf4a56053f9 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat May 6 17:47:37 2023 +0200
+Subject: [PATCH 1/2] parser: Fix old SAX1 parser with custom callbacks
+
+For some reason, xmlCtxtUseOptionsInternal set the start and end element
+SAX handlers to the internal DOM builder functions when XML_PARSE_SAX1
+was specified. This means that custom SAX handlers could never work with
+that flag because these functions would receive the wrong user data
+argument and crash immediately.
+
+Fixes #535.
+
+CVE: CVE-2023-39615
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/d0c3f01e110d54415611c5fa0040cdf4a56053f9]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ parser.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index 0f76577..b781c80 100644
+--- a/parser.c
++++ b/parser.c
+@@ -15069,8 +15069,6 @@ xmlCtxtUseOptionsInternal(xmlParserCtxtPtr ctxt, int options, const char *encodi
+ }
+ #ifdef LIBXML_SAX1_ENABLED
+ if (options & XML_PARSE_SAX1) {
+- ctxt->sax->startElement = xmlSAX2StartElement;
+- ctxt->sax->endElement = xmlSAX2EndElement;
+ ctxt->sax->startElementNs = NULL;
+ ctxt->sax->endElementNs = NULL;
+ ctxt->sax->initialized = 1;
+--
+2.40.0
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch
new file mode 100644
index 0000000000..d922ddc730
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch
@@ -0,0 +1,72 @@
+From 235b15a590eecf97b09e87bdb7e4f8333e9de129 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Mon May 8 17:58:02 2023 +0200
+Subject: [PATCH 2/2] SAX: Always initialize SAX1 element handlers
+
+Follow-up to commit d0c3f01e. A parser context will be initialized to
+SAX version 2, but this can be overridden with XML_PARSE_SAX1 later,
+so we must initialize the SAX1 element handlers as well.
+
+Change the check in xmlDetectSAX2 to only look for XML_SAX2_MAGIC, so
+we don't switch to SAX1 if the SAX2 element handlers are NULL.
+
+CVE: CVE-2023-39615
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/235b15a590eecf97b09e87bdb7e4f8333e9de129]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ SAX2.c | 11 +++++++----
+ parser.c | 5 +----
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/SAX2.c b/SAX2.c
+index 0319246..f7c77c2 100644
+--- a/SAX2.c
++++ b/SAX2.c
+@@ -2842,20 +2842,23 @@ xmlSAXVersion(xmlSAXHandler *hdlr, int version)
+ {
+ if (hdlr == NULL) return(-1);
+ if (version == 2) {
+- hdlr->startElement = NULL;
+- hdlr->endElement = NULL;
+ hdlr->startElementNs = xmlSAX2StartElementNs;
+ hdlr->endElementNs = xmlSAX2EndElementNs;
+ hdlr->serror = NULL;
+ hdlr->initialized = XML_SAX2_MAGIC;
+ #ifdef LIBXML_SAX1_ENABLED
+ } else if (version == 1) {
+- hdlr->startElement = xmlSAX2StartElement;
+- hdlr->endElement = xmlSAX2EndElement;
+ hdlr->initialized = 1;
+ #endif /* LIBXML_SAX1_ENABLED */
+ } else
+ return(-1);
++#ifdef LIBXML_SAX1_ENABLED
++ hdlr->startElement = xmlSAX2StartElement;
++ hdlr->endElement = xmlSAX2EndElement;
++#else
++ hdlr->startElement = NULL;
++ hdlr->endElement = NULL;
++#endif /* LIBXML_SAX1_ENABLED */
+ hdlr->internalSubset = xmlSAX2InternalSubset;
+ hdlr->externalSubset = xmlSAX2ExternalSubset;
+ hdlr->isStandalone = xmlSAX2IsStandalone;
+diff --git a/parser.c b/parser.c
+index b781c80..738dbee 100644
+--- a/parser.c
++++ b/parser.c
+@@ -1109,10 +1109,7 @@ xmlDetectSAX2(xmlParserCtxtPtr ctxt) {
+ if (ctxt == NULL) return;
+ sax = ctxt->sax;
+ #ifdef LIBXML_SAX1_ENABLED
+- if ((sax) && (sax->initialized == XML_SAX2_MAGIC) &&
+- ((sax->startElementNs != NULL) ||
+- (sax->endElementNs != NULL) ||
+- ((sax->startElement == NULL) && (sax->endElement == NULL))))
++ if ((sax) && (sax->initialized == XML_SAX2_MAGIC))
+ ctxt->sax2 = 1;
+ #else
+ ctxt->sax2 = 1;
+--
+2.40.0
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch
new file mode 100644
index 0000000000..5f1cb72534
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch
@@ -0,0 +1,49 @@
+From a22bd982bf10291deea8ba0c61bf75b898c604ce Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 2 Nov 2022 15:44:42 +0100
+Subject: [PATCH] malloc-fail: Fix memory leak in xmlStaticCopyNodeList
+
+Found with libFuzzer, see #344.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/a22bd982bf10291deea8ba0c61bf75b898c604ce]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ tree.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 507869efe..647288ce3 100644
+--- a/tree.c
++++ b/tree.c
+@@ -4461,7 +4461,7 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ }
+ if (doc->intSubset == NULL) {
+ q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
+- if (q == NULL) return(NULL);
++ if (q == NULL) goto error;
+ q->doc = doc;
+ q->parent = parent;
+ doc->intSubset = (xmlDtdPtr) q;
+@@ -4473,7 +4473,7 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ } else
+ #endif /* LIBXML_TREE_ENABLED */
+ q = xmlStaticCopyNode(node, doc, parent, 1);
+- if (q == NULL) return(NULL);
++ if (q == NULL) goto error;
+ if (ret == NULL) {
+ q->prev = NULL;
+ ret = p = q;
+@@ -4486,6 +4486,9 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ node = node->next;
+ }
+ return(ret);
++error:
++ xmlFreeNodeList(ret);
++ return(NULL);
+ }
+
+ /**
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch
new file mode 100644
index 0000000000..845fd70c66
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch
@@ -0,0 +1,79 @@
+From d39f78069dff496ec865c73aa44d7110e429bce9 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 23 Aug 2023 20:24:24 +0200
+Subject: [PATCH] tree: Fix copying of DTDs
+
+- Don't create multiple DTD nodes.
+- Fix UAF if malloc fails.
+- Skip DTD nodes if tree module is disabled.
+
+Fixes #583.
+
+CVE: CVE-2023-45322
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/d39f78069dff496ec865c73aa44d7110e429bce9]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ tree.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 6c8a875b9..02c1b5791 100644
+--- a/tree.c
++++ b/tree.c
+@@ -4471,29 +4471,28 @@ xmlNodePtr
+ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ xmlNodePtr ret = NULL;
+ xmlNodePtr p = NULL,q;
++ xmlDtdPtr newSubset = NULL;
+
+ while (node != NULL) {
+-#ifdef LIBXML_TREE_ENABLED
+ if (node->type == XML_DTD_NODE ) {
+- if (doc == NULL) {
++#ifdef LIBXML_TREE_ENABLED
++ if ((doc == NULL) || (doc->intSubset != NULL)) {
+ node = node->next;
+ continue;
+ }
+- if (doc->intSubset == NULL) {
+- q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
+- if (q == NULL) goto error;
+- q->doc = doc;
+- q->parent = parent;
+- doc->intSubset = (xmlDtdPtr) q;
+- xmlAddChild(parent, q);
+- } else {
+- q = (xmlNodePtr) doc->intSubset;
+- xmlAddChild(parent, q);
+- }
+- } else
++ q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
++ if (q == NULL) goto error;
++ q->doc = doc;
++ q->parent = parent;
++ newSubset = (xmlDtdPtr) q;
++#else
++ node = node->next;
++ continue;
+ #endif /* LIBXML_TREE_ENABLED */
++ } else {
+ q = xmlStaticCopyNode(node, doc, parent, 1);
+- if (q == NULL) goto error;
++ if (q == NULL) goto error;
++ }
+ if (ret == NULL) {
+ q->prev = NULL;
+ ret = p = q;
+@@ -4505,6 +4504,8 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ }
+ node = node->next;
+ }
++ if (newSubset != NULL)
++ doc->intSubset = newSubset;
+ return(ret);
+ error:
+ xmlFreeNodeList(ret);
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch b/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch
new file mode 100644
index 0000000000..5365d5546a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch
@@ -0,0 +1,33 @@
+From 2b0aac140d739905c7848a42efc60bfe783a39b7 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 14 Oct 2023 22:45:54 +0200
+Subject: [PATCH] [CVE-2024-25062] xmlreader: Don't expand XIncludes when
+ backtracking
+
+Fixes a use-after-free when the XML Reader is used with DTD validation and
+XInclude expansion.
+
+Fixes #604.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/2b0aac140d739905c7848a42efc60bfe783a39b7]
+CVE: CVE-2024-25062
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ xmlreader.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xmlreader.c b/xmlreader.c
+index 979385a13..fefd68e0b 100644
+--- a/xmlreader.c
++++ b/xmlreader.c
+@@ -1443,6 +1443,7 @@ node_found:
+ * Handle XInclude if asked for
+ */
+ if ((reader->xinclude) && (reader->in_xinclude == 0) &&
++ (reader->state != XML_TEXTREADER_BACKTRACK) &&
+ (reader->node != NULL) &&
+ (reader->node->type == XML_ELEMENT_NODE) &&
+ (reader->node->ns != NULL) &&
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2_2.9.14.bb b/meta/recipes-core/libxml/libxml2_2.9.14.bb
index 3081ebf92f..2b7e9999d9 100644
--- a/meta/recipes-core/libxml/libxml2_2.9.14.bb
+++ b/meta/recipes-core/libxml/libxml2_2.9.14.bb
@@ -13,7 +13,7 @@ DEPENDS = "zlib virtual/libiconv"
inherit gnomebase
-SRC_URI += "http://www.w3.org/XML/Test/xmlts20080827.tar.gz;subdir=${BP};name=testtar \
+SRC_URI += "http://www.w3.org/XML/Test/xmlts20080827.tar;subdir=${BP};name=testtar \
file://libxml-64bit.patch \
file://runtest.patch \
file://run-ptest \
@@ -22,13 +22,27 @@ SRC_URI += "http://www.w3.org/XML/Test/xmlts20080827.tar.gz;subdir=${BP};name=te
file://fix-execution-of-ptests.patch \
file://remove-fuzz-from-ptests.patch \
file://libxml-m4-use-pkgconfig.patch \
+ file://0001-Port-gentest.py-to-Python-3.patch \
+ file://CVE-2022-40303.patch \
+ file://CVE-2022-40304.patch \
+ file://CVE-2023-28484.patch \
+ file://CVE-2023-29469.patch \
+ file://CVE-2023-39615-0001.patch \
+ file://CVE-2023-39615-0002.patch \
+ file://CVE-2023-45322-1.patch \
+ file://CVE-2023-45322-2.patch \
+ file://CVE-2024-25062.patch \
"
SRC_URI[archive.sha256sum] = "60d74a257d1ccec0475e749cba2f21559e48139efba6ff28224357c7c798dfee"
-SRC_URI[testtar.sha256sum] = "96151685cec997e1f9f3387e3626d61e6284d4d6e66e0e440c209286c03e9cc7"
+SRC_URI[testtar.sha256sum] = "9b2c865aba66c6429ca301a7ef048d7eca2cdb7a9106184416710853c7b37d0d"
BINCONFIG = "${bindir}/xml2-config"
+# Fixed since 2.9.11 via
+# https://gitlab.gnome.org/GNOME/libxml2/-/commit/c1ba6f54d32b707ca6d91cb3257ce9de82876b6f
+CVE_CHECK_IGNORE += "CVE-2016-3709"
+
PACKAGECONFIG ??= "python \
${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \
"
@@ -78,6 +92,16 @@ do_configure:prepend () {
}
do_compile_ptest() {
+	# Make sure that testapi.c is newer than gentest.py. With
+	# reproducible builds both files get the same modification time
+	# (e.g. Jan 1 1970 from SOURCE_DATE_EPOCH), and check-am may then
+	# try to run rebuild_testapi. That fails even with
+	# 0001-Port-gentest.py-to-Python-3.patch, because it needs the
+	# libxml2 Python module (a libxml2-native dependency and a
+	# correctly set PYTHON_SITE_PACKAGES). It is easier to just rely
+	# on the pre-generated testapi.c from the release.
+ touch ${S}/testapi.c
+
oe_runmake check-am
}
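Editor's note: the touch added above is a timestamp-ordering workaround. A hedged Python sketch of the same idea follows; the file names match the recipe comment, the paths are hypothetical, and the check is only a model of a regeneration rule that errs on the side of rebuilding when timestamps tie.

    import os, time

    def may_regenerate(target, prerequisite):
        # Model of a dependency check that regenerates when the target is not
        # strictly newer than its prerequisite -- the tie case the recipe's
        # comment describes for check-am under SOURCE_DATE_EPOCH.
        return os.path.getmtime(target) <= os.path.getmtime(prerequisite)

    # Equivalent of the recipe's "touch ${S}/testapi.c":
    if os.path.exists("testapi.c") and os.path.exists("gentest.py"):
        now = time.time()
        os.utime("testapi.c", (now, now))      # make it strictly newer
        assert not may_regenerate("testapi.c", "gentest.py")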
diff --git a/meta/recipes-core/meta/build-sysroots.bb b/meta/recipes-core/meta/build-sysroots.bb
index ad22a75eb2..72da88921a 100644
--- a/meta/recipes-core/meta/build-sysroots.bb
+++ b/meta/recipes-core/meta/build-sysroots.bb
@@ -1,5 +1,6 @@
-INHIBIT_DEFAULT_DEPS = "1"
LICENSE = "MIT"
+SUMMARY = "Build old style sysroot based on everything in the components directory that matches the current MACHINE"
+INHIBIT_DEFAULT_DEPS = "1"
STANDALONE_SYSROOT = "${STAGING_DIR}/${MACHINE}"
STANDALONE_SYSROOT_NATIVE = "${STAGING_DIR}/${BUILD_ARCH}"
diff --git a/meta/recipes-core/meta/buildtools-tarball.bb b/meta/recipes-core/meta/buildtools-tarball.bb
index 6b59e4934d..70d740b4e0 100644
--- a/meta/recipes-core/meta/buildtools-tarball.bb
+++ b/meta/recipes-core/meta/buildtools-tarball.bb
@@ -67,12 +67,17 @@ create_sdk_files:append () {
# Generate new (mini) sdk-environment-setup file
script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-${SDK_SYS}}
touch $script
- echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${sbindir_nativesdk}:${SDKPATHNATIVE}${base_bindir_nativesdk}:${SDKPATHNATIVE}${base_sbindir_nativesdk}:$PATH' >> $script
+ echo 'export PATH="${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${sbindir_nativesdk}:${SDKPATHNATIVE}${base_bindir_nativesdk}:${SDKPATHNATIVE}${base_sbindir_nativesdk}:$PATH"' >> $script
echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
if [ -e "${SDK_OUTPUT}${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt" ]; then
echo 'export GIT_SSL_CAINFO="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
echo 'export SSL_CERT_FILE="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
+ echo 'export REQUESTS_CA_BUNDLE="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
+ echo 'export CURL_CA_BUNDLE="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
fi
+ echo 'HOST_PKG_PATH=$(command -p pkg-config --variable=pc_path pkg-config 2>/dev/null)' >>$script
+ echo 'export PKG_CONFIG_LIBDIR=${SDKPATHNATIVE}/${libdir}/pkgconfig:${SDKPATHNATIVE}/${datadir}/pkgconfig:${HOST_PKG_PATH:-/usr/lib/pkgconfig:/usr/share/pkgconfig}' >>$script
+	echo 'unset HOST_PKG_PATH' >>$script
toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${SDK_SYS}
diff --git a/meta/recipes-core/meta/cve-update-db-native.bb b/meta/recipes-core/meta/cve-update-db-native.bb
index 18af89b53e..e042e67b09 100644
--- a/meta/recipes-core/meta/cve-update-db-native.bb
+++ b/meta/recipes-core/meta/cve-update-db-native.bb
@@ -18,6 +18,11 @@ NVDCVE_URL ?= "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-"
# Use a negative value to skip the update
CVE_DB_UPDATE_INTERVAL ?= "86400"
+# Timeout for blocking socket operations, such as the connection attempt.
+CVE_SOCKET_TIMEOUT ?= "60"
+
+CVE_DB_TEMP_FILE ?= "${CVE_CHECK_DB_DIR}/temp_nvdcve_1.1.db"
+
python () {
if not bb.data.inherits_class("cve-check", d):
raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.")
@@ -29,23 +34,15 @@ python do_fetch() {
"""
import bb.utils
import bb.progress
- import sqlite3, urllib, urllib.parse, gzip
- from datetime import date
+ import shutil
bb.utils.export_proxies(d)
- YEAR_START = 2002
-
db_file = d.getVar("CVE_CHECK_DB_FILE")
db_dir = os.path.dirname(db_file)
+ db_tmp_file = d.getVar("CVE_DB_TEMP_FILE")
- if os.path.exists("{0}-journal".format(db_file)):
- # If a journal is present the last update might have been interrupted. In that case,
- # just wipe any leftovers and force the DB to be recreated.
- os.remove("{0}-journal".format(db_file))
-
- if os.path.exists(db_file):
- os.remove(db_file)
+ cleanup_db_download(db_file, db_tmp_file)
# The NVD database changes once a day, so no need to update more frequently
# Allow the user to force-update
@@ -63,12 +60,61 @@ python do_fetch() {
pass
bb.utils.mkdirhier(db_dir)
+ if os.path.exists(db_file):
+ shutil.copy2(db_file, db_tmp_file)
+
+ if update_db_file(db_tmp_file, d) == True:
+ # Update downloaded correctly, can swap files
+ shutil.move(db_tmp_file, db_file)
+ else:
+ # Update failed, do not modify the database
+ bb.note("CVE database update failed")
+ os.remove(db_tmp_file)
+}
- # Connect to database
- conn = sqlite3.connect(db_file)
- c = conn.cursor()
+do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
+do_fetch[file-checksums] = ""
+do_fetch[vardeps] = ""
+
+def cleanup_db_download(db_file, db_tmp_file):
+ """
+ Cleanup the download space from possible failed downloads
+ """
+
+ # Clean up the updates done on the main file
+ # Remove it only if a journal file exists - it means a complete re-download
+ if os.path.exists("{0}-journal".format(db_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_file))
+
+ if os.path.exists(db_file):
+ os.remove(db_file)
+
+ # Clean-up the temporary file downloads, we can remove both journal
+ # and the temporary database
+ if os.path.exists("{0}-journal".format(db_tmp_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_tmp_file))
+
+ if os.path.exists(db_tmp_file):
+ os.remove(db_tmp_file)
- initialize_db(c)
+def update_db_file(db_tmp_file, d):
+ """
+ Update the given database file
+ """
+ import bb.utils, bb.progress
+ from datetime import date
+ import urllib, gzip, sqlite3
+
+ YEAR_START = 2002
+ cve_socket_timeout = int(d.getVar("CVE_SOCKET_TIMEOUT"))
+
+ # Connect to database
+ conn = sqlite3.connect(db_tmp_file)
+ initialize_db(conn)
with bb.progress.ProgressHandler(d) as ph, open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') as cve_f:
total_years = date.today().year + 1 - YEAR_START
@@ -81,11 +127,14 @@ python do_fetch() {
# Retrieve meta last modified date
try:
- response = urllib.request.urlopen(meta_url)
+ response = urllib.request.urlopen(meta_url, timeout=cve_socket_timeout)
except urllib.error.URLError as e:
cve_f.write('Warning: CVE db update error, Unable to fetch CVE data.\n\n')
- bb.warn("Failed to fetch CVE data (%s)" % e.reason)
- return
+ bb.warn("Failed to fetch CVE data (%s)" % e)
+ import socket
+ result = socket.getaddrinfo("nvd.nist.gov", 443, proto=socket.IPPROTO_TCP)
+ bb.warn("Host IPs are %s" % (", ".join(t[4][0] for t in result)))
+ return False
if response:
for l in response.read().decode("utf-8").splitlines():
@@ -95,26 +144,28 @@ python do_fetch() {
break
else:
bb.warn("Cannot parse CVE metadata, update failed")
- return
+ return False
# Compare with current db last modified date
- c.execute("select DATE from META where YEAR = ?", (year,))
- meta = c.fetchone()
+ cursor = conn.execute("select DATE from META where YEAR = ?", (year,))
+ meta = cursor.fetchone()
+ cursor.close()
+
if not meta or meta[0] != last_modified:
bb.debug(2, "Updating entries")
# Clear products table entries corresponding to current year
- c.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,))
+ conn.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,)).close()
# Update db with current year json file
try:
- response = urllib.request.urlopen(json_url)
+ response = urllib.request.urlopen(json_url, timeout=cve_socket_timeout)
if response:
- update_db(c, gzip.decompress(response.read()).decode('utf-8'))
- c.execute("insert or replace into META values (?, ?)", [year, last_modified])
+ update_db(conn, gzip.decompress(response.read()).decode('utf-8'))
+ conn.execute("insert or replace into META values (?, ?)", [year, last_modified]).close()
except urllib.error.URLError as e:
cve_f.write('Warning: CVE db update error, CVE data is outdated.\n\n')
bb.warn("Cannot parse CVE data (%s), update failed" % e.reason)
- return
+ return False
else:
bb.debug(2, "Already up to date (last modified %s)" % last_modified)
# Update success, set the date to cve_check file.
@@ -123,27 +174,28 @@ python do_fetch() {
conn.commit()
conn.close()
-}
+ return True
-do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
-do_fetch[file-checksums] = ""
-do_fetch[vardeps] = ""
+def initialize_db(conn):
+ with conn:
+ c = conn.cursor()
+
+ c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
-def initialize_db(c):
- c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
+ c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
+ SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
- c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
- SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
+ c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
+ VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
+ VERSION_END TEXT, OPERATOR_END TEXT)")
+ c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
- c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
- VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
- VERSION_END TEXT, OPERATOR_END TEXT)")
- c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
+ c.close()
-def parse_node_and_insert(c, node, cveId):
+def parse_node_and_insert(conn, node, cveId):
# Parse children node if needed
for child in node.get('children', ()):
- parse_node_and_insert(c, child, cveId)
+ parse_node_and_insert(conn, child, cveId)
def cpe_generator():
for cpe in node.get('cpe_match', ()):
@@ -200,9 +252,9 @@ def parse_node_and_insert(c, node, cveId):
# Save processing by representing as -.
yield [cveId, vendor, product, '-', '', '', '']
- c.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator())
+ conn.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator()).close()
-def update_db(c, jsondata):
+def update_db(conn, jsondata):
import json
root = json.loads(jsondata)
@@ -226,12 +278,12 @@ def update_db(c, jsondata):
accessVector = accessVector or "UNKNOWN"
cvssv3 = 0.0
- c.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
- [cveId, cveDesc, cvssv2, cvssv3, date, accessVector])
+ conn.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
+ [cveId, cveDesc, cvssv2, cvssv3, date, accessVector]).close()
configurations = elt['configurations']['nodes']
for config in configurations:
- parse_node_and_insert(c, config, cveId)
+ parse_node_and_insert(conn, config, cveId)
do_fetch[nostamp] = "1"
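Editor's note: the reworked do_fetch above boils down to a copy-update-swap pattern: work on a scratch copy of the database, move it over the live file only on success, and never leave a half-written nvdcve database behind (journal cleanup aside). A condensed, hedged Python sketch with generic names, not the recipe's exact code:

    import os, shutil

    def update_database(db_file, tmp_file, do_update):
        """Refresh db_file via a scratch copy; keep the old file on any failure."""
        if os.path.exists(db_file):
            shutil.copy2(db_file, tmp_file)      # start from the current data
        if do_update(tmp_file):                  # e.g. pull NVD updates into the copy
            shutil.move(tmp_file, db_file)       # success: swap the copy in
            return True
        if os.path.exists(tmp_file):
            os.remove(tmp_file)                  # failure: discard, keep the old db
        return False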
diff --git a/meta/recipes-core/meta/cve-update-nvd2-native.bb b/meta/recipes-core/meta/cve-update-nvd2-native.bb
new file mode 100644
index 0000000000..1a3eeba6d0
--- /dev/null
+++ b/meta/recipes-core/meta/cve-update-nvd2-native.bb
@@ -0,0 +1,372 @@
+SUMMARY = "Updates the NVD CVE database"
+LICENSE = "MIT"
+
+# Important note:
+# This product uses the NVD API but is not endorsed or certified by the NVD.
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+inherit native
+
+deltask do_unpack
+deltask do_patch
+deltask do_configure
+deltask do_compile
+deltask do_install
+deltask do_populate_sysroot
+
+NVDCVE_URL ?= "https://services.nvd.nist.gov/rest/json/cves/2.0"
+
+# If you have an NVD API key (https://nvd.nist.gov/developers/request-an-api-key),
+# set it here to get higher rate limits.
+NVDCVE_API_KEY ?= ""
+
+# CVE database update interval, in seconds. By default: once a day (24*60*60).
+# Use 0 to force the update
+# Use a negative value to skip the update
+CVE_DB_UPDATE_INTERVAL ?= "86400"
+
+# CVE database incremental update age threshold, in seconds. If the database is
+# older than this threshold, do a full re-download, else, do an incremental
+# update. By default: the maximum allowed value from NVD: 120 days (120*24*60*60)
+# Use 0 to force a full download.
+CVE_DB_INCR_UPDATE_AGE_THRES ?= "10368000"
+
+# Number of attempts for each http query to nvd server before giving up
+CVE_DB_UPDATE_ATTEMPTS ?= "5"
+
+CVE_DB_TEMP_FILE ?= "${CVE_CHECK_DB_DIR}/temp_nvdcve_2.db"
+
+python () {
+ if not bb.data.inherits_class("cve-check", d):
+ raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.")
+}
+
+python do_fetch() {
+ """
+ Update NVD database with API 2.0
+ """
+ import bb.utils
+ import bb.progress
+ import shutil
+
+ bb.utils.export_proxies(d)
+
+ db_file = d.getVar("CVE_CHECK_DB_FILE")
+ db_dir = os.path.dirname(db_file)
+ db_tmp_file = d.getVar("CVE_DB_TEMP_FILE")
+
+ cleanup_db_download(db_file, db_tmp_file)
+ # By default let's update the whole database (since time 0)
+ database_time = 0
+
+ # The NVD database changes once a day, so no need to update more frequently
+ # Allow the user to force-update
+ try:
+ import time
+ update_interval = int(d.getVar("CVE_DB_UPDATE_INTERVAL"))
+ if update_interval < 0:
+ bb.note("CVE database update skipped")
+ return
+ if time.time() - os.path.getmtime(db_file) < update_interval:
+ bb.note("CVE database recently updated, skipping")
+ return
+ database_time = os.path.getmtime(db_file)
+
+ except OSError:
+ pass
+
+ bb.utils.mkdirhier(db_dir)
+ if os.path.exists(db_file):
+ shutil.copy2(db_file, db_tmp_file)
+
+ if update_db_file(db_tmp_file, d, database_time) == True:
+ # Update downloaded correctly, can swap files
+ shutil.move(db_tmp_file, db_file)
+ else:
+ # Update failed, do not modify the database
+ bb.warn("CVE database update failed")
+ os.remove(db_tmp_file)
+}
+
+do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
+do_fetch[file-checksums] = ""
+do_fetch[vardeps] = ""
+
+def cleanup_db_download(db_file, db_tmp_file):
+ """
+ Cleanup the download space from possible failed downloads
+ """
+
+ # Clean up the updates done on the main file
+ # Remove it only if a journal file exists - it means a complete re-download
+ if os.path.exists("{0}-journal".format(db_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_file))
+
+ if os.path.exists(db_file):
+ os.remove(db_file)
+
+ # Clean-up the temporary file downloads, we can remove both journal
+ # and the temporary database
+ if os.path.exists("{0}-journal".format(db_tmp_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_tmp_file))
+
+ if os.path.exists(db_tmp_file):
+ os.remove(db_tmp_file)
+
+def nvd_request_wait(attempt, min_wait):
+    return min((2 * attempt) + min_wait, 30)
+
+def nvd_request_next(url, attempts, api_key, args, min_wait):
+ """
+ Request next part of the NVD database
+ NVD API documentation: https://nvd.nist.gov/developers/vulnerabilities
+ """
+
+ import urllib.request
+ import urllib.parse
+ import gzip
+ import http
+ import time
+
+ request = urllib.request.Request(url + "?" + urllib.parse.urlencode(args))
+ if api_key:
+ request.add_header("apiKey", api_key)
+ bb.note("Requesting %s" % request.full_url)
+
+ for attempt in range(attempts):
+ try:
+ r = urllib.request.urlopen(request)
+
+ if (r.headers['content-encoding'] == 'gzip'):
+ buf = r.read()
+ raw_data = gzip.decompress(buf).decode("utf-8")
+ else:
+ raw_data = r.read().decode("utf-8")
+
+ r.close()
+
+ except Exception as e:
+ wait_time = nvd_request_wait(attempt, min_wait)
+ bb.note("CVE database: received error (%s)" % (e))
+ bb.note("CVE database: retrying download after %d seconds. attempted (%d/%d)" % (wait_time, attempt+1, attempts))
+ time.sleep(wait_time)
+ pass
+ else:
+ return raw_data
+ else:
+ # We failed at all attempts
+ return None
+
+def update_db_file(db_tmp_file, d, database_time):
+ """
+ Update the given database file
+ """
+ import bb.utils, bb.progress
+ import datetime
+ import sqlite3
+ import json
+
+ # Connect to database
+ conn = sqlite3.connect(db_tmp_file)
+ initialize_db(conn)
+
+ req_args = {'startIndex' : 0}
+
+ incr_update_threshold = int(d.getVar("CVE_DB_INCR_UPDATE_AGE_THRES"))
+ if database_time != 0:
+ database_date = datetime.datetime.fromtimestamp(database_time, tz=datetime.timezone.utc)
+ today_date = datetime.datetime.now(tz=datetime.timezone.utc)
+ delta = today_date - database_date
+ if incr_update_threshold == 0:
+ bb.note("CVE database: forced full update")
+ elif delta < datetime.timedelta(seconds=incr_update_threshold):
+ bb.note("CVE database: performing partial update")
+ # The maximum range for time is 120 days
+ if delta > datetime.timedelta(days=120):
+ bb.error("CVE database: Trying to do an incremental update on a larger than supported range")
+ req_args['lastModStartDate'] = database_date.isoformat()
+ req_args['lastModEndDate'] = today_date.isoformat()
+ else:
+ bb.note("CVE database: file too old, forcing a full update")
+ else:
+ bb.note("CVE database: no preexisting database, do a full download")
+
+ with bb.progress.ProgressHandler(d) as ph, open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') as cve_f:
+
+ bb.note("Updating entries")
+ index = 0
+ url = d.getVar("NVDCVE_URL")
+ api_key = d.getVar("NVDCVE_API_KEY") or None
+ attempts = int(d.getVar("CVE_DB_UPDATE_ATTEMPTS"))
+
+ # Recommended by NVD
+ wait_time = 6
+ if api_key:
+ wait_time = 2
+
+ while True:
+ req_args['startIndex'] = index
+ raw_data = nvd_request_next(url, attempts, api_key, req_args, wait_time)
+ if raw_data is None:
+ # We haven't managed to download data
+ return False
+
+ data = json.loads(raw_data)
+
+ index = data["startIndex"]
+ total = data["totalResults"]
+ per_page = data["resultsPerPage"]
+ bb.note("Got %d entries" % per_page)
+ for cve in data["vulnerabilities"]:
+ update_db(conn, cve)
+
+ index += per_page
+ ph.update((float(index) / (total+1)) * 100)
+ if index >= total:
+ break
+
+ # Recommended by NVD
+ time.sleep(wait_time)
+
+ # Update success, set the date to cve_check file.
+ cve_f.write('CVE database update : %s\n\n' % datetime.date.today())
+
+ conn.commit()
+ conn.close()
+ return True
+
+def initialize_db(conn):
+ with conn:
+ c = conn.cursor()
+
+ c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
+
+ c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
+ SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
+
+ c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
+ VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
+ VERSION_END TEXT, OPERATOR_END TEXT)")
+ c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
+
+ c.close()
+
+def parse_node_and_insert(conn, node, cveId):
+
+ def cpe_generator():
+ for cpe in node.get('cpeMatch', ()):
+ if not cpe['vulnerable']:
+ return
+ cpe23 = cpe.get('criteria')
+ if not cpe23:
+ return
+ cpe23 = cpe23.split(':')
+ if len(cpe23) < 6:
+ return
+ vendor = cpe23[3]
+ product = cpe23[4]
+ version = cpe23[5]
+
+ if cpe23[6] == '*' or cpe23[6] == '-':
+ version_suffix = ""
+ else:
+ version_suffix = "_" + cpe23[6]
+
+ if version != '*' and version != '-':
+ # Version is defined, this is a '=' match
+ yield [cveId, vendor, product, version + version_suffix, '=', '', '']
+ elif version == '-':
+ # no version information is available
+ yield [cveId, vendor, product, version, '', '', '']
+ else:
+ # Parse start version, end version and operators
+ op_start = ''
+ op_end = ''
+ v_start = ''
+ v_end = ''
+
+ if 'versionStartIncluding' in cpe:
+ op_start = '>='
+ v_start = cpe['versionStartIncluding']
+
+ if 'versionStartExcluding' in cpe:
+ op_start = '>'
+ v_start = cpe['versionStartExcluding']
+
+ if 'versionEndIncluding' in cpe:
+ op_end = '<='
+ v_end = cpe['versionEndIncluding']
+
+ if 'versionEndExcluding' in cpe:
+ op_end = '<'
+ v_end = cpe['versionEndExcluding']
+
+ if op_start or op_end or v_start or v_end:
+ yield [cveId, vendor, product, v_start, op_start, v_end, op_end]
+ else:
+                    # No version information here either, just expressed differently.
+ # Save processing by representing as -.
+ yield [cveId, vendor, product, '-', '', '', '']
+
+ conn.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator()).close()
+
+def update_db(conn, elt):
+ """
+ Update a single entry in the on-disk database
+ """
+
+ accessVector = None
+ cveId = elt['cve']['id']
+ if elt['cve']['vulnStatus'] == "Rejected":
+ c = conn.cursor()
+ c.execute("delete from PRODUCTS where ID = ?;", [cveId])
+ c.execute("delete from NVD where ID = ?;", [cveId])
+ c.close()
+ return
+ cveDesc = ""
+ for desc in elt['cve']['descriptions']:
+ if desc['lang'] == 'en':
+ cveDesc = desc['value']
+ date = elt['cve']['lastModified']
+ try:
+ accessVector = elt['cve']['metrics']['cvssMetricV2'][0]['cvssData']['accessVector']
+ cvssv2 = elt['cve']['metrics']['cvssMetricV2'][0]['cvssData']['baseScore']
+ except KeyError:
+ cvssv2 = 0.0
+ cvssv3 = None
+ try:
+ accessVector = accessVector or elt['cve']['metrics']['cvssMetricV30'][0]['cvssData']['attackVector']
+ cvssv3 = elt['cve']['metrics']['cvssMetricV30'][0]['cvssData']['baseScore']
+ except KeyError:
+ pass
+ try:
+ accessVector = accessVector or elt['cve']['metrics']['cvssMetricV31'][0]['cvssData']['attackVector']
+ cvssv3 = cvssv3 or elt['cve']['metrics']['cvssMetricV31'][0]['cvssData']['baseScore']
+ except KeyError:
+ pass
+ accessVector = accessVector or "UNKNOWN"
+ cvssv3 = cvssv3 or 0.0
+
+ conn.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
+ [cveId, cveDesc, cvssv2, cvssv3, date, accessVector]).close()
+
+ try:
+ # Remove any pre-existing CVE configuration. Even for partial database
+ # update, those will be repopulated. This ensures that old
+ # configuration is not kept for an updated CVE.
+ conn.execute("delete from PRODUCTS where ID = ?", [cveId]).close()
+ for config in elt['cve']['configurations']:
+ # This is suboptimal as it doesn't handle AND/OR and negate, but is better than nothing
+ for node in config["nodes"]:
+ parse_node_and_insert(conn, node, cveId)
+ except KeyError:
+ bb.note("CVE %s has no configurations" % cveId)
+
+do_fetch[nostamp] = "1"
+
+EXCLUDE_FROM_WORLD = "1"
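Editor's note: the densest part of the new recipe is cpe_generator(), which flattens an NVD API 2.0 cpeMatch entry into the (vendor, product, version bounds, operators) row stored in the PRODUCTS table. The standalone Python rendering below is a hedged sketch of that mapping, not the recipe's code; the CVE id and the glibc example input are invented purely for illustration.

    def cpe_to_row(cve_id, cpe):
        """Map one NVD 2.0 cpeMatch dict to a PRODUCTS-style row (sketch)."""
        parts = cpe["criteria"].split(":")   # cpe:2.3:a:vendor:product:version:update:...
        vendor, product, version, update = parts[3], parts[4], parts[5], parts[6]
        suffix = "" if update in ("*", "-") else "_" + update

        if version not in ("*", "-"):
            return [cve_id, vendor, product, version + suffix, "=", "", ""]
        if version == "-":                   # no version information at all
            return [cve_id, vendor, product, "-", "", "", ""]

        # Open-ended version: build a range from the optional bound fields.
        op_start = ">=" if "versionStartIncluding" in cpe else ">" if "versionStartExcluding" in cpe else ""
        v_start = cpe.get("versionStartIncluding", cpe.get("versionStartExcluding", ""))
        op_end = "<=" if "versionEndIncluding" in cpe else "<" if "versionEndExcluding" in cpe else ""
        v_end = cpe.get("versionEndIncluding", cpe.get("versionEndExcluding", ""))
        if not (op_start or op_end or v_start or v_end):
            return [cve_id, vendor, product, "-", "", "", ""]
        return [cve_id, vendor, product, v_start, op_start, v_end, op_end]

    # Invented example: every glibc release below 2.39 is flagged as affected.
    row = cpe_to_row("CVE-XXXX-YYYY",
                     {"vulnerable": True,
                      "criteria": "cpe:2.3:a:gnu:glibc:*:*:*:*:*:*:*:*",
                      "versionEndExcluding": "2.39"})
    # -> ['CVE-XXXX-YYYY', 'gnu', 'glibc', '', '', '2.39', '<']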
diff --git a/meta/recipes-core/meta/wic-tools.bb b/meta/recipes-core/meta/wic-tools.bb
index ba0916cb56..9282d36a4d 100644
--- a/meta/recipes-core/meta/wic-tools.bb
+++ b/meta/recipes-core/meta/wic-tools.bb
@@ -6,7 +6,8 @@ DEPENDS = "\
parted-native gptfdisk-native dosfstools-native \
mtools-native bmap-tools-native grub-native cdrtools-native \
btrfs-tools-native squashfs-tools-native pseudo-native \
- e2fsprogs-native util-linux-native tar-native\
+ e2fsprogs-native util-linux-native tar-native erofs-utils-native \
+ virtual/${TARGET_PREFIX}binutils \
"
DEPENDS:append:x86 = " syslinux-native syslinux grub-efi systemd-boot"
DEPENDS:append:x86-64 = " syslinux-native syslinux grub-efi systemd-boot"
diff --git a/meta/recipes-core/ncurses/files/CVE-2023-29491.patch b/meta/recipes-core/ncurses/files/CVE-2023-29491.patch
new file mode 100644
index 0000000000..0116959bbf
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2023-29491.patch
@@ -0,0 +1,464 @@
+From eb51b1ea1f75a0ec17c9c5937cb28df1e8eeec56 Mon Sep 17 00:00:00 2001
+From: Thomas E. Dickey <dickey@invisible-island.net>
+Date: Sun, 9 Apr 2023 05:38:25 +0530
+Subject: [PATCH] Fix CVE-2023-29491
+
+CVE: CVE-2023-29491
+
+Upstream-Status: Backport [http://ncurses.scripts.mit.edu/?p=ncurses.git;a=commitdiff;h=eb51b1ea1f75a0ec17c9c5937cb28df1e8eeec56]
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ ncurses/tinfo/lib_tgoto.c | 10 +++-
+ ncurses/tinfo/lib_tparm.c | 116 ++++++++++++++++++++++++++++++++-----
+ ncurses/tinfo/read_entry.c | 3 +
+ progs/tic.c | 6 ++
+ progs/tparm_type.c | 9 +++
+ progs/tparm_type.h | 2 +
+ progs/tput.c | 61 ++++++++++++++++---
+ 7 files changed, 185 insertions(+), 22 deletions(-)
+
+diff --git a/ncurses/tinfo/lib_tgoto.c b/ncurses/tinfo/lib_tgoto.c
+index 9cf5e100..c50ed4df 100644
+--- a/ncurses/tinfo/lib_tgoto.c
++++ b/ncurses/tinfo/lib_tgoto.c
+@@ -207,6 +207,14 @@ tgoto(const char *string, int x, int y)
+ result = tgoto_internal(string, x, y);
+ else
+ #endif
+- result = TIPARM_2(string, y, x);
++ if ((result = TIPARM_2(string, y, x)) == NULL) {
++ /*
++ * Because termcap did not provide a more general solution such as
++ * tparm(), it was necessary to handle single-parameter capabilities
++ * using tgoto(). The internal _nc_tiparm() function returns a NULL
++ * for that case; retry for the single-parameter case.
++ */
++ result = TIPARM_1(string, y);
++ }
+ returnPtr(result);
+ }
+diff --git a/ncurses/tinfo/lib_tparm.c b/ncurses/tinfo/lib_tparm.c
+index d9bdfd8f..a10a3877 100644
+--- a/ncurses/tinfo/lib_tparm.c
++++ b/ncurses/tinfo/lib_tparm.c
+@@ -1086,6 +1086,64 @@ tparam_internal(TPARM_STATE *tps, const char *string, TPARM_DATA *data)
+ return (TPS(out_buff));
+ }
+
++#ifdef CUR
++/*
++ * Only a few standard capabilities accept string parameters. The others that
++ * are parameterized accept only numeric parameters.
++ */
++static bool
++check_string_caps(TPARM_DATA *data, const char *string)
++{
++ bool result = FALSE;
++
++#define CHECK_CAP(name) (VALID_STRING(name) && !strcmp(name, string))
++
++ /*
++ * Disallow string parameters unless we can check them against a terminal
++ * description.
++ */
++ if (cur_term != NULL) {
++ int want_type = 0;
++
++ if (CHECK_CAP(pkey_key))
++ want_type = 2; /* function key #1, type string #2 */
++ else if (CHECK_CAP(pkey_local))
++ want_type = 2; /* function key #1, execute string #2 */
++ else if (CHECK_CAP(pkey_xmit))
++ want_type = 2; /* function key #1, transmit string #2 */
++ else if (CHECK_CAP(plab_norm))
++ want_type = 2; /* label #1, show string #2 */
++ else if (CHECK_CAP(pkey_plab))
++ want_type = 6; /* function key #1, type string #2, show string #3 */
++#if NCURSES_XNAMES
++ else {
++ char *check;
++
++ check = tigetstr("Cs");
++ if (CHECK_CAP(check))
++ want_type = 1; /* style #1 */
++
++ check = tigetstr("Ms");
++ if (CHECK_CAP(check))
++ want_type = 3; /* storage unit #1, content #2 */
++ }
++#endif
++
++ if (want_type == data->tparm_type) {
++ result = TRUE;
++ } else {
++ T(("unexpected string-parameter"));
++ }
++ }
++ return result;
++}
++
++#define ValidCap() (myData.tparm_type == 0 || \
++ check_string_caps(&myData, string))
++#else
++#define ValidCap() 1
++#endif
++
+ #if NCURSES_TPARM_VARARGS
+
+ NCURSES_EXPORT(char *)
+@@ -1100,7 +1158,7 @@ tparm(const char *string, ...)
+ tps->tname = "tparm";
+ #endif /* TRACE */
+
+- if (tparm_setup(cur_term, string, &myData) == OK) {
++ if (tparm_setup(cur_term, string, &myData) == OK && ValidCap()) {
+ va_list ap;
+
+ va_start(ap, string);
+@@ -1135,7 +1193,7 @@ tparm(const char *string,
+ tps->tname = "tparm";
+ #endif /* TRACE */
+
+- if (tparm_setup(cur_term, string, &myData) == OK) {
++ if (tparm_setup(cur_term, string, &myData) == OK && ValidCap()) {
+
+ myData.param[0] = a1;
+ myData.param[1] = a2;
+@@ -1166,7 +1224,7 @@ tiparm(const char *string, ...)
+ tps->tname = "tiparm";
+ #endif /* TRACE */
+
+- if (tparm_setup(cur_term, string, &myData) == OK) {
++ if (tparm_setup(cur_term, string, &myData) == OK && ValidCap()) {
+ va_list ap;
+
+ va_start(ap, string);
+@@ -1179,7 +1237,25 @@ tiparm(const char *string, ...)
+ }
+
+ /*
+- * The internal-use flavor ensures that the parameters are numbers, not strings
++ * The internal-use flavor ensures that parameters are numbers, not strings.
++ * In addition to ensuring that they are numbers, it ensures that the parameter
++ * count is consistent with intended usage.
++ *
++ * Unlike the general-purpose tparm/tiparm, these internal calls are fairly
++ * well defined:
++ *
++ * expected == 0 - not applicable
++ * expected == 1 - set color, or vertical/horizontal addressing
++ * expected == 2 - cursor addressing
++ * expected == 4 - initialize color or color pair
++ * expected == 9 - set attributes
++ *
++ * Only for the last case (set attributes) should a parameter be optional.
++ * Also, a capability which calls for more parameters than expected should be
++ * ignored.
++ *
++ * Return a null if the parameter-checks fail. Otherwise, return a pointer to
++ * the formatted capability string.
+ */
+ NCURSES_EXPORT(char *)
+ _nc_tiparm(int expected, const char *string, ...)
+@@ -1189,22 +1265,36 @@ _nc_tiparm(int expected, const char *string, ...)
+ char *result = NULL;
+
+ _nc_tparm_err = 0;
++ T((T_CALLED("_nc_tiparm(%d, %s, ...)"), expected, _nc_visbuf(string)));
+ #ifdef TRACE
+ tps->tname = "_nc_tiparm";
+ #endif /* TRACE */
+
+- if (tparm_setup(cur_term, string, &myData) == OK
+- && myData.num_actual <= expected
+- && myData.tparm_type == 0) {
+- va_list ap;
++ if (tparm_setup(cur_term, string, &myData) == OK && ValidCap()) {
++ if (myData.num_actual == 0) {
++ T(("missing parameter%s, expected %s%d",
++ expected > 1 ? "s" : "",
++ expected == 9 ? "up to " : "",
++ expected));
++ } else if (myData.num_actual > expected) {
++ T(("too many parameters, have %d, expected %d",
++ myData.num_actual,
++ expected));
++ } else if (expected != 9 && myData.num_actual != expected) {
++ T(("expected %d parameters, have %d",
++ myData.num_actual,
++ expected));
++ } else {
++ va_list ap;
+
+- va_start(ap, string);
+- tparm_copy_valist(&myData, FALSE, ap);
+- va_end(ap);
++ va_start(ap, string);
++ tparm_copy_valist(&myData, FALSE, ap);
++ va_end(ap);
+
+- result = tparam_internal(tps, string, &myData);
++ result = tparam_internal(tps, string, &myData);
++ }
+ }
+- return result;
++ returnPtr(result);
+ }
+
+ /*
+diff --git a/ncurses/tinfo/read_entry.c b/ncurses/tinfo/read_entry.c
+index 66e3d31e..8ccb1570 100644
+--- a/ncurses/tinfo/read_entry.c
++++ b/ncurses/tinfo/read_entry.c
+@@ -321,6 +321,9 @@ _nc_read_termtype(TERMTYPE2 *ptr, char *buffer, int limit)
+ || bool_count < 0
+ || num_count < 0
+ || str_count < 0
++ || bool_count > BOOLCOUNT
++ || num_count > NUMCOUNT
++ || str_count > STRCOUNT
+ || str_size < 0) {
+ returnDB(TGETENT_NO);
+ }
+diff --git a/progs/tic.c b/progs/tic.c
+index 152010d2..92d551c8 100644
+--- a/progs/tic.c
++++ b/progs/tic.c
+@@ -2255,9 +2255,15 @@ check_1_infotocap(const char *name, NCURSES_CONST char *value, int count)
+
+ _nc_reset_tparm(NULL);
+ switch (actual) {
++ case Str:
++ result = TPARM_1(value, strings[1]);
++ break;
+ case Num_Str:
+ result = TPARM_2(value, numbers[1], strings[2]);
+ break;
++ case Str_Str:
++ result = TPARM_2(value, strings[1], strings[2]);
++ break;
+ case Num_Str_Str:
+ result = TPARM_3(value, numbers[1], strings[2], strings[3]);
+ break;
+diff --git a/progs/tparm_type.c b/progs/tparm_type.c
+index 3da4a077..644aa62a 100644
+--- a/progs/tparm_type.c
++++ b/progs/tparm_type.c
+@@ -47,6 +47,7 @@ tparm_type(const char *name)
+ {code, {longname} }, \
+ {code, {ti} }, \
+ {code, {tc} }
++#define XD(code, onlyname) TD(code, onlyname, onlyname, onlyname)
+ TParams result = Numbers;
+ /* *INDENT-OFF* */
+ static const struct {
+@@ -58,6 +59,10 @@ tparm_type(const char *name)
+ TD(Num_Str, "pkey_xmit", "pfx", "px"),
+ TD(Num_Str, "plab_norm", "pln", "pn"),
+ TD(Num_Str_Str, "pkey_plab", "pfxl", "xl"),
++#if NCURSES_XNAMES
++ XD(Str, "Cs"),
++ XD(Str_Str, "Ms"),
++#endif
+ };
+ /* *INDENT-ON* */
+
+@@ -80,12 +85,16 @@ guess_tparm_type(int nparam, char **p_is_s)
+ case 1:
+ if (!p_is_s[0])
+ result = Numbers;
++ if (p_is_s[0])
++ result = Str;
+ break;
+ case 2:
+ if (!p_is_s[0] && !p_is_s[1])
+ result = Numbers;
+ if (!p_is_s[0] && p_is_s[1])
+ result = Num_Str;
++ if (p_is_s[0] && p_is_s[1])
++ result = Str_Str;
+ break;
+ case 3:
+ if (!p_is_s[0] && !p_is_s[1] && !p_is_s[2])
+diff --git a/progs/tparm_type.h b/progs/tparm_type.h
+index 7c102a30..af5bcf0f 100644
+--- a/progs/tparm_type.h
++++ b/progs/tparm_type.h
+@@ -45,8 +45,10 @@
+ typedef enum {
+ Other = -1
+ ,Numbers = 0
++ ,Str
+ ,Num_Str
+ ,Num_Str_Str
++ ,Str_Str
+ } TParams;
+
+ extern TParams tparm_type(const char *name);
+diff --git a/progs/tput.c b/progs/tput.c
+index 4cd0c5ba..41508b72 100644
+--- a/progs/tput.c
++++ b/progs/tput.c
+@@ -1,5 +1,5 @@
+ /****************************************************************************
+- * Copyright 2018-2021,2022 Thomas E. Dickey *
++ * Copyright 2018-2022,2023 Thomas E. Dickey *
+ * Copyright 1998-2016,2017 Free Software Foundation, Inc. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining a *
+@@ -47,12 +47,15 @@
+ #include <transform.h>
+ #include <tty_settings.h>
+
+-MODULE_ID("$Id: tput.c,v 1.99 2022/02/26 23:19:31 tom Exp $")
++MODULE_ID("$Id: tput.c,v 1.102 2023/04/08 16:26:36 tom Exp $")
+
+ #define PUTS(s) fputs(s, stdout)
+
+ const char *_nc_progname = "tput";
+
++static bool opt_v = FALSE; /* quiet, do not show warnings */
++static bool opt_x = FALSE; /* clear scrollback if possible */
++
+ static bool is_init = FALSE;
+ static bool is_reset = FALSE;
+ static bool is_clear = FALSE;
+@@ -81,6 +84,7 @@ usage(const char *optstring)
+ KEEP(" -S << read commands from standard input")
+ KEEP(" -T TERM use this instead of $TERM")
+ KEEP(" -V print curses-version")
++ KEEP(" -v verbose, show warnings")
+ KEEP(" -x do not try to clear scrollback")
+ KEEP("")
+ KEEP("Commands:")
+@@ -148,7 +152,7 @@ exit_code(int token, int value)
+ * Returns nonzero on error.
+ */
+ static int
+-tput_cmd(int fd, TTY * settings, bool opt_x, int argc, char **argv, int *used)
++tput_cmd(int fd, TTY * settings, int argc, char **argv, int *used)
+ {
+ NCURSES_CONST char *name;
+ char *s;
+@@ -231,7 +235,9 @@ tput_cmd(int fd, TTY * settings, bool opt_x, int argc, char **argv, int *used)
+ } else if (VALID_STRING(s)) {
+ if (argc > 1) {
+ int k;
++ int narg;
+ int analyzed;
++ int provided;
+ int popcount;
+ long numbers[1 + NUM_PARM];
+ char *strings[1 + NUM_PARM];
+@@ -271,14 +277,45 @@ tput_cmd(int fd, TTY * settings, bool opt_x, int argc, char **argv, int *used)
+
+ popcount = 0;
+ _nc_reset_tparm(NULL);
++ /*
++ * Count the number of numeric parameters which are provided.
++ */
++ provided = 0;
++ for (narg = 1; narg < argc; ++narg) {
++ char *ending = NULL;
++ long check = strtol(argv[narg], &ending, 10);
++ if (check < 0 || ending == argv[narg] || *ending != '\0')
++ break;
++ provided = narg;
++ }
+ switch (paramType) {
++ case Str:
++ s = TPARM_1(s, strings[1]);
++ analyzed = 1;
++ if (provided == 0 && argc >= 1)
++ provided++;
++ break;
++ case Str_Str:
++ s = TPARM_2(s, strings[1], strings[2]);
++ analyzed = 2;
++ if (provided == 0 && argc >= 1)
++ provided++;
++ if (provided == 1 && argc >= 2)
++ provided++;
++ break;
+ case Num_Str:
+ s = TPARM_2(s, numbers[1], strings[2]);
+ analyzed = 2;
++ if (provided == 1 && argc >= 2)
++ provided++;
+ break;
+ case Num_Str_Str:
+ s = TPARM_3(s, numbers[1], strings[2], strings[3]);
+ analyzed = 3;
++ if (provided == 1 && argc >= 2)
++ provided++;
++ if (provided == 2 && argc >= 3)
++ provided++;
+ break;
+ case Numbers:
+ analyzed = _nc_tparm_analyze(NULL, s, p_is_s, &popcount);
+@@ -316,7 +353,13 @@ tput_cmd(int fd, TTY * settings, bool opt_x, int argc, char **argv, int *used)
+ if (analyzed < popcount) {
+ analyzed = popcount;
+ }
+- *used += analyzed;
++ if (opt_v && (analyzed != provided)) {
++ fprintf(stderr, "%s: %s parameters for \"%s\"\n",
++ _nc_progname,
++ (analyzed < provided ? "extra" : "missing"),
++ argv[0]);
++ }
++ *used += provided;
+ }
+
+ /* use putp() in order to perform padding */
+@@ -339,7 +382,6 @@ main(int argc, char **argv)
+ int used;
+ TTY old_settings;
+ TTY tty_settings;
+- bool opt_x = FALSE; /* clear scrollback if possible */
+ bool is_alias;
+ bool need_tty;
+
+@@ -348,7 +390,7 @@ main(int argc, char **argv)
+
+ term = getenv("TERM");
+
+- while ((c = getopt(argc, argv, is_alias ? "T:Vx" : "ST:Vx")) != -1) {
++ while ((c = getopt(argc, argv, is_alias ? "T:Vvx" : "ST:Vvx")) != -1) {
+ switch (c) {
+ case 'S':
+ cmdline = FALSE;
+@@ -361,6 +403,9 @@ main(int argc, char **argv)
+ case 'V':
+ puts(curses_version());
+ ExitProgram(EXIT_SUCCESS);
++ case 'v': /* verbose */
++ opt_v = TRUE;
++ break;
+ case 'x': /* do not try to clear scrollback */
+ opt_x = TRUE;
+ break;
+@@ -404,7 +449,7 @@ main(int argc, char **argv)
+ usage(NULL);
+ while (argc > 0) {
+ tty_settings = old_settings;
+- code = tput_cmd(fd, &tty_settings, opt_x, argc, argv, &used);
++ code = tput_cmd(fd, &tty_settings, argc, argv, &used);
+ if (code != 0)
+ break;
+ argc -= used;
+@@ -439,7 +484,7 @@ main(int argc, char **argv)
+ while (argnum > 0) {
+ int code;
+ tty_settings = old_settings;
+- code = tput_cmd(fd, &tty_settings, opt_x, argnum, argnow, &used);
++ code = tput_cmd(fd, &tty_settings, argnum, argnow, &used);
+ if (code != 0) {
+ if (result == 0)
+ result = ErrSystem(0); /* will return value >4 */
+--
+2.40.0
+
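The hunks above teach tput_cmd() to count how many of the leading arguments parse as plain non-negative decimal integers (the strtol loop filling "provided"), to warn under the new -v flag when that count differs from the number of parameters the capability expansion actually consumed, and to advance the argument cursor by the provided count rather than the analyzed one. A rough sketch of the counting rule, written as a Python analogue rather than the ncurses C source:

    def count_numeric_params(args):
        # Arguments count as "provided" numeric parameters only up to the first
        # one that is not a plain non-negative decimal integer. (The strtol()
        # check in the patch would additionally tolerate forms like " 5" or "+5".)
        provided = 0
        for narg, arg in enumerate(args, start=1):
            if not arg.isdigit():
                break
            provided = narg
        return provided

    count_numeric_params(["3", "7"])        # 2, as for "tput cup 3 7"
    count_numeric_params(["red"])           # 0, as for "tput setaf red"
    count_numeric_params(["5", "x", "9"])   # 1, stops at the first non-numeric argument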
diff --git a/meta/recipes-core/ncurses/files/CVE-2023-50495.patch b/meta/recipes-core/ncurses/files/CVE-2023-50495.patch
new file mode 100644
index 0000000000..e5a8f43b01
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2023-50495.patch
@@ -0,0 +1,81 @@
+commit ebc08cff36689eec54edc1ce2de6ebac826bd6cd
+Author: Peter Marko <peter.marko@siemens.com>
+Date: Fri Apr 12 23:56:25 2024 +0200
+
+check return value of _nc_save_str(), in special case for tic where
+extended capabilities are processed but the terminal description was
+not initialized (report by Ziqiao Kong).
+
+Only the parts relevant to this CVE were extracted from the upstream patch.
+
+CVE: CVE-2023-50495
+Upstream-Status: Backport [https://invisible-island.net/archives/ncurses/6.4/ncurses-6.4-20230424.patch.gz]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ ncurses/tinfo/parse_entry.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/ncurses/tinfo/parse_entry.c b/ncurses/tinfo/parse_entry.c
+index a77cd0b..8ac02ac 100644
+--- a/ncurses/tinfo/parse_entry.c
++++ b/ncurses/tinfo/parse_entry.c
+@@ -110,7 +110,7 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ /* Well, we are given a cancel for a name that we don't recognize */
+ return _nc_extend_names(entryp, name, STRING);
+ default:
+- return 0;
++ return NULL;
+ }
+
+ /* Adjust the 'offset' (insertion-point) to keep the lists of extended
+@@ -142,6 +142,11 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ for (last = (unsigned) (max - 1); last > tindex; last--)
+
+ if (!found) {
++ char *saved;
++
++ if ((saved = _nc_save_str(name)) == NULL)
++ return NULL;
++
+ switch (token_type) {
+ case BOOLEAN:
+ tp->ext_Booleans++;
+@@ -169,7 +174,7 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ TYPE_REALLOC(char *, actual, tp->ext_Names);
+ while (--actual > offset)
+ tp->ext_Names[actual] = tp->ext_Names[actual - 1];
+- tp->ext_Names[offset] = _nc_save_str(name);
++ tp->ext_Names[offset] = saved;
+ }
+
+ temp.nte_name = tp->ext_Names[offset];
+@@ -337,6 +342,8 @@ _nc_parse_entry(ENTRY * entryp, int literal, bool silent)
+ bool is_use = (strcmp(_nc_curr_token.tk_name, "use") == 0);
+ bool is_tc = !is_use && (strcmp(_nc_curr_token.tk_name, "tc") == 0);
+ if (is_use || is_tc) {
++ char *saved;
++
+ if (!VALID_STRING(_nc_curr_token.tk_valstring)
+ || _nc_curr_token.tk_valstring[0] == '\0') {
+ _nc_warning("missing name for use-clause");
+@@ -350,11 +357,13 @@ _nc_parse_entry(ENTRY * entryp, int literal, bool silent)
+ _nc_curr_token.tk_valstring);
+ continue;
+ }
+- entryp->uses[entryp->nuses].name = _nc_save_str(_nc_curr_token.tk_valstring);
+- entryp->uses[entryp->nuses].line = _nc_curr_line;
+- entryp->nuses++;
+- if (entryp->nuses > 1 && is_tc) {
+- BAD_TC_USAGE
++ if ((saved = _nc_save_str(_nc_curr_token.tk_valstring)) != NULL) {
++ entryp->uses[entryp->nuses].name = saved;
++ entryp->uses[entryp->nuses].line = _nc_curr_line;
++ entryp->nuses++;
++ if (entryp->nuses > 1 && is_tc) {
++ BAD_TC_USAGE
++ }
+ }
+ } else {
+ /* normal token lookup */
diff --git a/meta/recipes-core/ncurses/ncurses_6.3+20220423.bb b/meta/recipes-core/ncurses/ncurses_6.3+20220423.bb
index f67a3f5bf4..da1e6d838d 100644
--- a/meta/recipes-core/ncurses/ncurses_6.3+20220423.bb
+++ b/meta/recipes-core/ncurses/ncurses_6.3+20220423.bb
@@ -3,6 +3,8 @@ require ncurses.inc
SRC_URI += "file://0001-tic-hang.patch \
file://0002-configure-reproducible.patch \
file://0003-gen-pkgconfig.in-Do-not-include-LDFLAGS-in-generated.patch \
+ file://CVE-2023-29491.patch \
+ file://CVE-2023-50495.patch \
"
# commit id corresponds to the revision in package version
SRCREV = "a0bc708bc6954b5d3c0a38d92b683c3ec3135260"
diff --git a/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch b/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch
index 89d9ffab5e..0c3df4fc44 100644
--- a/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch
+++ b/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch
@@ -10,7 +10,7 @@ tools. The BBAKE_EDK_TOOLS_PATH string is used as a pattern to be replaced
with the appropriate location before building.
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
-Upstream-Status: Pending
+Upstream-Status: Inappropriate [oe-core cross compile specific]
---
OvmfPkg/build.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch b/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch
index f6141c8af5..2293d7e938 100644
--- a/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch
+++ b/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch
@@ -6,8 +6,13 @@ Subject: [PATCH 2/6] BaseTools: makefile: adjust to build in under bitbake
Prepend the build flags with those of bitbake. This is to build
using the bitbake native sysroot include and library directories.
+Note from Alex: this is not appropriate for upstream submission as
+the recipe already does lots of similar in-place fixups elsewhere, so
+this patch should be converted to follow that pattern. We're not going
+to fight against how upstream wants to configure the build.
+
Signed-off-by: Ricardo Neri <ricardo.neri@linux.intel.com>
-Upstream-Status: Pending
+Upstream-Status: Inappropriate [needs to be converted to in-recipe fixups]
---
BaseTools/Source/C/Makefiles/header.makefile | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb b/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
index 4f844ad925..d06c6a5609 100644
--- a/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
+++ b/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
@@ -4,3 +4,4 @@ PR = "r1"
inherit packagegroup
RDEPENDS:${PN} = "dropbear"
+RRECOMMENDS:${PN} = "openssh-sftp-server"
diff --git a/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb b/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb
index 9523aadd15..e62567894b 100644
--- a/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb
+++ b/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb
@@ -98,11 +98,14 @@ RDEPENDS:packagegroup-self-hosted-sdk:append:libc-glibc = "\
glibc-utils \
rpcsvc-proto \
"
+
+STRACE = "strace"
+STRACE:riscv32 = ""
RDEPENDS:packagegroup-self-hosted-debug = " \
gdb \
gdbserver \
rsync \
- strace \
+ ${STRACE} \
tcf-agent"
diff --git a/meta/recipes-core/psplash/files/psplash-start.service b/meta/recipes-core/psplash/files/psplash-start.service
index 36c2bb38e0..bec9368427 100644
--- a/meta/recipes-core/psplash/files/psplash-start.service
+++ b/meta/recipes-core/psplash/files/psplash-start.service
@@ -2,6 +2,7 @@
Description=Start psplash boot splash screen
DefaultDependencies=no
RequiresMountsFor=/run
+ConditionFileIsExecutable=/usr/bin/psplash
[Service]
Type=notify
diff --git a/meta/recipes-core/psplash/files/psplash-systemd.service b/meta/recipes-core/psplash/files/psplash-systemd.service
index 082207f232..e93e3deb35 100644
--- a/meta/recipes-core/psplash/files/psplash-systemd.service
+++ b/meta/recipes-core/psplash/files/psplash-systemd.service
@@ -4,6 +4,7 @@ DefaultDependencies=no
After=psplash-start.service
Requires=psplash-start.service
RequiresMountsFor=/run
+ConditionFileIsExecutable=/usr/bin/psplash
[Service]
ExecStart=/usr/bin/psplash-systemd
diff --git a/meta/recipes-core/psplash/psplash_git.bb b/meta/recipes-core/psplash/psplash_git.bb
index edc0ac1d89..9532ed1534 100644
--- a/meta/recipes-core/psplash/psplash_git.bb
+++ b/meta/recipes-core/psplash/psplash_git.bb
@@ -58,7 +58,7 @@ python __anonymous() {
d.setVarFlag("ALTERNATIVE_TARGET_%s" % ep, 'psplash', '${bindir}/%s' % p)
d.appendVar("RDEPENDS:%s" % ep, " %s" % pn)
if p == "psplash-default":
- d.appendVar("RRECOMMENDS:%s" % pn, " %s" % ep)
+ d.appendVar("RDEPENDS:%s" % pn, " %s" % ep)
}
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/sysfsutils/sysfsutils_2.1.0.bb b/meta/recipes-core/sysfsutils/sysfsutils_2.1.0.bb
index c90a02f131..fd72cf4165 100644
--- a/meta/recipes-core/sysfsutils/sysfsutils_2.1.0.bb
+++ b/meta/recipes-core/sysfsutils/sysfsutils_2.1.0.bb
@@ -10,18 +10,14 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=3d06403ea54c7574a9e581c6478cc393 \
file://lib/LGPL;md5=b75d069791103ffe1c0d6435deeff72e"
PR = "r5"
-SRC_URI = "${SOURCEFORGE_MIRROR}/linux-diag/sysfsutils-${PV}.tar.gz \
+SRC_URI = "git://github.com/linux-ras/sysfsutils.git;protocol=https;branch=master \
file://sysfsutils-2.0.0-class-dup.patch \
file://obsolete_automake_macros.patch \
file://separatebuild.patch"
-SRC_URI[md5sum] = "14e7dcd0436d2f49aa403f67e1ef7ddc"
-SRC_URI[sha256sum] = "e865de2c1f559fff0d3fc936e660c0efaf7afe662064f2fb97ccad1ec28d208a"
+SRCREV = "0d5456e1c9d969cdad6accef2ae2d4881d5db085"
-UPSTREAM_CHECK_URI = "http://sourceforge.net/projects/linux-diag/files/sysfsutils/"
-UPSTREAM_CHECK_REGEX = "/sysfsutils/(?P<pver>(\d+[\.\-_]*)+)/"
-
-S = "${WORKDIR}/sysfsutils-${PV}"
+S = "${WORKDIR}/git"
inherit autotools
diff --git a/meta/recipes-core/systemd/systemd-systemctl/systemctl b/meta/recipes-core/systemd/systemd-systemctl/systemctl
index 6d19666d82..0fd7e24085 100755
--- a/meta/recipes-core/systemd/systemd-systemctl/systemctl
+++ b/meta/recipes-core/systemd/systemd-systemctl/systemctl
@@ -184,12 +184,14 @@ class SystemdUnit():
raise SystemdUnitNotFoundError(self.root, unit)
- def _process_deps(self, config, service, location, prop, dirstem):
+ def _process_deps(self, config, service, location, prop, dirstem, instance):
systemdir = self.root / SYSCONFDIR / "systemd" / "system"
target = ROOT / location.relative_to(self.root)
try:
for dependent in config.get('Install', prop):
+ # expand any %i to instance (ignoring escape sequence %%)
+ dependent = re.sub("([^%](%%)*)%i", "\\g<1>{}".format(instance), dependent)
wants = systemdir / "{}.{}".format(dependent, dirstem) / service
add_link(wants, target)
@@ -229,8 +231,8 @@ class SystemdUnit():
else:
service = self.unit
- self._process_deps(config, service, path, 'WantedBy', 'wants')
- self._process_deps(config, service, path, 'RequiredBy', 'requires')
+ self._process_deps(config, service, path, 'WantedBy', 'wants', instance)
+ self._process_deps(config, service, path, 'RequiredBy', 'requires', instance)
try:
for also in config.get('Install', 'Also'):
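The instance argument threaded through _process_deps() above lets the script expand a literal %i in WantedBy=/RequiredBy= values to the template instance when it creates the .wants/.requires symlinks, while an escaped %%i is left untouched. A minimal sketch of just that substitution (same regular expression as the patch; the unit names are made-up examples):

    import re

    def expand_instance(dependent, instance):
        # a non-% character, optionally followed by escaped %% pairs, then %i:
        # keep the prefix (group 1) and substitute the instance name for %i
        return re.sub("([^%](%%)*)%i", "\\g<1>{}".format(instance), dependent)

    expand_instance("getty@%i.service", "tty1")   # -> "getty@tty1.service"
    expand_instance("foo%%i.service", "tty1")     # -> "foo%%i.service" (escape preserved)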
diff --git a/meta/recipes-core/systemd/systemd/00-create-volatile.conf b/meta/recipes-core/systemd/systemd/00-create-volatile.conf
index 87cbe1e7d3..c4277221a2 100644
--- a/meta/recipes-core/systemd/systemd/00-create-volatile.conf
+++ b/meta/recipes-core/systemd/systemd/00-create-volatile.conf
@@ -3,5 +3,6 @@
# inside /var/log.
+d /run/lock 1777 - - -
d /var/volatile/log - - - -
d /var/volatile/tmp 1777 - -
diff --git a/meta/recipes-core/systemd/systemd/0001-network-remove-only-managed-configs-on-reconfigure-o.patch b/meta/recipes-core/systemd/systemd/0001-network-remove-only-managed-configs-on-reconfigure-o.patch
new file mode 100644
index 0000000000..8950981d2e
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0001-network-remove-only-managed-configs-on-reconfigure-o.patch
@@ -0,0 +1,358 @@
+From 31b25c7d360a2ef2da1717aa39f190de5222d11a Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Mon, 31 Jan 2022 19:08:27 +0900
+Subject: [PATCH] network: remove only managed configs on reconfigure or
+ carrier lost
+
+Otherwise, if the carrier of the non-managed interface is lost, the
+configs such as addresses or routes on the interface will be removed by
+networkd.
+
+Upstream-Status: Backport [systemd v251 a0e99a377a2f22c0ba460d3e7228214008714c14]
+Signed-off-by: C. Andy Martin <cam@myfastmail.com>
+---
+ src/network/networkd-address.c | 13 +++++--------
+ src/network/networkd-address.h | 2 +-
+ src/network/networkd-link.c | 18 ++++++++++--------
+ src/network/networkd-neighbor.c | 6 +++++-
+ src/network/networkd-neighbor.h | 2 +-
+ src/network/networkd-nexthop.c | 16 ++++++++++------
+ src/network/networkd-nexthop.h | 2 +-
+ src/network/networkd-route.c | 16 ++++++++++------
+ src/network/networkd-route.h | 2 +-
+ src/network/networkd-routing-policy-rule.c | 4 ++--
+ src/network/networkd-routing-policy-rule.h | 2 +-
+ test/test-network/systemd-networkd-tests.py | 2 +-
+ 12 files changed, 48 insertions(+), 37 deletions(-)
+
+diff --git a/src/network/networkd-address.c b/src/network/networkd-address.c
+index 7df743efb5..01c1d88dec 100644
+--- a/src/network/networkd-address.c
++++ b/src/network/networkd-address.c
+@@ -891,22 +891,19 @@ int link_drop_foreign_addresses(Link *link) {
+ return r;
+ }
+
+-int link_drop_addresses(Link *link) {
++int link_drop_managed_addresses(Link *link) {
+ Address *address;
+ int k, r = 0;
+
+ assert(link);
+
+ SET_FOREACH(address, link->addresses) {
+- /* Ignore addresses not assigned yet or already removing. */
+- if (!address_exists(address))
++ /* Do not touch addresses managed by kernel or other tools. */
++ if (address->source == NETWORK_CONFIG_SOURCE_FOREIGN)
+ continue;
+
+- /* Do not drop IPv6LL addresses assigned by the kernel here. They will be dropped in
+- * link_drop_ipv6ll_addresses() if IPv6LL addressing is disabled. */
+- if (address->source == NETWORK_CONFIG_SOURCE_FOREIGN &&
+- address->family == AF_INET6 &&
+- in6_addr_is_link_local(&address->in_addr.in6))
++ /* Ignore addresses not assigned yet or already removing. */
++ if (!address_exists(address))
+ continue;
+
+ k = address_remove(address);
+diff --git a/src/network/networkd-address.h b/src/network/networkd-address.h
+index 41c4ce6fa4..b2110d8d21 100644
+--- a/src/network/networkd-address.h
++++ b/src/network/networkd-address.h
+@@ -74,7 +74,7 @@ void address_set_broadcast(Address *a);
+
+ DEFINE_NETWORK_SECTION_FUNCTIONS(Address, address_free);
+
+-int link_drop_addresses(Link *link);
++int link_drop_managed_addresses(Link *link);
+ int link_drop_foreign_addresses(Link *link);
+ int link_drop_ipv6ll_addresses(Link *link);
+ void link_foreignize_addresses(Link *link);
+diff --git a/src/network/networkd-link.c b/src/network/networkd-link.c
+index b62a154828..12c592b257 100644
+--- a/src/network/networkd-link.c
++++ b/src/network/networkd-link.c
+@@ -1070,27 +1070,27 @@ static int link_drop_foreign_config(Link *link) {
+ return r;
+ }
+
+-static int link_drop_config(Link *link) {
++static int link_drop_managed_config(Link *link) {
+ int k, r;
+
+ assert(link);
+ assert(link->manager);
+
+- r = link_drop_routes(link);
++ r = link_drop_managed_routes(link);
+
+- k = link_drop_nexthops(link);
++ k = link_drop_managed_nexthops(link);
+ if (k < 0 && r >= 0)
+ r = k;
+
+- k = link_drop_addresses(link);
++ k = link_drop_managed_addresses(link);
+ if (k < 0 && r >= 0)
+ r = k;
+
+- k = link_drop_neighbors(link);
++ k = link_drop_managed_neighbors(link);
+ if (k < 0 && r >= 0)
+ r = k;
+
+- k = link_drop_routing_policy_rules(link);
++ k = link_drop_managed_routing_policy_rules(link);
+ if (k < 0 && r >= 0)
+ r = k;
+
+@@ -1318,7 +1318,9 @@ static int link_reconfigure_impl(Link *link, bool force) {
+ * link_drop_foreign_config() in link_configure(). */
+ link_foreignize_config(link);
+ else {
+- r = link_drop_config(link);
++ /* Remove all managed configs. Note, foreign configs are removed later by
++ * link_configure() -> link_drop_foreign_config() if the link is managed by us. */
++ r = link_drop_managed_config(link);
+ if (r < 0)
+ return r;
+ }
+@@ -1705,7 +1707,7 @@ static int link_carrier_lost_impl(Link *link) {
+ if (r < 0)
+ ret = r;
+
+- r = link_drop_config(link);
++ r = link_drop_managed_config(link);
+ if (r < 0 && ret >= 0)
+ ret = r;
+
+diff --git a/src/network/networkd-neighbor.c b/src/network/networkd-neighbor.c
+index 1766095e53..b58898a6dc 100644
+--- a/src/network/networkd-neighbor.c
++++ b/src/network/networkd-neighbor.c
+@@ -406,13 +406,17 @@ int link_drop_foreign_neighbors(Link *link) {
+ return r;
+ }
+
+-int link_drop_neighbors(Link *link) {
++int link_drop_managed_neighbors(Link *link) {
+ Neighbor *neighbor;
+ int k, r = 0;
+
+ assert(link);
+
+ SET_FOREACH(neighbor, link->neighbors) {
++ /* Do not touch neighbors managed by kernel or other tools. */
++ if (neighbor->source == NETWORK_CONFIG_SOURCE_FOREIGN)
++ continue;
++
+ /* Ignore neighbors not assigned yet or already removing. */
+ if (!neighbor_exists(neighbor))
+ continue;
+diff --git a/src/network/networkd-neighbor.h b/src/network/networkd-neighbor.h
+index e9e1854110..8e3c510cd5 100644
+--- a/src/network/networkd-neighbor.h
++++ b/src/network/networkd-neighbor.h
+@@ -34,7 +34,7 @@ int neighbor_compare_func(const Neighbor *a, const Neighbor *b);
+
+ void network_drop_invalid_neighbors(Network *network);
+
+-int link_drop_neighbors(Link *link);
++int link_drop_managed_neighbors(Link *link);
+ int link_drop_foreign_neighbors(Link *link);
+ void link_foreignize_neighbors(Link *link);
+
+diff --git a/src/network/networkd-nexthop.c b/src/network/networkd-nexthop.c
+index b829aaab90..42aa8c4c59 100644
+--- a/src/network/networkd-nexthop.c
++++ b/src/network/networkd-nexthop.c
+@@ -613,8 +613,8 @@ static void manager_mark_nexthops(Manager *manager, bool foreign, const Link *ex
+ if (nexthop->protocol == RTPROT_KERNEL)
+ continue;
+
+- /* When 'foreign' is true, do not remove nexthops we configured. */
+- if (foreign && nexthop->source != NETWORK_CONFIG_SOURCE_FOREIGN)
++ /* When 'foreign' is true, mark only foreign nexthops, and vice versa. */
++ if (foreign != (nexthop->source == NETWORK_CONFIG_SOURCE_FOREIGN))
+ continue;
+
+ /* Ignore nexthops not assigned yet or already removed. */
+@@ -641,7 +641,7 @@ static void manager_mark_nexthops(Manager *manager, bool foreign, const Link *ex
+ }
+ }
+
+-static int manager_drop_nexthops(Manager *manager) {
++static int manager_drop_marked_nexthops(Manager *manager) {
+ NextHop *nexthop;
+ int k, r = 0;
+
+@@ -704,14 +704,14 @@ int link_drop_foreign_nexthops(Link *link) {
+
+ manager_mark_nexthops(link->manager, /* foreign = */ true, NULL);
+
+- k = manager_drop_nexthops(link->manager);
++ k = manager_drop_marked_nexthops(link->manager);
+ if (k < 0 && r >= 0)
+ r = k;
+
+ return r;
+ }
+
+-int link_drop_nexthops(Link *link) {
++int link_drop_managed_nexthops(Link *link) {
+ NextHop *nexthop;
+ int k, r = 0;
+
+@@ -723,6 +723,10 @@ int link_drop_nexthops(Link *link) {
+ if (nexthop->protocol == RTPROT_KERNEL)
+ continue;
+
++ /* Do not touch nexthops managed by kernel or other tools. */
++ if (nexthop->source == NETWORK_CONFIG_SOURCE_FOREIGN)
++ continue;
++
+ /* Ignore nexthops not assigned yet or already removing. */
+ if (!nexthop_exists(nexthop))
+ continue;
+@@ -734,7 +738,7 @@ int link_drop_nexthops(Link *link) {
+
+ manager_mark_nexthops(link->manager, /* foreign = */ false, link);
+
+- k = manager_drop_nexthops(link->manager);
++ k = manager_drop_marked_nexthops(link->manager);
+ if (k < 0 && r >= 0)
+ r = k;
+
+diff --git a/src/network/networkd-nexthop.h b/src/network/networkd-nexthop.h
+index 7a8920238c..1e54e9f211 100644
+--- a/src/network/networkd-nexthop.h
++++ b/src/network/networkd-nexthop.h
+@@ -44,7 +44,7 @@ int nexthop_compare_func(const NextHop *a, const NextHop *b);
+
+ void network_drop_invalid_nexthops(Network *network);
+
+-int link_drop_nexthops(Link *link);
++int link_drop_managed_nexthops(Link *link);
+ int link_drop_foreign_nexthops(Link *link);
+ void link_foreignize_nexthops(Link *link);
+
+diff --git a/src/network/networkd-route.c b/src/network/networkd-route.c
+index ee7a535075..7e6fe8bc11 100644
+--- a/src/network/networkd-route.c
++++ b/src/network/networkd-route.c
+@@ -788,8 +788,8 @@ static void manager_mark_routes(Manager *manager, bool foreign, const Link *exce
+ if (route->protocol == RTPROT_KERNEL)
+ continue;
+
+- /* When 'foreign' is true, do not remove routes we configured. */
+- if (foreign && route->source != NETWORK_CONFIG_SOURCE_FOREIGN)
++ /* When 'foreign' is true, mark only foreign routes, and vice versa. */
++ if (foreign != (route->source == NETWORK_CONFIG_SOURCE_FOREIGN))
+ continue;
+
+ /* Do not touch dynamic routes. They will removed by dhcp_pd_prefix_lost() */
+@@ -834,7 +834,7 @@ static void manager_mark_routes(Manager *manager, bool foreign, const Link *exce
+ }
+ }
+
+-static int manager_drop_routes(Manager *manager) {
++static int manager_drop_marked_routes(Manager *manager) {
+ Route *route;
+ int k, r = 0;
+
+@@ -955,14 +955,14 @@ int link_drop_foreign_routes(Link *link) {
+
+ manager_mark_routes(link->manager, /* foreign = */ true, NULL);
+
+- k = manager_drop_routes(link->manager);
++ k = manager_drop_marked_routes(link->manager);
+ if (k < 0 && r >= 0)
+ r = k;
+
+ return r;
+ }
+
+-int link_drop_routes(Link *link) {
++int link_drop_managed_routes(Link *link) {
+ Route *route;
+ int k, r = 0;
+
+@@ -973,6 +973,10 @@ int link_drop_routes(Link *link) {
+ if (route_by_kernel(route))
+ continue;
+
++ /* Do not touch routes managed by kernel or other tools. */
++ if (route->source == NETWORK_CONFIG_SOURCE_FOREIGN)
++ continue;
++
+ if (!route_exists(route))
+ continue;
+
+@@ -983,7 +987,7 @@ int link_drop_routes(Link *link) {
+
+ manager_mark_routes(link->manager, /* foreign = */ false, link);
+
+- k = manager_drop_routes(link->manager);
++ k = manager_drop_marked_routes(link->manager);
+ if (k < 0 && r >= 0)
+ r = k;
+
+diff --git a/src/network/networkd-route.h b/src/network/networkd-route.h
+index e3e22a5985..2180a196fc 100644
+--- a/src/network/networkd-route.h
++++ b/src/network/networkd-route.h
+@@ -82,7 +82,7 @@ int route_remove(Route *route);
+
+ int route_get(Manager *manager, Link *link, const Route *in, Route **ret);
+
+-int link_drop_routes(Link *link);
++int link_drop_managed_routes(Link *link);
+ int link_drop_foreign_routes(Link *link);
+ void link_foreignize_routes(Link *link);
+
+diff --git a/src/network/networkd-routing-policy-rule.c b/src/network/networkd-routing-policy-rule.c
+index 90086f35a7..d4363060d8 100644
+--- a/src/network/networkd-routing-policy-rule.c
++++ b/src/network/networkd-routing-policy-rule.c
+@@ -653,8 +653,8 @@ static void manager_mark_routing_policy_rules(Manager *m, bool foreign, const Li
+ if (rule->protocol == RTPROT_KERNEL)
+ continue;
+
+- /* When 'foreign' is true, do not remove rules we configured. */
+- if (foreign && rule->source != NETWORK_CONFIG_SOURCE_FOREIGN)
++ /* When 'foreign' is true, mark only foreign rules, and vice versa. */
++ if (foreign != (rule->source == NETWORK_CONFIG_SOURCE_FOREIGN))
+ continue;
+
+ /* Ignore rules not assigned yet or already removing. */
+diff --git a/src/network/networkd-routing-policy-rule.h b/src/network/networkd-routing-policy-rule.h
+index f52943bd2e..7cc6f55c8d 100644
+--- a/src/network/networkd-routing-policy-rule.h
++++ b/src/network/networkd-routing-policy-rule.h
+@@ -71,7 +71,7 @@ int manager_drop_routing_policy_rules_internal(Manager *m, bool foreign, const L
+ static inline int manager_drop_foreign_routing_policy_rules(Manager *m) {
+ return manager_drop_routing_policy_rules_internal(m, true, NULL);
+ }
+-static inline int link_drop_routing_policy_rules(Link *link) {
++static inline int link_drop_managed_routing_policy_rules(Link *link) {
+ assert(link);
+ return manager_drop_routing_policy_rules_internal(link->manager, false, link);
+ }
+diff --git a/test/test-network/systemd-networkd-tests.py b/test/test-network/systemd-networkd-tests.py
+index ac2c1ba034..ed4d4992b1 100755
+--- a/test/test-network/systemd-networkd-tests.py
++++ b/test/test-network/systemd-networkd-tests.py
+@@ -3876,7 +3876,7 @@ class NetworkdBridgeTests(unittest.TestCase, Utilities):
+ print(output)
+ self.assertRegex(output, 'NO-CARRIER')
+ self.assertNotRegex(output, '192.168.0.15/24')
+- self.assertNotRegex(output, '192.168.0.16/24')
++ self.assertRegex(output, '192.168.0.16/24') # foreign address is kept
+
+ print('### ip -6 route list table all dev bridge99')
+ output = check_output('ip -6 route list table all dev bridge99')
+--
+2.34.1
+
diff --git a/meta/recipes-core/systemd/systemd/0001-nspawn-make-sure-host-root-can-write-to-the-uidmappe.patch b/meta/recipes-core/systemd/systemd/0001-nspawn-make-sure-host-root-can-write-to-the-uidmappe.patch
new file mode 100644
index 0000000000..8715019c99
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0001-nspawn-make-sure-host-root-can-write-to-the-uidmappe.patch
@@ -0,0 +1,216 @@
+From e34fb1a4568bd080032065bb1506ab9b6c6606f1 Mon Sep 17 00:00:00 2001
+From: Lennart Poettering <lennart@poettering.net>
+Date: Thu, 17 Mar 2022 13:46:12 +0100
+Subject: [PATCH] nspawn: make sure host root can write to the uidmapped mounts
+ we prepare for the container payload
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When using user namespaces in conjunction with uidmapped mounts, nspawn
+so far set up two uidmappings:
+
+1. One that is used for the uidmapped mount and that maps the UID range
+ 0…65535 on the backing fs to some high UID range X…X+65535 on the
+ uidmapped fs. (Let's call this mapping the "mount mapping")
+
+2. One that is used for the userns namespace the container payload
+ processes run in, that maps X…X+65535 back to 0…65535. (Let's call
+ this one the "process mapping").
+
+These mappings hence are pretty much identical, one just moves things up
+and one back down. (Reminder: we do all this so that the processes can
+run under high UIDs while running off file systems that require no
+recursive chown()ing, i.e. we want processes with high UID range but
+files with low UID range.)
+
+This creates one problem, i.e. issue #20989: if nspawn (which runs as
+host root, i.e. host UID 0) wants to add inodes to the uidmapped mount
+it can't do that, since host UID 0 is not defined in the mount mapping
+(only the X…X+65536 range is, after all, and X > 0), and processes whose
+UID is not mapped in a uidmapped fs cannot create inodes in it since
+those would be owned by an unmapped UID, which then triggers
+the famous EOVERFLOW error.
+
+Let's fix this, by explicitly including an entry for the host UID 0 in
+the mount mapping. Specifically, we'll extend the mount mapping to map
+UID 2147483646 (which is INT32_MAX-1, see code for an explanation why I
+picked this one) of the backing fs to UID 0 on the uidmapped fs. This
+way nspawn can create inodes on the uidmapped fs as it likes (which will
+then actually be owned by UID 2147483646 on the backing fs), and as it
+always did. Note that we do *not* create a similar entry in the process
+mapping. Thus any files created by nspawn that way (and not chown()ed to
+something better) will appear as unmapped (i.e. as overflowuid/"nobody")
+in the container payload. And that's good. Of course, the latter is
+mostly theoretic, as nspawn should generally chown() the inodes it
+creates to UID ranges that actually make sense for the container (and we
+generally already do this correctly), but it's good to know that we are
+safe here, given we might accidentally forget to chown() some inodes we
+create.
+
+Net effect: the two mappings will not be identical anymore. The mount
+mapping has one entry more, and the only reason it exists is so that
+nspawn can access the uidmapped fs reasonably independently from any
+process mapping.
+
+Fixes: #20989
+
+Upstream-Status: Backport [50ae2966d20b0b4a19def060de3b966b7a70b54a]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ src/basic/user-util.h | 13 +++++++++++++
+ src/nspawn/nspawn-mount.c | 2 +-
+ src/nspawn/nspawn.c | 2 +-
+ src/shared/dissect-image.c | 2 +-
+ src/shared/mount-util.c | 28 +++++++++++++++++++++++-----
+ src/shared/mount-util.h | 13 ++++++++++++-
+ 6 files changed, 51 insertions(+), 9 deletions(-)
+
+diff --git a/src/basic/user-util.h b/src/basic/user-util.h
+index ab1ce48b2d..0b9749ef8b 100644
+--- a/src/basic/user-util.h
++++ b/src/basic/user-util.h
+@@ -59,6 +59,19 @@ int take_etc_passwd_lock(const char *root);
+ #define UID_NOBODY ((uid_t) 65534U)
+ #define GID_NOBODY ((gid_t) 65534U)
+
++/* If REMOUNT_IDMAP_HOST_ROOT is set for remount_idmap() we'll include a mapping here that maps the host root
++ * user accessing the idmapped mount to this user ID on the backing fs. This is the last valid UID in the
++ * *signed* 32bit range. You might wonder why precisely use this specific UID for this purpose? Well, we
++ * definitely cannot use the first 0…65536 UIDs for that, since in most cases that's precisely the file range
++ * we intend to map to some high UID range, and since UID mappings have to be bijective we thus cannot use
++ * them at all. Furthermore the UID range beyond INT32_MAX (i.e. the range above the signed 32bit range) is
++ * icky, since many APIs cannot use it (example: setfsuid() returns the old UID as signed integer). Following
++ * our usual logic of assigning a 16bit UID range to each container, so that the upper 16bit of a 32bit UID
++ * value indicate kind of a "container ID" and the lower 16bit map directly to the intended user you can read
++ * this specific UID as the "nobody" user of the container with ID 0x7FFF, which is kinda nice. */
++#define UID_MAPPED_ROOT ((uid_t) (INT32_MAX-1))
++#define GID_MAPPED_ROOT ((gid_t) (INT32_MAX-1))
++
+ #define ETC_PASSWD_LOCK_PATH "/etc/.pwd.lock"
+
+ /* The following macros add 1 when converting things, since UID 0 is a valid UID, while the pointer
+diff --git a/src/nspawn/nspawn-mount.c b/src/nspawn/nspawn-mount.c
+index 40773d90c1..f2fad0f462 100644
+--- a/src/nspawn/nspawn-mount.c
++++ b/src/nspawn/nspawn-mount.c
+@@ -780,7 +780,7 @@ static int mount_bind(const char *dest, CustomMount *m, uid_t uid_shift, uid_t u
+ }
+
+ if (idmapped) {
+- r = remount_idmap(where, uid_shift, uid_range);
++ r = remount_idmap(where, uid_shift, uid_range, REMOUNT_IDMAP_HOST_ROOT);
+ if (r < 0)
+ return log_error_errno(r, "Failed to map ids for bind mount %s: %m", where);
+ }
+diff --git a/src/nspawn/nspawn.c b/src/nspawn/nspawn.c
+index 8f17ab8810..fe0af8e42d 100644
+--- a/src/nspawn/nspawn.c
++++ b/src/nspawn/nspawn.c
+@@ -3779,7 +3779,7 @@ static int outer_child(
+ IN_SET(arg_userns_ownership, USER_NAMESPACE_OWNERSHIP_MAP, USER_NAMESPACE_OWNERSHIP_AUTO) &&
+ arg_uid_shift != 0) {
+
+- r = remount_idmap(directory, arg_uid_shift, arg_uid_range);
++ r = remount_idmap(directory, arg_uid_shift, arg_uid_range, REMOUNT_IDMAP_HOST_ROOT);
+ if (r == -EINVAL || ERRNO_IS_NOT_SUPPORTED(r)) {
+ /* This might fail because the kernel or file system doesn't support idmapping. We
+ * can't really distinguish this nicely, nor do we have any guarantees about the
+diff --git a/src/shared/dissect-image.c b/src/shared/dissect-image.c
+index 39a7f4c3f2..471c165257 100644
+--- a/src/shared/dissect-image.c
++++ b/src/shared/dissect-image.c
+@@ -1807,7 +1807,7 @@ static int mount_partition(
+ (void) fs_grow(node, p);
+
+ if (remap_uid_gid) {
+- r = remount_idmap(p, uid_shift, uid_range);
++ r = remount_idmap(p, uid_shift, uid_range, REMOUNT_IDMAP_HOST_ROOT);
+ if (r < 0)
+ return r;
+ }
+diff --git a/src/shared/mount-util.c b/src/shared/mount-util.c
+index c75c02f5be..fb2e9a0711 100644
+--- a/src/shared/mount-util.c
++++ b/src/shared/mount-util.c
+@@ -1049,14 +1049,31 @@ int make_mount_point(const char *path) {
+ return 1;
+ }
+
+-static int make_userns(uid_t uid_shift, uid_t uid_range) {
+- char line[DECIMAL_STR_MAX(uid_t)*3+3+1];
++static int make_userns(uid_t uid_shift, uid_t uid_range, RemountIdmapFlags flags) {
+ _cleanup_close_ int userns_fd = -1;
++ _cleanup_free_ char *line = NULL;
+
+ /* Allocates a userns file descriptor with the mapping we need. For this we'll fork off a child
+ * process whose only purpose is to give us a new user namespace. It's killed when we got it. */
+
+- xsprintf(line, UID_FMT " " UID_FMT " " UID_FMT "\n", 0, uid_shift, uid_range);
++ if (asprintf(&line, UID_FMT " " UID_FMT " " UID_FMT "\n", 0, uid_shift, uid_range) < 0)
++ return log_oom_debug();
++
++ /* If requested we'll include an entry in the mapping so that the host root user can make changes to
++ * the uidmapped mount like it normally would. Specifically, we'll map the user with UID_MAPPED_ROOT on
++ * the backing fs to UID 0. This is useful, since nspawn code wants to create various missing inodes
++ * in the OS tree before booting into it, and this becomes very easy and straightforward to do if it
++ * can just do it under its own regular UID. Note that in that case the container's runtime uidmap
++ * (i.e. the one the container payload processes run in) will leave this UID unmapped, i.e. if we
++ * accidentally leave files owned by host root in the already uidmapped tree around they'll show up
++ * as owned by 'nobody', which is safe. (Of course, we shouldn't leave such inodes around, but always
++ * chown() them to the container's own UID range, but it's good to have a safety net, in case we
++ * forget it.) */
++ if (flags & REMOUNT_IDMAP_HOST_ROOT)
++ if (strextendf(&line,
++ UID_FMT " " UID_FMT " " UID_FMT "\n",
++ UID_MAPPED_ROOT, 0, 1) < 0)
++ return log_oom_debug();
+
+ /* We always assign the same UID and GID ranges */
+ userns_fd = userns_acquire(line, line);
+@@ -1069,7 +1086,8 @@ static int make_userns(uid_t uid_shift, uid_t uid_range) {
+ int remount_idmap(
+ const char *p,
+ uid_t uid_shift,
+- uid_t uid_range) {
++ uid_t uid_range,
++ RemountIdmapFlags flags) {
+
+ _cleanup_close_ int mount_fd = -1, userns_fd = -1;
+ int r;
+@@ -1085,7 +1103,7 @@ int remount_idmap(
+ return log_debug_errno(errno, "Failed to open tree of mounted filesystem '%s': %m", p);
+
+ /* Create a user namespace mapping */
+- userns_fd = make_userns(uid_shift, uid_range);
++ userns_fd = make_userns(uid_shift, uid_range, flags);
+ if (userns_fd < 0)
+ return userns_fd;
+
+diff --git a/src/shared/mount-util.h b/src/shared/mount-util.h
+index ce73aebd4b..f53a64186f 100644
+--- a/src/shared/mount-util.h
++++ b/src/shared/mount-util.h
+@@ -112,7 +112,18 @@ int mount_image_in_namespace(pid_t target, const char *propagate_path, const cha
+
+ int make_mount_point(const char *path);
+
+-int remount_idmap(const char *p, uid_t uid_shift, uid_t uid_range);
++typedef enum RemountIdmapFlags {
++ /* Include a mapping from UID_MAPPED_ROOT (i.e. UID 2^31-2) on the backing fs to UID 0 on the
++ * uidmapped fs. This is useful to ensure that the host root user can safely add inodes to the
++ * uidmapped fs (which otherwise wouldn't work as the host root user is not defined on the uidmapped
++ * mount and any attempts to create inodes will then be refused with EOVERFLOW). The idea is that
++ * these inodes are quickly re-chown()ed to more suitable UIDs/GIDs. Any code that intends to be able
++ * to add inodes to file systems mapped this way should set this flag, but given it comes with
++ * certain security implications defaults to off, and requires explicit opt-in. */
++ REMOUNT_IDMAP_HOST_ROOT = 1 << 0,
++} RemountIdmapFlags;
++
++int remount_idmap(const char *p, uid_t uid_shift, uid_t uid_range, RemountIdmapFlags flags);
+
+ /* Creates a mount point (not parents) based on the source path or stat - ie, a file or a directory */
+ int make_mount_point_inode_from_stat(const struct stat *st, const char *dest, mode_t mode);
+--
+2.40.1
+
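To make the nspawn change above concrete: with REMOUNT_IDMAP_HOST_ROOT set, make_userns() now writes two mapping entries instead of one, the payload UID range plus a single-UID entry tying UID_MAPPED_ROOT (INT32_MAX - 1) to host UID 0. A sketch of the resulting map text (the shift and range values below are made up for illustration):

    UID_MAPPED_ROOT = 2**31 - 2            # 2147483646, as defined in user-util.h

    def mount_mapping(uid_shift, uid_range):
        lines = ["0 {} {}".format(uid_shift, uid_range)]    # payload UID range
        lines.append("{} 0 1".format(UID_MAPPED_ROOT))      # extra host-root entry
        return "\n".join(lines) + "\n"

    print(mount_mapping(uid_shift=524288, uid_range=65536))
    # 0 524288 65536
    # 2147483646 0 1

The second entry is what lets nspawn, running as host UID 0, create inodes through the idmapped mount; on the backing filesystem those inodes are owned by UID 2147483646, and the container's process mapping deliberately leaves that UID unmapped so they show up as "nobody" inside the payload.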
diff --git a/meta/recipes-core/systemd/systemd/0001-shared-json-allow-json_variant_dump-to-return-an-err.patch b/meta/recipes-core/systemd/systemd/0001-shared-json-allow-json_variant_dump-to-return-an-err.patch
new file mode 100644
index 0000000000..b23b735507
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0001-shared-json-allow-json_variant_dump-to-return-an-err.patch
@@ -0,0 +1,60 @@
+From 25492154b42f68a48752a7f61eaf1fb61e454e52 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 18 Oct 2022 18:09:06 +0200
+Subject: [PATCH] shared/json: allow json_variant_dump() to return an error
+
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/7922ead507e0d83e4ec72a8cbd2b67194766e58c]
+
+Needed to fix CVE-2022-45873.patch backported from systemd/main,
+otherwise it fails to build with:
+
+| ../git/src/shared/elf-util.c: In function 'parse_elf_object':
+| ../git/src/shared/elf-util.c:792:27: error: void value not ignored as it ought to be
+| 792 | r = json_variant_dump(package_metadata, JSON_FORMAT_FLUSH, json_out, NULL);
+| | ^
+
+Signed-off-by: Martin Jansa <martin2.jansa@lgepartner.com>
+---
+ src/shared/json.c | 7 ++++---
+ src/shared/json.h | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/src/shared/json.c b/src/shared/json.c
+index dff95eda26..81c05efe22 100644
+--- a/src/shared/json.c
++++ b/src/shared/json.c
+@@ -1792,9 +1792,9 @@ int json_variant_format(JsonVariant *v, JsonFormatFlags flags, char **ret) {
+ return (int) sz - 1;
+ }
+
+-void json_variant_dump(JsonVariant *v, JsonFormatFlags flags, FILE *f, const char *prefix) {
++int json_variant_dump(JsonVariant *v, JsonFormatFlags flags, FILE *f, const char *prefix) {
+ if (!v)
+- return;
++ return 0;
+
+ if (!f)
+ f = stdout;
+@@ -1820,7 +1820,8 @@ void json_variant_dump(JsonVariant *v, JsonFormatFlags flags, FILE *f, const cha
+ fputc('\n', f); /* In case of SSE add a second newline */
+
+ if (flags & JSON_FORMAT_FLUSH)
+- fflush(f);
++ return fflush_and_check(f);
++ return 0;
+ }
+
+ int json_variant_filter(JsonVariant **v, char **to_remove) {
+diff --git a/src/shared/json.h b/src/shared/json.h
+index 8760354b66..c712700763 100644
+--- a/src/shared/json.h
++++ b/src/shared/json.h
+@@ -187,7 +187,7 @@ typedef enum JsonFormatFlags {
+ } JsonFormatFlags;
+
+ int json_variant_format(JsonVariant *v, JsonFormatFlags flags, char **ret);
+-void json_variant_dump(JsonVariant *v, JsonFormatFlags flags, FILE *f, const char *prefix);
++int json_variant_dump(JsonVariant *v, JsonFormatFlags flags, FILE *f, const char *prefix);
+
+ int json_variant_filter(JsonVariant **v, char **to_remove);
+
diff --git a/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch b/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch
new file mode 100644
index 0000000000..eb8b0cba12
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch
@@ -0,0 +1,45 @@
+From bff52d96598956163d73b7c7bdec7b0ad5b3c2d4 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 15 Nov 2022 16:52:03 +0530
+Subject: [PATCH] CVE-2022-3821
+
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/72d4c15a946d20143cd4c6783c802124bc894dc7]
+CVE: CVE-2022-3821
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/basic/time-util.c | 2 +-
+ src/test/test-time-util.c | 5 +++++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/src/basic/time-util.c b/src/basic/time-util.c
+index b659d6905d..89dc593d44 100644
+--- a/src/basic/time-util.c
++++ b/src/basic/time-util.c
+@@ -588,7 +588,7 @@ char *format_timespan(char *buf, size_t l, usec_t t, usec_t accuracy) {
+ t = b;
+ }
+
+- n = MIN((size_t) k, l);
++ n = MIN((size_t) k, l-1);
+
+ l -= n;
+ p += n;
+diff --git a/src/test/test-time-util.c b/src/test/test-time-util.c
+index 4d0131827e..8db6b25279 100644
+--- a/src/test/test-time-util.c
++++ b/src/test/test-time-util.c
+@@ -238,6 +238,11 @@ TEST(format_timespan) {
+ test_format_timespan_accuracy(1);
+ test_format_timespan_accuracy(USEC_PER_MSEC);
+ test_format_timespan_accuracy(USEC_PER_SEC);
++
++ /* See issue #23928. */
++ _cleanup_free_ char *buf;
++ assert_se(buf = new(char, 5));
++ assert_se(buf == format_timespan(buf, 5, 100005, 1000));
+ }
+
+ TEST(verify_timezone) {
+--
+2.25.1
+
diff --git a/meta/recipes-core/systemd/systemd/CVE-2022-4415-1.patch b/meta/recipes-core/systemd/systemd/CVE-2022-4415-1.patch
new file mode 100644
index 0000000000..5cf0fe284e
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2022-4415-1.patch
@@ -0,0 +1,109 @@
+From 45d323fc889a55fae400a5b08a56273d5724ef4a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 29 Nov 2022 09:00:16 +0100
+Subject: [PATCH 1/2] coredump: adjust whitespace
+
+(cherry picked from commit 510a146634f3e095b34e2a26023b1b1f99dcb8c0)
+(cherry picked from commit cc2eb7a9b5fd6d9dd8ea35fb045ce6e5e16e1187)
+(cherry picked from commit cb044d734c44cd3c05a6e438b5b995b2a9cfa73c)
+
+Preparation to avoid conflicts when applying the fix for CVE-2022-4415
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/45d323fc889a55fae400a5b08a56273d5724ef4a]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ src/coredump/coredump.c | 56 ++++++++++++++++++++---------------------
+ 1 file changed, 28 insertions(+), 28 deletions(-)
+
+diff --git a/src/coredump/coredump.c b/src/coredump/coredump.c
+index eaea63f682..8295b03ac7 100644
+--- a/src/coredump/coredump.c
++++ b/src/coredump/coredump.c
+@@ -103,16 +103,16 @@ enum {
+ };
+
+ static const char * const meta_field_names[_META_MAX] = {
+- [META_ARGV_PID] = "COREDUMP_PID=",
+- [META_ARGV_UID] = "COREDUMP_UID=",
+- [META_ARGV_GID] = "COREDUMP_GID=",
+- [META_ARGV_SIGNAL] = "COREDUMP_SIGNAL=",
+- [META_ARGV_TIMESTAMP] = "COREDUMP_TIMESTAMP=",
+- [META_ARGV_RLIMIT] = "COREDUMP_RLIMIT=",
+- [META_ARGV_HOSTNAME] = "COREDUMP_HOSTNAME=",
+- [META_COMM] = "COREDUMP_COMM=",
+- [META_EXE] = "COREDUMP_EXE=",
+- [META_UNIT] = "COREDUMP_UNIT=",
++ [META_ARGV_PID] = "COREDUMP_PID=",
++ [META_ARGV_UID] = "COREDUMP_UID=",
++ [META_ARGV_GID] = "COREDUMP_GID=",
++ [META_ARGV_SIGNAL] = "COREDUMP_SIGNAL=",
++ [META_ARGV_TIMESTAMP] = "COREDUMP_TIMESTAMP=",
++ [META_ARGV_RLIMIT] = "COREDUMP_RLIMIT=",
++ [META_ARGV_HOSTNAME] = "COREDUMP_HOSTNAME=",
++ [META_COMM] = "COREDUMP_COMM=",
++ [META_EXE] = "COREDUMP_EXE=",
++ [META_UNIT] = "COREDUMP_UNIT=",
+ };
+
+ typedef struct Context {
+@@ -131,9 +131,9 @@ typedef enum CoredumpStorage {
+ } CoredumpStorage;
+
+ static const char* const coredump_storage_table[_COREDUMP_STORAGE_MAX] = {
+- [COREDUMP_STORAGE_NONE] = "none",
++ [COREDUMP_STORAGE_NONE] = "none",
+ [COREDUMP_STORAGE_EXTERNAL] = "external",
+- [COREDUMP_STORAGE_JOURNAL] = "journal",
++ [COREDUMP_STORAGE_JOURNAL] = "journal",
+ };
+
+ DEFINE_PRIVATE_STRING_TABLE_LOOKUP(coredump_storage, CoredumpStorage);
+@@ -149,13 +149,13 @@ static uint64_t arg_max_use = UINT64_MAX;
+
+ static int parse_config(void) {
+ static const ConfigTableItem items[] = {
+- { "Coredump", "Storage", config_parse_coredump_storage, 0, &arg_storage },
+- { "Coredump", "Compress", config_parse_bool, 0, &arg_compress },
+- { "Coredump", "ProcessSizeMax", config_parse_iec_uint64, 0, &arg_process_size_max },
+- { "Coredump", "ExternalSizeMax", config_parse_iec_uint64_infinity, 0, &arg_external_size_max },
+- { "Coredump", "JournalSizeMax", config_parse_iec_size, 0, &arg_journal_size_max },
+- { "Coredump", "KeepFree", config_parse_iec_uint64, 0, &arg_keep_free },
+- { "Coredump", "MaxUse", config_parse_iec_uint64, 0, &arg_max_use },
++ { "Coredump", "Storage", config_parse_coredump_storage, 0, &arg_storage },
++ { "Coredump", "Compress", config_parse_bool, 0, &arg_compress },
++ { "Coredump", "ProcessSizeMax", config_parse_iec_uint64, 0, &arg_process_size_max },
++ { "Coredump", "ExternalSizeMax", config_parse_iec_uint64_infinity, 0, &arg_external_size_max },
++ { "Coredump", "JournalSizeMax", config_parse_iec_size, 0, &arg_journal_size_max },
++ { "Coredump", "KeepFree", config_parse_iec_uint64, 0, &arg_keep_free },
++ { "Coredump", "MaxUse", config_parse_iec_uint64, 0, &arg_max_use },
+ {}
+ };
+
+@@ -201,15 +201,15 @@ static int fix_acl(int fd, uid_t uid) {
+ static int fix_xattr(int fd, const Context *context) {
+
+ static const char * const xattrs[_META_MAX] = {
+- [META_ARGV_PID] = "user.coredump.pid",
+- [META_ARGV_UID] = "user.coredump.uid",
+- [META_ARGV_GID] = "user.coredump.gid",
+- [META_ARGV_SIGNAL] = "user.coredump.signal",
+- [META_ARGV_TIMESTAMP] = "user.coredump.timestamp",
+- [META_ARGV_RLIMIT] = "user.coredump.rlimit",
+- [META_ARGV_HOSTNAME] = "user.coredump.hostname",
+- [META_COMM] = "user.coredump.comm",
+- [META_EXE] = "user.coredump.exe",
++ [META_ARGV_PID] = "user.coredump.pid",
++ [META_ARGV_UID] = "user.coredump.uid",
++ [META_ARGV_GID] = "user.coredump.gid",
++ [META_ARGV_SIGNAL] = "user.coredump.signal",
++ [META_ARGV_TIMESTAMP] = "user.coredump.timestamp",
++ [META_ARGV_RLIMIT] = "user.coredump.rlimit",
++ [META_ARGV_HOSTNAME] = "user.coredump.hostname",
++ [META_COMM] = "user.coredump.comm",
++ [META_EXE] = "user.coredump.exe",
+ };
+
+ int r = 0;
+--
+2.30.2
+
diff --git a/meta/recipes-core/systemd/systemd/CVE-2022-4415-2.patch b/meta/recipes-core/systemd/systemd/CVE-2022-4415-2.patch
new file mode 100644
index 0000000000..8389ee8cd6
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2022-4415-2.patch
@@ -0,0 +1,391 @@
+From 1d5e0e9910500f3c3584485f77bfc35e601036e3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Mon, 28 Nov 2022 12:12:55 +0100
+Subject: [PATCH 2/2] coredump: do not allow user to access coredumps with
+ changed uid/gid/capabilities
+
+When the user starts a program which elevates its permissions via setuid,
+setgid, or capabilities set on the file, it may access additional information
+which would then be visible in the coredump. We shouldn't make the coredump
+visible to the user in such cases.
+
+Reported-by: Matthias Gerstner <mgerstner@suse.de>
+
+This reads the /proc/<pid>/auxv file and attaches it to the process metadata as
+PROC_AUXV. Before the coredump is submitted, it is parsed and if either
+at_secure was set (which the kernel will do for processes that are setuid,
+setgid, or setcap), or if the effective uid/gid don't match uid/gid, the file
+is not made accessible to the user. If we can't access this data, we assume the
+file should not be made accessible either. In principle we could also access
+the auxv data from a note in the core file, but that is much more complex and
+it seems better to use the stand-alone file that is provided by the kernel.
+
+Attaching auxv is both convenient for this patch (because this way it's passed
+between the stages along with other fields), but I think it makes sense to save
+it in general.
+
+We use the information early in the core file to figure out if the program was
+32-bit or 64-bit and its endianness. This way we don't need heuristics to guess
+whether the format of the auxv structure. This test might reject some cases on
+fringe architecutes. But the impact would be limited: we just won't grant the
+user permissions to view the coredump file. If people report that we're missing
+some cases, we can always enhance this to support more architectures.
+
+I tested auxv parsing on amd64, 32-bit program on amd64, arm64, arm32, and
+ppc64el, but not the whole coredump handling.
+
+(cherry picked from commit 3e4d0f6cf99f8677edd6a237382a65bfe758de03)
+(cherry picked from commit 9b75a3d0502d6741c8ecb7175794345f8eb3827c)
+(cherry picked from commit efca5283dc791a07171f80eef84e14fdb58fad57)
+
+CVE: CVE-2022-4415
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/1d5e0e9910500f3c3584485f77bfc35e601036e3]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ src/basic/io-util.h | 9 ++
+ src/coredump/coredump.c | 196 +++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 192 insertions(+), 13 deletions(-)
+
+diff --git a/src/basic/io-util.h b/src/basic/io-util.h
+index 39728e06bc..3afb134266 100644
+--- a/src/basic/io-util.h
++++ b/src/basic/io-util.h
+@@ -91,7 +91,16 @@ struct iovec_wrapper *iovw_new(void);
+ struct iovec_wrapper *iovw_free(struct iovec_wrapper *iovw);
+ struct iovec_wrapper *iovw_free_free(struct iovec_wrapper *iovw);
+ void iovw_free_contents(struct iovec_wrapper *iovw, bool free_vectors);
++
+ int iovw_put(struct iovec_wrapper *iovw, void *data, size_t len);
++static inline int iovw_consume(struct iovec_wrapper *iovw, void *data, size_t len) {
++ /* Move data into iovw or free on error */
++ int r = iovw_put(iovw, data, len);
++ if (r < 0)
++ free(data);
++ return r;
++}
++
+ int iovw_put_string_field(struct iovec_wrapper *iovw, const char *field, const char *value);
+ int iovw_put_string_field_free(struct iovec_wrapper *iovw, const char *field, char *value);
+ void iovw_rebase(struct iovec_wrapper *iovw, char *old, char *new);
+diff --git a/src/coredump/coredump.c b/src/coredump/coredump.c
+index 8295b03ac7..79280ab986 100644
+--- a/src/coredump/coredump.c
++++ b/src/coredump/coredump.c
+@@ -4,6 +4,7 @@
+ #include <stdio.h>
+ #include <sys/prctl.h>
+ #include <sys/statvfs.h>
++#include <sys/auxv.h>
+ #include <sys/xattr.h>
+ #include <unistd.h>
+
+@@ -99,6 +100,7 @@ enum {
+
+ META_EXE = _META_MANDATORY_MAX,
+ META_UNIT,
++ META_PROC_AUXV,
+ _META_MAX
+ };
+
+@@ -113,10 +115,12 @@ static const char * const meta_field_names[_META_MAX] = {
+ [META_COMM] = "COREDUMP_COMM=",
+ [META_EXE] = "COREDUMP_EXE=",
+ [META_UNIT] = "COREDUMP_UNIT=",
++ [META_PROC_AUXV] = "COREDUMP_PROC_AUXV=",
+ };
+
+ typedef struct Context {
+ const char *meta[_META_MAX];
++ size_t meta_size[_META_MAX];
+ pid_t pid;
+ bool is_pid1;
+ bool is_journald;
+@@ -178,13 +182,16 @@ static uint64_t storage_size_max(void) {
+ return 0;
+ }
+
+-static int fix_acl(int fd, uid_t uid) {
++static int fix_acl(int fd, uid_t uid, bool allow_user) {
++ assert(fd >= 0);
++ assert(uid_is_valid(uid));
+
+ #if HAVE_ACL
+ int r;
+
+- assert(fd >= 0);
+- assert(uid_is_valid(uid));
++ /* We don't allow users to read coredumps if the uid or capabilities were changed. */
++ if (!allow_user)
++ return 0;
+
+ if (uid_is_system(uid) || uid_is_dynamic(uid) || uid == UID_NOBODY)
+ return 0;
+@@ -244,7 +251,8 @@ static int fix_permissions(
+ const char *filename,
+ const char *target,
+ const Context *context,
+- uid_t uid) {
++ uid_t uid,
++ bool allow_user) {
+
+ int r;
+
+@@ -254,7 +262,7 @@ static int fix_permissions(
+
+ /* Ignore errors on these */
+ (void) fchmod(fd, 0640);
+- (void) fix_acl(fd, uid);
++ (void) fix_acl(fd, uid, allow_user);
+ (void) fix_xattr(fd, context);
+
+ r = fsync_full(fd);
+@@ -324,6 +332,153 @@ static int make_filename(const Context *context, char **ret) {
+ return 0;
+ }
+
++static int parse_auxv64(
++ const uint64_t *auxv,
++ size_t size_bytes,
++ int *at_secure,
++ uid_t *uid,
++ uid_t *euid,
++ gid_t *gid,
++ gid_t *egid) {
++
++ assert(auxv || size_bytes == 0);
++
++ if (size_bytes % (2 * sizeof(uint64_t)) != 0)
++ return log_warning_errno(SYNTHETIC_ERRNO(EIO), "Incomplete auxv structure (%zu bytes).", size_bytes);
++
++ size_t words = size_bytes / sizeof(uint64_t);
++
++ /* Note that we set output variables even on error. */
++
++ for (size_t i = 0; i + 1 < words; i += 2)
++ switch (auxv[i]) {
++ case AT_SECURE:
++ *at_secure = auxv[i + 1] != 0;
++ break;
++ case AT_UID:
++ *uid = auxv[i + 1];
++ break;
++ case AT_EUID:
++ *euid = auxv[i + 1];
++ break;
++ case AT_GID:
++ *gid = auxv[i + 1];
++ break;
++ case AT_EGID:
++ *egid = auxv[i + 1];
++ break;
++ case AT_NULL:
++ if (auxv[i + 1] != 0)
++ goto error;
++ return 0;
++ }
++ error:
++ return log_warning_errno(SYNTHETIC_ERRNO(ENODATA),
++ "AT_NULL terminator not found, cannot parse auxv structure.");
++}
++
++static int parse_auxv32(
++ const uint32_t *auxv,
++ size_t size_bytes,
++ int *at_secure,
++ uid_t *uid,
++ uid_t *euid,
++ gid_t *gid,
++ gid_t *egid) {
++
++ assert(auxv || size_bytes == 0);
++
++ size_t words = size_bytes / sizeof(uint32_t);
++
++ if (size_bytes % (2 * sizeof(uint32_t)) != 0)
++ return log_warning_errno(SYNTHETIC_ERRNO(EIO), "Incomplete auxv structure (%zu bytes).", size_bytes);
++
++ /* Note that we set output variables even on error. */
++
++ for (size_t i = 0; i + 1 < words; i += 2)
++ switch (auxv[i]) {
++ case AT_SECURE:
++ *at_secure = auxv[i + 1] != 0;
++ break;
++ case AT_UID:
++ *uid = auxv[i + 1];
++ break;
++ case AT_EUID:
++ *euid = auxv[i + 1];
++ break;
++ case AT_GID:
++ *gid = auxv[i + 1];
++ break;
++ case AT_EGID:
++ *egid = auxv[i + 1];
++ break;
++ case AT_NULL:
++ if (auxv[i + 1] != 0)
++ goto error;
++ return 0;
++ }
++ error:
++ return log_warning_errno(SYNTHETIC_ERRNO(ENODATA),
++ "AT_NULL terminator not found, cannot parse auxv structure.");
++}
++
++static int grant_user_access(int core_fd, const Context *context) {
++ int at_secure = -1;
++ uid_t uid = UID_INVALID, euid = UID_INVALID;
++ uid_t gid = GID_INVALID, egid = GID_INVALID;
++ int r;
++
++ assert(core_fd >= 0);
++ assert(context);
++
++ if (!context->meta[META_PROC_AUXV])
++ return log_warning_errno(SYNTHETIC_ERRNO(ENODATA), "No auxv data, not adjusting permissions.");
++
++ uint8_t elf[EI_NIDENT];
++ errno = 0;
++ if (pread(core_fd, &elf, sizeof(elf), 0) != sizeof(elf))
++ return log_warning_errno(errno_or_else(EIO),
++ "Failed to pread from coredump fd: %s", errno != 0 ? strerror_safe(errno) : "Unexpected EOF");
++
++ if (elf[EI_MAG0] != ELFMAG0 ||
++ elf[EI_MAG1] != ELFMAG1 ||
++ elf[EI_MAG2] != ELFMAG2 ||
++ elf[EI_MAG3] != ELFMAG3 ||
++ elf[EI_VERSION] != EV_CURRENT)
++ return log_info_errno(SYNTHETIC_ERRNO(EUCLEAN),
++ "Core file does not have ELF header, not adjusting permissions.");
++ if (!IN_SET(elf[EI_CLASS], ELFCLASS32, ELFCLASS64) ||
++ !IN_SET(elf[EI_DATA], ELFDATA2LSB, ELFDATA2MSB))
++ return log_info_errno(SYNTHETIC_ERRNO(EUCLEAN),
++ "Core file has strange ELF class, not adjusting permissions.");
++
++ if ((elf[EI_DATA] == ELFDATA2LSB) != (__BYTE_ORDER == __LITTLE_ENDIAN))
++ return log_info_errno(SYNTHETIC_ERRNO(EUCLEAN),
++ "Core file has non-native endianness, not adjusting permissions.");
++
++ if (elf[EI_CLASS] == ELFCLASS64)
++ r = parse_auxv64((const uint64_t*) context->meta[META_PROC_AUXV],
++ context->meta_size[META_PROC_AUXV],
++ &at_secure, &uid, &euid, &gid, &egid);
++ else
++ r = parse_auxv32((const uint32_t*) context->meta[META_PROC_AUXV],
++ context->meta_size[META_PROC_AUXV],
++ &at_secure, &uid, &euid, &gid, &egid);
++ if (r < 0)
++ return r;
++
++ /* We allow access if we got all the data and at_secure is not set and
++ * the uid/gid matches euid/egid. */
++ bool ret =
++ at_secure == 0 &&
++ uid != UID_INVALID && euid != UID_INVALID && uid == euid &&
++ gid != GID_INVALID && egid != GID_INVALID && gid == egid;
++ log_debug("Will %s access (uid="UID_FMT " euid="UID_FMT " gid="GID_FMT " egid="GID_FMT " at_secure=%s)",
++ ret ? "permit" : "restrict",
++ uid, euid, gid, egid, yes_no(at_secure));
++ return ret;
++}
++
+ static int save_external_coredump(
+ const Context *context,
+ int input_fd,
+@@ -446,6 +601,8 @@ static int save_external_coredump(
+ context->meta[META_ARGV_PID], context->meta[META_COMM]);
+ truncated = r == 1;
+
++ bool allow_user = grant_user_access(fd, context) > 0;
++
+ #if HAVE_COMPRESSION
+ if (arg_compress) {
+ _cleanup_(unlink_and_freep) char *tmp_compressed = NULL;
+@@ -483,7 +640,7 @@ static int save_external_coredump(
+ uncompressed_size += partial_uncompressed_size;
+ }
+
+- r = fix_permissions(fd_compressed, tmp_compressed, fn_compressed, context, uid);
++ r = fix_permissions(fd_compressed, tmp_compressed, fn_compressed, context, uid, allow_user);
+ if (r < 0)
+ return r;
+
+@@ -510,7 +667,7 @@ static int save_external_coredump(
+ "SIZE_LIMIT=%zu", max_size,
+ "MESSAGE_ID=" SD_MESSAGE_TRUNCATED_CORE_STR);
+
+- r = fix_permissions(fd, tmp, fn, context, uid);
++ r = fix_permissions(fd, tmp, fn, context, uid, allow_user);
+ if (r < 0)
+ return log_error_errno(r, "Failed to fix permissions and finalize coredump %s into %s: %m", coredump_tmpfile_name(tmp), fn);
+
+@@ -758,7 +915,7 @@ static int change_uid_gid(const Context *context) {
+ }
+
+ static int submit_coredump(
+- Context *context,
++ const Context *context,
+ struct iovec_wrapper *iovw,
+ int input_fd) {
+
+@@ -919,16 +1076,15 @@ static int save_context(Context *context, const struct iovec_wrapper *iovw) {
+ struct iovec *iovec = iovw->iovec + n;
+
+ for (size_t i = 0; i < ELEMENTSOF(meta_field_names); i++) {
+- char *p;
+-
+ /* Note that these strings are NUL terminated, because we made sure that a
+ * trailing NUL byte is in the buffer, though not included in the iov_len
+ * count (see process_socket() and gather_pid_metadata_*()) */
+ assert(((char*) iovec->iov_base)[iovec->iov_len] == 0);
+
+- p = startswith(iovec->iov_base, meta_field_names[i]);
++ const char *p = startswith(iovec->iov_base, meta_field_names[i]);
+ if (p) {
+ context->meta[i] = p;
++ context->meta_size[i] = iovec->iov_len - strlen(meta_field_names[i]);
+ count++;
+ break;
+ }
+@@ -1170,6 +1326,7 @@ static int gather_pid_metadata(struct iovec_wrapper *iovw, Context *context) {
+ uid_t owner_uid;
+ pid_t pid;
+ char *t;
++ size_t size;
+ const char *p;
+ int r;
+
+@@ -1234,13 +1391,26 @@ static int gather_pid_metadata(struct iovec_wrapper *iovw, Context *context) {
+ (void) iovw_put_string_field_free(iovw, "COREDUMP_PROC_LIMITS=", t);
+
+ p = procfs_file_alloca(pid, "cgroup");
+- if (read_full_virtual_file(p, &t, NULL) >=0)
++ if (read_full_virtual_file(p, &t, NULL) >= 0)
+ (void) iovw_put_string_field_free(iovw, "COREDUMP_PROC_CGROUP=", t);
+
+ p = procfs_file_alloca(pid, "mountinfo");
+- if (read_full_virtual_file(p, &t, NULL) >=0)
++ if (read_full_virtual_file(p, &t, NULL) >= 0)
+ (void) iovw_put_string_field_free(iovw, "COREDUMP_PROC_MOUNTINFO=", t);
+
++ /* We attach /proc/auxv here. ELF coredumps also contain a note for this (NT_AUXV), see elf(5). */
++ p = procfs_file_alloca(pid, "auxv");
++ if (read_full_virtual_file(p, &t, &size) >= 0) {
++ char *buf = malloc(strlen("COREDUMP_PROC_AUXV=") + size + 1);
++ if (buf) {
++ /* Add a dummy terminator to make save_context() happy. */
++ *((uint8_t*) mempcpy(stpcpy(buf, "COREDUMP_PROC_AUXV="), t, size)) = '\0';
++ (void) iovw_consume(iovw, buf, size + strlen("COREDUMP_PROC_AUXV="));
++ }
++
++ free(t);
++ }
++
+ if (get_process_cwd(pid, &t) >= 0)
+ (void) iovw_put_string_field_free(iovw, "COREDUMP_CWD=", t);
+
+--
+2.30.2
+
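
The permission logic added above (grant_user_access() plus the parse_auxv helpers) boils down to scanning the crashed process's /proc/<pid>/auxv for the AT_SECURE/AT_UID/AT_EUID/AT_GID/AT_EGID entries and granting the owner read access only when the process was not running in "secure" (setuid/setgid) mode. As a rough stand-alone sketch of that policy, for a 64-bit auxv only, with an illustrative function name rather than systemd's:

    /* Illustrative only: simplified 64-bit variant of the auxv scan performed
     * by the patch above.  The real parse_auxv64()/parse_auxv32() helpers also
     * validate the buffer size and handle both ELF classes. */
    #include <elf.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/types.h>

    bool may_grant_user_access(const uint64_t *auxv, size_t n_entries) {
            uint64_t secure = 1;                 /* be restrictive by default */
            uid_t uid = (uid_t) -1, euid = (uid_t) -1;
            gid_t gid = (gid_t) -1, egid = (gid_t) -1;

            /* auxv is a sequence of (type, value) pairs terminated by AT_NULL. */
            for (size_t i = 0; i + 1 < n_entries; i += 2) {
                    if (auxv[i] == AT_NULL)
                            break;
                    switch (auxv[i]) {
                    case AT_SECURE: secure = auxv[i + 1]; break;
                    case AT_UID:    uid    = (uid_t) auxv[i + 1]; break;
                    case AT_EUID:   euid   = (uid_t) auxv[i + 1]; break;
                    case AT_GID:    gid    = (gid_t) auxv[i + 1]; break;
                    case AT_EGID:   egid   = (gid_t) auxv[i + 1]; break;
                    }
            }

            /* Same policy as the patch: only a non-setuid/setgid, non-"secure"
             * process gets a user-readable core file. */
            return secure == 0 &&
                   uid != (uid_t) -1 && uid == euid &&
                   gid != (gid_t) -1 && gid == egid;
    }

The patch itself is stricter: it also rejects truncated auxv data and non-native ELF class or endianness before trusting any of these values.
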
diff --git a/meta/recipes-core/systemd/systemd/CVE-2022-45873.patch b/meta/recipes-core/systemd/systemd/CVE-2022-45873.patch
new file mode 100644
index 0000000000..94bd22ca43
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2022-45873.patch
@@ -0,0 +1,124 @@
+From 076b807be472630692c5348c60d0c2b7b28ad437 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 18 Oct 2022 18:23:53 +0200
+Subject: [PATCH] coredump: avoid deadlock when passing processed backtrace
+ data
+
+We would deadlock when passing the data back from the forked-off process that
+was doing backtrace generation back to the coredump parent. This is because we
+fork the child and wait for it to exit. The child tries to write too much data
+to the output pipe, and after the first 64k it blocks waiting on the parent because
+the pipe is full. The bug surfaced in Fedora because of a combination of four
+factors:
+- 87707784c70dc9894ec613df0a6e75e732a362a3 was backported to v251.5, which
+ allowed coredump processing to be successful.
+- 1a0281a3ebf4f8c16d40aa9e63103f16cd23bb2a was NOT backported, so the output
+ was very verbose.
+- Fedora has the ELF package metadata available, so a lot of output can be
+ generated. Most other distros just don't have the information.
+- gnome-calendar crashes and has a bazillion modules and 69596 bytes of output
+ are generated for it.
+
+Fixes https://bugzilla.redhat.com/show_bug.cgi?id=2135778.
+
+The code is changed to try to write data opportunistically. If we get partial
+information, that is still logged. It is generally better to log partial
+backtrace information than nothing at all.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/076b807be472630692c5348c60d0c2b7b28ad437]
+CVE: CVE-2022-45873
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/shared/elf-util.c | 37 +++++++++++++++++++++++++++++++------
+ 1 file changed, 31 insertions(+), 6 deletions(-)
+
+diff --git a/src/shared/elf-util.c b/src/shared/elf-util.c
+index 6d9fcfbbf2..bd27507346 100644
+--- a/src/shared/elf-util.c
++++ b/src/shared/elf-util.c
+@@ -30,6 +30,9 @@
+ #define THREADS_MAX 64
+ #define ELF_PACKAGE_METADATA_ID 0xcafe1a7e
+
++/* The amount of data we're willing to write to each of the output pipes. */
++#define COREDUMP_PIPE_MAX (1024*1024U)
++
+ static void *dw_dl = NULL;
+ static void *elf_dl = NULL;
+
+@@ -700,13 +703,13 @@ int parse_elf_object(int fd, const char *executable, bool fork_disable_dump, cha
+ return r;
+
+ if (ret) {
+- r = RET_NERRNO(pipe2(return_pipe, O_CLOEXEC));
++ r = RET_NERRNO(pipe2(return_pipe, O_CLOEXEC|O_NONBLOCK));
+ if (r < 0)
+ return r;
+ }
+
+ if (ret_package_metadata) {
+- r = RET_NERRNO(pipe2(json_pipe, O_CLOEXEC));
++ r = RET_NERRNO(pipe2(json_pipe, O_CLOEXEC|O_NONBLOCK));
+ if (r < 0)
+ return r;
+ }
+@@ -750,8 +753,24 @@ int parse_elf_object(int fd, const char *executable, bool fork_disable_dump, cha
+ goto child_fail;
+
+ if (buf) {
+- r = loop_write(return_pipe[1], buf, strlen(buf), false);
+- if (r < 0)
++ size_t len = strlen(buf);
++
++ if (len > COREDUMP_PIPE_MAX) {
++ /* This is iffy. A backtrace can be a few hundred kilobytes, but too much is
++ * too much. Let's log a warning and ignore the rest. */
++ log_warning("Generated backtrace is %zu bytes (more than the limit of %u bytes), backtrace will be truncated.",
++ len, COREDUMP_PIPE_MAX);
++ len = COREDUMP_PIPE_MAX;
++ }
++
++ /* Bump the space for the returned string.
++ * Failure is ignored, because partial output is still useful. */
++ (void) fcntl(return_pipe[1], F_SETPIPE_SZ, len);
++
++ r = loop_write(return_pipe[1], buf, len, false);
++ if (r == -EAGAIN)
++ log_warning("Write failed, backtrace will be truncated.");
++ else if (r < 0)
+ goto child_fail;
+
+ return_pipe[1] = safe_close(return_pipe[1]);
+@@ -760,13 +779,19 @@ int parse_elf_object(int fd, const char *executable, bool fork_disable_dump, cha
+ if (package_metadata) {
+ _cleanup_fclose_ FILE *json_out = NULL;
+
++ /* Bump the space for the returned string. We don't know how much space we'll need in
++ * advance, so we'll just try to write as much as possible and maybe fail later. */
++ (void) fcntl(json_pipe[1], F_SETPIPE_SZ, COREDUMP_PIPE_MAX);
++
+ json_out = take_fdopen(&json_pipe[1], "w");
+ if (!json_out) {
+ r = -errno;
+ goto child_fail;
+ }
+
+- json_variant_dump(package_metadata, JSON_FORMAT_FLUSH, json_out, NULL);
++ r = json_variant_dump(package_metadata, JSON_FORMAT_FLUSH, json_out, NULL);
++ if (r < 0)
++ log_warning_errno(r, "Failed to write JSON package metadata, ignoring: %m");
+ }
+
+ _exit(EXIT_SUCCESS);
+@@ -801,7 +826,7 @@ int parse_elf_object(int fd, const char *executable, bool fork_disable_dump, cha
+
+ r = json_parse_file(json_in, NULL, 0, &package_metadata, NULL, NULL);
+ if (r < 0 && r != -EINVAL) /* EINVAL: json was empty, so we got nothing, but that's ok */
+- return r;
++ log_warning_errno(r, "Failed to read or parse json metadata, ignoring: %m");
+ }
+
+ if (ret)
+--
+2.25.1
+
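
The deadlock described in the commit message above is the classic write-while-parent-waits pattern: the child fills the default 64 KiB pipe buffer and blocks, while the parent is blocked waiting for the child to exit and never drains the pipe. A rough sketch of the mitigation the patch applies (non-blocking pipe, best-effort F_SETPIPE_SZ bump, partial output accepted); the constant and function name here are illustrative, not systemd's:

    /* Illustrative sketch of the pattern used by the patch: write what fits
     * into a non-blocking pipe whose buffer we try (but do not require) to
     * enlarge first.  Partial output is accepted instead of blocking until the
     * parent reads. */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define OUTPUT_PIPE_MAX (1024*1024U)   /* illustrative cap, like COREDUMP_PIPE_MAX */

    int write_report(int pipe_wr_fd, const char *buf) {
            size_t len = strlen(buf);

            if (len > OUTPUT_PIPE_MAX)
                    len = OUTPUT_PIPE_MAX;           /* truncate rather than deadlock */

            /* Best effort: ask the kernel for a bigger pipe buffer; ignore failure. */
            (void) fcntl(pipe_wr_fd, F_SETPIPE_SZ, (int) len);

            ssize_t n = write(pipe_wr_fd, buf, len); /* fd was opened with O_NONBLOCK */
            if (n < 0 && errno == EAGAIN) {
                    fprintf(stderr, "pipe full, output truncated\n");
                    return 0;                        /* partial data is still useful */
            }
            return n < 0 ? -errno : 0;
    }
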
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-7008.patch b/meta/recipes-core/systemd/systemd/CVE-2023-7008.patch
new file mode 100644
index 0000000000..e2296abc49
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-7008.patch
@@ -0,0 +1,40 @@
+From 3b4cc1437b51fcc0b08da8cc3f5d1175eed25eb1 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Wed, 20 Dec 2023 16:44:14 +0100
+Subject: [PATCH] resolved: actually check authenticated flag of SOA
+ transaction
+
+Fixes #25676
+
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/3b4cc1437b51fcc0b08da8cc3f5d1175eed25eb1]
+CVE: CVE-2023-7008
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/resolve/resolved-dns-transaction.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/resolve/resolved-dns-transaction.c b/src/resolve/resolved-dns-transaction.c
+index f937f9f7b5..7deb598400 100644
+--- a/src/resolve/resolved-dns-transaction.c
++++ b/src/resolve/resolved-dns-transaction.c
+@@ -2761,7 +2761,7 @@ static int dns_transaction_requires_rrsig(DnsTransaction *t, DnsResourceRecord *
+ if (r == 0)
+ continue;
+
+- return FLAGS_SET(t->answer_query_flags, SD_RESOLVED_AUTHENTICATED);
++ return FLAGS_SET(dt->answer_query_flags, SD_RESOLVED_AUTHENTICATED);
+ }
+
+ return true;
+@@ -2788,7 +2788,7 @@ static int dns_transaction_requires_rrsig(DnsTransaction *t, DnsResourceRecord *
+ /* We found the transaction that was supposed to find the SOA RR for us. It was
+ * successful, but found no RR for us. This means we are not at a zone cut. In this
+ * case, we require authentication if the SOA lookup was authenticated too. */
+- return FLAGS_SET(t->answer_query_flags, SD_RESOLVED_AUTHENTICATED);
++ return FLAGS_SET(dt->answer_query_flags, SD_RESOLVED_AUTHENTICATED);
+ }
+
+ return true;
+--
+2.25.1
+
diff --git a/meta/recipes-core/systemd/systemd/fix-vlan-qos-mapping.patch b/meta/recipes-core/systemd/systemd/fix-vlan-qos-mapping.patch
new file mode 100644
index 0000000000..c530de7f50
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/fix-vlan-qos-mapping.patch
@@ -0,0 +1,140 @@
+From 4d13d175f8454df63843a880c78badd4f6d720ca Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Wed, 24 May 2023 11:02:36 +0900
+Subject: [PATCH 1/3] network/vlan: drop unnecessary restriction for QoS
+ mapping
+
+Fixes #27460.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/fe830b84d4002582e7aefb16e5e09fd0195f21c8.patch]
+Signed-off-by: Sana Kazi <sana.kazi@kpit.com>
+---
+ src/network/netdev/vlan.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/src/network/netdev/vlan.c b/src/network/netdev/vlan.c
+index a3d961dac3ca4..d61e9486abc47 100644
+--- a/src/network/netdev/vlan.c
++++ b/src/network/netdev/vlan.c
+@@ -165,11 +165,6 @@ int config_parse_vlan_qos_maps(
+ continue;
+ }
+
+- if (m->to > m->from || m->to == 0 || m->from == 0) {
+- log_syntax(unit, LOG_WARNING, filename, line, 0, "Invalid %s, ignoring: %s", lvalue, w);
+- continue;
+- }
+-
+ r = set_ensure_consume(s, &vlan_qos_maps_hash_ops, TAKE_PTR(m));
+ if (r < 0) {
+ log_syntax(unit, LOG_WARNING, filename, line, r, "Failed to store %s, ignoring: %s", lvalue, w);
+
+From 4194478af861f80a73905d1f9e570a09862f91a7 Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Wed, 24 May 2023 11:06:35 +0900
+Subject: [PATCH 2/3] network/vlan: paranoia about type safety
+
+No functional change, as the struct is defined as the following:
+```
+struct ifla_vlan_qos_mapping {
+ __u32 from;
+ __u32 to;
+};
+```
+---
+ src/network/netdev/vlan.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/src/network/netdev/vlan.c b/src/network/netdev/vlan.c
+index d61e9486abc47..5eb36ef6801f3 100644
+--- a/src/network/netdev/vlan.c
++++ b/src/network/netdev/vlan.c
+@@ -144,6 +144,7 @@ int config_parse_vlan_qos_maps(
+ for (const char *p = rvalue;;) {
+ _cleanup_free_ struct ifla_vlan_qos_mapping *m = NULL;
+ _cleanup_free_ char *w = NULL;
++ unsigned from, to;
+
+ r = extract_first_word(&p, &w, NULL, EXTRACT_CUNESCAPE|EXTRACT_UNQUOTE);
+ if (r == -ENOMEM)
+@@ -155,16 +156,21 @@ int config_parse_vlan_qos_maps(
+ if (r == 0)
+ return 0;
+
+- m = new0(struct ifla_vlan_qos_mapping, 1);
+- if (!m)
+- return log_oom();
+-
+- r = parse_range(w, &m->from, &m->to);
++ r = parse_range(w, &from, &to);
+ if (r < 0) {
+ log_syntax(unit, LOG_WARNING, filename, line, r, "Failed to parse %s, ignoring: %s", lvalue, w);
+ continue;
+ }
+
++ m = new(struct ifla_vlan_qos_mapping, 1);
++ if (!m)
++ return log_oom();
++
++ *m = (struct ifla_vlan_qos_mapping) {
++ .from = from,
++ .to = to,
++ };
++
+ r = set_ensure_consume(s, &vlan_qos_maps_hash_ops, TAKE_PTR(m));
+ if (r < 0) {
+ log_syntax(unit, LOG_WARNING, filename, line, r, "Failed to store %s, ignoring: %s", lvalue, w);
+
+From 73d24e45f8ac18eaaebf1df2b1f055c14179c6ff Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Wed, 24 May 2023 11:15:44 +0900
+Subject: [PATCH 3/3] test-network: add tests for vlan QoS mapping
+
+---
+ .../conf/21-vlan.netdev.d/override.conf | 10 ++++++----
+ test/test-network/systemd-networkd-tests.py | 14 ++++++++------
+ 2 files changed, 14 insertions(+), 10 deletions(-)
+
+diff --git a/test/test-network/conf/21-vlan.netdev.d/override.conf b/test/test-network/conf/21-vlan.netdev.d/override.conf
+index 3b8d47d9b1db5..c71077d274a69 100644
+--- a/test/test-network/conf/21-vlan.netdev.d/override.conf
++++ b/test/test-network/conf/21-vlan.netdev.d/override.conf
+@@ -3,7 +3,9 @@ MTUBytes=2000
+
+ [VLAN]
+ Id=99
+-GVRP=true
+-MVRP=true
+-LooseBinding=true
+-ReorderHeader=true
++GVRP=yes
++MVRP=yes
++LooseBinding=yes
++ReorderHeader=yes
++EgressQOSMaps=0-1 1-3 10-3 6-6 7-7
++IngressQOSMaps=15-13 20-100
+diff --git a/test/test-network/systemd-networkd-tests.py b/test/test-network/systemd-networkd-tests.py
+index fe72f37ce4f47..8b01718d55e64 100755
+--- a/test/test-network/systemd-networkd-tests.py
++++ b/test/test-network/systemd-networkd-tests.py
+@@ -1285,12 +1285,14 @@ def test_vlan(self):
+
+ output = check_output('ip -d link show vlan99')
+ print(output)
+- self.assertRegex(output, ' mtu 2000 ')
+- self.assertRegex(output, 'REORDER_HDR')
+- self.assertRegex(output, 'LOOSE_BINDING')
+- self.assertRegex(output, 'GVRP')
+- self.assertRegex(output, 'MVRP')
+- self.assertRegex(output, ' id 99 ')
++ self.assertIn(' mtu 2000 ', output)
++ self.assertIn('REORDER_HDR', output)
++ self.assertIn('LOOSE_BINDING', output)
++ self.assertIn('GVRP', output)
++ self.assertIn('MVRP', output)
++ self.assertIn(' id 99 ', output)
++ self.assertIn('ingress-qos-map { 4:100 7:13 }', output)
++ self.assertIn('egress-qos-map { 0:1 1:3 6:6 7:7 10:3 }', output)
+
+ output = check_output('ip -4 address show dev test1')
+ print(output)
diff --git a/meta/recipes-core/systemd/systemd_250.5.bb b/meta/recipes-core/systemd/systemd_250.5.bb
index 006b2f86ea..4d520c85f3 100644
--- a/meta/recipes-core/systemd/systemd_250.5.bb
+++ b/meta/recipes-core/systemd/systemd_250.5.bb
@@ -25,6 +25,15 @@ SRC_URI += "file://touchscreen.rules \
file://0003-implment-systemd-sysv-install-for-OE.patch \
file://0001-Move-sysusers.d-sysctl.d-binfmt.d-modules-load.d-to-.patch \
file://0001-resolve-Use-sockaddr-pointer-type-for-bind.patch \
+ file://CVE-2022-3821.patch \
+ file://CVE-2022-45873.patch \
+ file://0001-shared-json-allow-json_variant_dump-to-return-an-err.patch \
+ file://CVE-2022-4415-1.patch \
+ file://CVE-2022-4415-2.patch \
+ file://0001-network-remove-only-managed-configs-on-reconfigure-o.patch \
+ file://0001-nspawn-make-sure-host-root-can-write-to-the-uidmappe.patch \
+ file://CVE-2023-7008.patch \
+ file://fix-vlan-qos-mapping.patch \
"
# patches needed by musl
@@ -165,6 +174,7 @@ PACKAGECONFIG[manpages] = "-Dman=true,-Dman=false,libxslt-native xmlto-native do
PACKAGECONFIG[microhttpd] = "-Dmicrohttpd=true,-Dmicrohttpd=false,libmicrohttpd"
PACKAGECONFIG[myhostname] = "-Dnss-myhostname=true,-Dnss-myhostname=false,,libnss-myhostname"
PACKAGECONFIG[networkd] = "-Dnetworkd=true,-Dnetworkd=false"
+PACKAGECONFIG[no-dns-fallback] = "-Ddns-servers="
PACKAGECONFIG[nss] = "-Dnss-systemd=true,-Dnss-systemd=false"
PACKAGECONFIG[nss-mymachines] = "-Dnss-mymachines=true,-Dnss-mymachines=false"
PACKAGECONFIG[nss-resolve] = "-Dnss-resolve=true,-Dnss-resolve=false"
@@ -217,7 +227,7 @@ rootlibdir ?= "${base_libdir}"
rootlibexecdir = "${rootprefix}/lib"
EXTRA_OEMESON += "-Dnobody-user=nobody \
- -Dnobody-group=nobody \
+ -Dnobody-group=nogroup \
-Drootlibdir=${rootlibdir} \
-Drootprefix=${rootprefix} \
-Ddefault-locale=C \
@@ -387,11 +397,13 @@ SYSTEMD_PACKAGES = "${@bb.utils.contains('PACKAGECONFIG', 'binfmt', '${PN}-binfm
SYSTEMD_SERVICE:${PN}-binfmt = "systemd-binfmt.service"
USERADD_PACKAGES = "${PN} ${PN}-extra-utils \
+ udev \
${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-gatewayd', '', d)} \
${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-remote', '', d)} \
${@bb.utils.contains('PACKAGECONFIG', 'journal-upload', '${PN}-journal-upload', '', d)} \
"
GROUPADD_PARAM:${PN} = "-r systemd-journal;"
+GROUPADD_PARAM:udev = "-r render;-r sgx;"
GROUPADD_PARAM:${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'polkit_hostnamed_fallback', '-r systemd-hostname;', '', d)}"
USERADD_PARAM:${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'coredump', '--system -d / -M --shell /sbin/nologin systemd-coredump;', '', d)}"
USERADD_PARAM:${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'networkd', '--system -d / -M --shell /sbin/nologin systemd-network;', '', d)}"
@@ -429,9 +441,9 @@ FILES:${PN}-binfmt = "${sysconfdir}/binfmt.d/ \
${rootlibexecdir}/systemd/systemd-binfmt \
${systemd_system_unitdir}/proc-sys-fs-binfmt_misc.* \
${systemd_system_unitdir}/systemd-binfmt.service"
-RRECOMMENDS:${PN}-binfmt = "kernel-module-binfmt-misc"
+RRECOMMENDS:${PN}-binfmt = "${@bb.utils.contains('PACKAGECONFIG', 'binfmt', 'kernel-module-binfmt-misc', '', d)}"
-RRECOMMENDS:${PN}-vconsole-setup = "kbd kbd-consolefonts kbd-keymaps"
+RRECOMMENDS:${PN}-vconsole-setup = "${@bb.utils.contains('PACKAGECONFIG', 'vconsole', 'kbd kbd-consolefonts kbd-keymaps', '', d)}"
FILES:${PN}-journal-gatewayd = "${rootlibexecdir}/systemd/systemd-journal-gatewayd \
@@ -765,21 +777,25 @@ ALTERNATIVE_LINK_NAME[runlevel] = "${base_sbindir}/runlevel"
ALTERNATIVE_PRIORITY[runlevel] ?= "300"
pkg_postinst:${PN}:libc-glibc () {
- sed -e '/^hosts:/s/\s*\<myhostname\>//' \
- -e 's/\(^hosts:.*\)\(\<files\>\)\(.*\)\(\<dns\>\)\(.*\)/\1\2 myhostname \3\4\5/' \
- -i $D${sysconfdir}/nsswitch.conf
+ if ${@bb.utils.contains('PACKAGECONFIG', 'myhostname', 'true', 'false', d)}; then
+ sed -e '/^hosts:/s/\s*\<myhostname\>//' \
+ -e 's/\(^hosts:.*\)\(\<files\>\)\(.*\)\(\<dns\>\)\(.*\)/\1\2 myhostname \3\4\5/' \
+ -i $D${sysconfdir}/nsswitch.conf
+ fi
}
pkg_prerm:${PN}:libc-glibc () {
- sed -e '/^hosts:/s/\s*\<myhostname\>//' \
- -e '/^hosts:/s/\s*myhostname//' \
- -i $D${sysconfdir}/nsswitch.conf
+ if ${@bb.utils.contains('PACKAGECONFIG', 'myhostname', 'true', 'false', d)}; then
+ sed -e '/^hosts:/s/\s*\<myhostname\>//' \
+ -e '/^hosts:/s/\s*myhostname//' \
+ -i $D${sysconfdir}/nsswitch.conf
+ fi
}
PACKAGE_WRITE_DEPS += "qemu-native"
pkg_postinst:udev-hwdb () {
if test -n "$D"; then
- $INTERCEPT_DIR/postinst_intercept update_udev_hwdb ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} rootlibexecdir="${rootlibexecdir}" PREFERRED_PROVIDER_udev="${PREFERRED_PROVIDER_udev}"
+ $INTERCEPT_DIR/postinst_intercept update_udev_hwdb ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} rootlibexecdir="${rootlibexecdir}" PREFERRED_PROVIDER_udev="${PREFERRED_PROVIDER_udev}" base_bindir="${base_bindir}"
else
udevadm hwdb --update
fi
diff --git a/meta/recipes-core/sysvinit/sysvinit-inittab/start_getty b/meta/recipes-core/sysvinit/sysvinit-inittab/start_getty
index 699a1ead1a..3c31a95f7f 100644
--- a/meta/recipes-core/sysvinit/sysvinit-inittab/start_getty
+++ b/meta/recipes-core/sysvinit/sysvinit-inittab/start_getty
@@ -14,4 +14,7 @@ esac
if [ -e /sys/class/tty/$2 -a -c /dev/$2 ]; then
${setsid:-} ${getty} -L $1 $2 $3
+else
+	# Prevent a 'respawning too fast' error if the /dev entry does not exist
+ sleep 1000
fi
diff --git a/meta/recipes-core/udev/udev-extraconf/mount.blacklist b/meta/recipes-core/udev/udev-extraconf/mount.ignorelist
index e49349428b..e49349428b 100644
--- a/meta/recipes-core/udev/udev-extraconf/mount.blacklist
+++ b/meta/recipes-core/udev/udev-extraconf/mount.ignorelist
diff --git a/meta/recipes-core/udev/udev-extraconf/mount.sh b/meta/recipes-core/udev/udev-extraconf/mount.sh
index b23731870e..b7e86dbc0e 100644
--- a/meta/recipes-core/udev/udev-extraconf/mount.sh
+++ b/meta/recipes-core/udev/udev-extraconf/mount.sh
@@ -6,6 +6,7 @@
BASE_INIT="`readlink -f "@base_sbindir@/init"`"
INIT_SYSTEMD="@systemd_unitdir@/systemd"
+MOUNT_BASE="@MOUNT_BASE@"
if [ "x$BASE_INIT" = "x$INIT_SYSTEMD" ];then
# systemd as init uses systemd-mount to mount block devices
@@ -26,11 +27,11 @@ fi
PMOUNT="/usr/bin/pmount"
-for line in `grep -h -v ^# /etc/udev/mount.blacklist /etc/udev/mount.blacklist.d/*`
+for line in `grep -h -v ^# /etc/udev/mount.ignorelist /etc/udev/mount.ignorelist.d/*`
do
if [ ` expr match "$DEVNAME" "$line" ` -gt 0 ];
then
- logger "udev/mount.sh" "[$DEVNAME] is blacklisted, ignoring"
+ logger "udev/mount.sh" "[$DEVNAME] is marked to ignore"
exit 0
fi
done
@@ -39,11 +40,21 @@ automount_systemd() {
name="`basename "$DEVNAME"`"
# Skip already mounted partitions
- if [ -f /run/systemd/transient/run-media-$name.mount ]; then
- logger "mount.sh/automount" "/run/media/$name already mounted"
+ if [ -f /run/systemd/transient/$(echo $MOUNT_BASE | cut -d '/' -f 2- | sed 's#/#-#g')-*$name.mount ]; then
+ logger "mount.sh/automount" "$MOUNT_BASE/$name already mounted"
return
fi
+ # Get the unique name for mount point
+ get_label_name "${DEVNAME}"
+
+ # Only go for auto-mounting when the device has been cleaned up in remove
+ # or has not been identified yet
+ if [ -e "/tmp/.automount-$name" ]; then
+ logger "mount.sh/automount" "[$MOUNT_BASE/$name] is already cached"
+ return
+ fi
+
# Skip the partition which are already in /etc/fstab
grep "^[[:space:]]*$DEVNAME" /etc/fstab && return
for n in LABEL PARTLABEL UUID PARTUUID; do
@@ -53,7 +64,7 @@ automount_systemd() {
grep "^[[:space:]]*$tmp" /etc/fstab && return
done
- [ -d "/run/media/$name" ] || mkdir -p "/run/media/$name"
+ [ -d "$MOUNT_BASE/$name" ] || mkdir -p "$MOUNT_BASE/$name"
MOUNT="$MOUNT -o silent"
@@ -65,18 +76,20 @@ automount_systemd() {
;;
swap)
return ;;
+ lvm*|LVM*)
+ return ;;
# TODO
*)
;;
esac
- if ! $MOUNT --no-block -t auto $DEVNAME "/run/media/$name"
+ if ! $MOUNT --no-block -t auto $DEVNAME "$MOUNT_BASE/$name"
then
- #logger "mount.sh/automount" "$MOUNT -t auto $DEVNAME \"/run/media/$name\" failed!"
- rm_dir "/run/media/$name"
+ #logger "mount.sh/automount" "$MOUNT -t auto $DEVNAME \"$MOUNT_BASE/$name\" failed!"
+ rm_dir "$MOUNT_BASE/$name"
else
- logger "mount.sh/automount" "Auto-mount of [/run/media/$name] successful"
- touch "/tmp/.automount-$name"
+ logger "mount.sh/automount" "Auto-mount of [$MOUNT_BASE/$name] successful"
+ echo "$name" > "/tmp/.automount-$name"
fi
}
@@ -93,7 +106,17 @@ automount() {
# configured in fstab
grep -q "^$DEVNAME " /proc/mounts && return
- ! test -d "/run/media/$name" && mkdir -p "/run/media/$name"
+ # Get the unique name for mount point
+ get_label_name "${DEVNAME}"
+
+ # Only go for auto-mounting when the device has been cleaned up in remove
+ # or has not been identified yet
+ if [ -e "/tmp/.automount-$name" ]; then
+ logger "mount.sh/automount" "[$MOUNT_BASE/$name] is already cached"
+ return
+ fi
+
+ ! test -d "$MOUNT_BASE/$name" && mkdir -p "$MOUNT_BASE/$name"
# Silent util-linux's version of mounting auto
if [ "x`readlink $MOUNT`" = "x/bin/mount.util-linux" ] ;
then
@@ -108,18 +131,23 @@ automount() {
;;
swap)
return ;;
+ lvm*|LVM*)
+ return ;;
# TODO
*)
;;
esac
- if ! $MOUNT -t auto $DEVNAME "/run/media/$name"
+ if ! $MOUNT -t auto $DEVNAME "$MOUNT_BASE/$name"
then
- #logger "mount.sh/automount" "$MOUNT -t auto $DEVNAME \"/run/media/$name\" failed!"
- rm_dir "/run/media/$name"
+ #logger "mount.sh/automount" "$MOUNT -t auto $DEVNAME \"$MOUNT_BASE/$name\" failed!"
+ rm_dir "$MOUNT_BASE/$name"
else
- logger "mount.sh/automount" "Auto-mount of [/run/media/$name] successful"
- touch "/tmp/.automount-$name"
+ logger "mount.sh/automount" "Auto-mount of [$MOUNT_BASE/$name] successful"
+ # The actual device might not be present in the remove event so blkid cannot
+ # be used to calculate what name was generated here. Simply save the mount
+ # name in our tmp file.
+ echo "$name" > "/tmp/.automount-$name"
fi
}
@@ -133,6 +161,18 @@ rm_dir() {
fi
}
+get_label_name() {
+ # Get the LABEL or PARTLABEL
+ LABEL=`/sbin/blkid | grep "$1:" | grep -o 'LABEL=".*"' | cut -d '"' -f2`
+ # If the $DEVNAME has a LABEL or a PARTLABEL
+ if [ -n "$LABEL" ]; then
+ # Set the mount location dir name to LABEL appended
+ # with $name e.g. label-sda. That would avoid overlapping
+ # mounts in case two devices have same LABEL
+ name="${LABEL}-${name}"
+ fi
+}
+
# No ID_FS_TYPE for cdrom device, yet it should be mounted
name="`basename "$DEVNAME"`"
[ -e /sys/block/$name/device/media ] && media_type=`cat /sys/block/$name/device/media`
@@ -150,12 +190,18 @@ if [ "$ACTION" = "add" ] && [ -n "$DEVNAME" ] && [ -n "$ID_FS_TYPE" -o "$media_t
fi
if [ "$ACTION" = "remove" ] || [ "$ACTION" = "change" ] && [ -x "$UMOUNT" ] && [ -n "$DEVNAME" ]; then
- for mnt in `cat /proc/mounts | grep "$DEVNAME" | cut -f 2 -d " " `
- do
- $UMOUNT $mnt
- done
-
- # Remove empty directories from auto-mounter
name="`basename "$DEVNAME"`"
- test -e "/tmp/.automount-$name" && rm_dir "/run/media/$name"
+ tmpfile=`find /tmp | grep "\.automount-.*${name}$"`
+ if [ ! -e "/sys/$DEVPATH" -a -e "$tmpfile" ]; then
+ logger "mount.sh/remove" "cleaning up $DEVNAME, was mounted by the auto-mounter"
+ for mnt in `cat /proc/mounts | grep "$DEVNAME" | cut -f 2 -d " " `
+ do
+ $UMOUNT $mnt
+ done
+ # Remove mount directory created by the auto-mounter
+ # and clean up our tmp cache file
+ mntdir=`cat "$tmpfile"`
+ rm_dir "$MOUNT_BASE/$mntdir"
+ rm "$tmpfile"
+ fi
fi
diff --git a/meta/recipes-core/udev/udev-extraconf_1.1.bb b/meta/recipes-core/udev/udev-extraconf_1.1.bb
index 2ba35b0df6..30f1fe76d0 100644
--- a/meta/recipes-core/udev/udev-extraconf_1.1.bb
+++ b/meta/recipes-core/udev/udev-extraconf_1.1.bb
@@ -1,13 +1,13 @@
SUMMARY = "Extra machine specific configuration files"
HOMEPAGE = "https://wiki.gentoo.org/wiki/Eudev"
-DESCRIPTION = "Extra machine specific configuration files for udev, specifically blacklist information."
+DESCRIPTION = "Extra machine specific configuration files for udev, specifically information on devices to ignore."
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
SRC_URI = " \
file://automount.rules \
file://mount.sh \
- file://mount.blacklist \
+ file://mount.ignorelist \
file://autonet.rules \
file://network.sh \
file://localextra.rules \
@@ -15,6 +15,7 @@ SRC_URI = " \
S = "${WORKDIR}"
+MOUNT_BASE = "/run/media"
do_install() {
install -d ${D}${sysconfdir}/udev/rules.d
@@ -23,21 +24,33 @@ do_install() {
install -m 0644 ${WORKDIR}/autonet.rules ${D}${sysconfdir}/udev/rules.d/autonet.rules
install -m 0644 ${WORKDIR}/localextra.rules ${D}${sysconfdir}/udev/rules.d/localextra.rules
- install -d ${D}${sysconfdir}/udev/mount.blacklist.d
- install -m 0644 ${WORKDIR}/mount.blacklist ${D}${sysconfdir}/udev/
+ install -d ${D}${sysconfdir}/udev/mount.ignorelist.d
+ install -m 0644 ${WORKDIR}/mount.ignorelist ${D}${sysconfdir}/udev/
install -d ${D}${sysconfdir}/udev/scripts/
install -m 0755 ${WORKDIR}/mount.sh ${D}${sysconfdir}/udev/scripts/mount.sh
sed -i 's|@systemd_unitdir@|${systemd_unitdir}|g' ${D}${sysconfdir}/udev/scripts/mount.sh
sed -i 's|@base_sbindir@|${base_sbindir}|g' ${D}${sysconfdir}/udev/scripts/mount.sh
+ sed -i 's|@MOUNT_BASE@|${MOUNT_BASE}|g' ${D}${sysconfdir}/udev/scripts/mount.sh
install -m 0755 ${WORKDIR}/network.sh ${D}${sysconfdir}/udev/scripts
}
-FILES:${PN} = "${sysconfdir}/udev"
-RDEPENDS:${PN} = "udev"
-CONFFILES:${PN} = "${sysconfdir}/udev/mount.blacklist"
+pkg_postinst:${PN} () {
+ if [ -e $D${systemd_unitdir}/system/systemd-udevd.service ]; then
+ sed -i "/\[Service\]/aMountFlags=shared" $D${systemd_unitdir}/system/systemd-udevd.service
+ fi
+}
+
+pkg_postrm:${PN} () {
+ if [ -e $D${systemd_unitdir}/system/systemd-udevd.service ]; then
+ sed -i "/MountFlags=shared/d" $D${systemd_unitdir}/system/systemd-udevd.service
+ fi
+}
+
+RDEPENDS:${PN} = "udev util-linux-blkid ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'util-linux-lsblk', '', d)}"
+CONFFILES:${PN} = "${sysconfdir}/udev/mount.ignorelist"
# to replace udev-extra-rules from meta-oe
RPROVIDES:${PN} = "udev-extra-rules"
diff --git a/meta/recipes-core/util-linux/util-linux_2.37.4.bb b/meta/recipes-core/util-linux/util-linux_2.37.4.bb
index b39020884f..8866120eed 100644
--- a/meta/recipes-core/util-linux/util-linux_2.37.4.bb
+++ b/meta/recipes-core/util-linux/util-linux_2.37.4.bb
@@ -69,7 +69,7 @@ EXTRA_OECONF = "\
--enable-libuuid --enable-libblkid \
\
--enable-fsck --enable-kill --enable-last --enable-mesg \
- --enable-mount --enable-partx --enable-raw --enable-rfkill \
+ --enable-mount --enable-partx --enable-rfkill \
--enable-unshare --enable-write \
\
--disable-bfs --disable-login \
@@ -233,6 +233,8 @@ ALTERNATIVE_TARGET[getty] = "${base_sbindir}/agetty"
ALTERNATIVE_LINK_NAME[hexdump] = "${bindir}/hexdump"
ALTERNATIVE_LINK_NAME[hwclock] = "${base_sbindir}/hwclock"
ALTERNATIVE_LINK_NAME[ionice] = "${bindir}/ionice"
+ALTERNATIVE_LINK_NAME[ipcrm] = "${bindir}/ipcrm"
+ALTERNATIVE_LINK_NAME[ipcs] = "${bindir}/ipcs"
ALTERNATIVE_LINK_NAME[kill] = "${base_bindir}/kill"
ALTERNATIVE:${PN}-last = "last lastb"
ALTERNATIVE_LINK_NAME[last] = "${bindir}/last"
diff --git a/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch b/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch
new file mode 100644
index 0000000000..d29e6e0f1f
--- /dev/null
+++ b/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch
@@ -0,0 +1,44 @@
+From 8617d83d6939754ae3a04fc2d22daa18eeea2a43 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 17 Aug 2022 10:15:57 +0530
+Subject: [PATCH] CVE-2022-37434
+
+Upstream-Status: Backport [https://github.com/madler/zlib/commit/eff308af425b67093bab25f80f1ae950166bece1 & https://github.com/madler/zlib/commit/1eb7682f845ac9e9bf9ae35bbfb3bad5dacbd91d]
+CVE: CVE-2022-37434
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+Fix a bug when getting a gzip header extra field with inflate().
+
+If the extra field was larger than the space the user provided with
+inflateGetHeader(), and if multiple calls of inflate() delivered
+the extra header data, then there could be a buffer overflow of the
+provided space. This commit assures that provided space is not
+exceeded.
+
+ Fix extra field processing bug that dereferences NULL state->head.
+
+The recent commit to fix a gzip header extra field processing bug
+introduced the new bug fixed here.
+---
+ inflate.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/inflate.c b/inflate.c
+index ac333e8..cd01857 100644
+--- a/inflate.c
++++ b/inflate.c
+@@ -759,8 +759,9 @@ int flush;
+ if (copy > have) copy = have;
+ if (copy) {
+ if (state->head != Z_NULL &&
+- state->head->extra != Z_NULL) {
+- len = state->head->extra_len - state->length;
++ state->head->extra != Z_NULL &&
++ (len = state->head->extra_len - state->length) <
++ state->head->extra_max) {
+ zmemcpy(state->head->extra + len, next,
+ len + copy > state->head->extra_max ?
+ state->head->extra_max - len : copy);
+--
+2.25.1
+
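
The overflow above can only be triggered by callers that actually request the gzip header via inflateGetHeader(). A minimal usage sketch of that zlib API, with the caller-supplied extra-field buffer bounded by extra_max (the limit the fix starts enforcing when the field arrives across several inflate() calls); the buffer handling here is illustrative:

    /* Illustrative zlib usage: the caller's extra-field buffer is bounded by
     * head->extra_max, which is exactly the limit the fix above enforces when
     * the extra field is delivered across multiple inflate() calls. */
    #include <string.h>
    #include <zlib.h>

    int init_gzip_reader(z_stream *strm, gz_header *head,
                         unsigned char *extra_buf, unsigned extra_buf_size) {
            memset(strm, 0, sizeof(*strm));
            memset(head, 0, sizeof(*head));

            /* 15 + 16 selects gzip decoding, so a header is actually present. */
            if (inflateInit2(strm, 15 + 16) != Z_OK)
                    return -1;

            head->extra = extra_buf;            /* where the extra field is copied */
            head->extra_max = extra_buf_size;   /* never more than this many bytes */

            /* Must be called before the first inflate() of the stream. */
            return inflateGetHeader(strm, head) == Z_OK ? 0 : -1;
    }

On later inflate() calls the library copies at most extra_max bytes into extra while reporting the field's full length in extra_len; with the fix, data beyond extra_max is discarded instead of overflowing the buffer.
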
diff --git a/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch b/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch
new file mode 100644
index 0000000000..ba3709249b
--- /dev/null
+++ b/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch
@@ -0,0 +1,42 @@
+From 73331a6a0481067628f065ffe87bb1d8f787d10c Mon Sep 17 00:00:00 2001
+From: Hans Wennborg <hans@chromium.org>
+Date: Fri, 18 Aug 2023 11:05:33 +0200
+Subject: [PATCH] Reject overflows of zip header fields in minizip.
+
+This checks the lengths of the file name, extra field, and comment
+that would be put in the zip headers, and rejects them if they are
+too long. They are each limited to 65535 bytes in length by the zip
+format. This also avoids possible buffer overflows if the provided
+fields are too long.
+
+CVE: CVE-2023-45853
+Upstream-Status: Backport [https://github.com/madler/zlib/commit/73331a6a0481067628f065ffe87bb1d8f787d10c]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ contrib/minizip/zip.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/contrib/minizip/zip.c b/contrib/minizip/zip.c
+index 3d3d4cadd..0446109b2 100644
+--- a/contrib/minizip/zip.c
++++ b/contrib/minizip/zip.c
+@@ -1043,6 +1043,17 @@ extern int ZEXPORT zipOpenNewFileInZip4_64(zipFile file, const char* filename, c
+ return ZIP_PARAMERROR;
+ #endif
+
++ // The filename and comment length must fit in 16 bits.
++ if ((filename!=NULL) && (strlen(filename)>0xffff))
++ return ZIP_PARAMERROR;
++ if ((comment!=NULL) && (strlen(comment)>0xffff))
++ return ZIP_PARAMERROR;
++ // The extra field length must fit in 16 bits. If the member also requires
++ // a Zip64 extra block, that will also need to fit within that 16-bit
++ // length, but that will be checked for later.
++ if ((size_extrafield_local>0xffff) || (size_extrafield_global>0xffff))
++ return ZIP_PARAMERROR;
++
+ zi = (zip64_internal*)file;
+
+ if (zi->in_opened_file_inzip == 1)
diff --git a/meta/recipes-core/zlib/zlib_1.2.11.bb b/meta/recipes-core/zlib/zlib_1.2.11.bb
index f8bcc0abcf..393ac61e3d 100644
--- a/meta/recipes-core/zlib/zlib_1.2.11.bb
+++ b/meta/recipes-core/zlib/zlib_1.2.11.bb
@@ -11,6 +11,8 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/libpng/${BPN}/${PV}/${BPN}-${PV}.tar.xz \
file://0001-configure-Pass-LDFLAGS-to-link-tests.patch \
file://CVE-2018-25032.patch \
file://run-ptest \
+ file://CVE-2022-37434.patch \
+ file://CVE-2023-45853.patch \
"
UPSTREAM_CHECK_URI = "http://zlib.net/"
@@ -52,3 +54,6 @@ do_install:append:class-target() {
}
BBCLASSEXTEND = "native nativesdk"
+
+# this CVE is for cloudflare zlib
+CVE_CHECK_IGNORE += "CVE-2023-6992"
diff --git a/meta/recipes-devtools/apt/apt/0001-add-missing-cstdint-for-uint16_t.patch b/meta/recipes-devtools/apt/apt/0001-add-missing-cstdint-for-uint16_t.patch
new file mode 100644
index 0000000000..44aa8a5873
--- /dev/null
+++ b/meta/recipes-devtools/apt/apt/0001-add-missing-cstdint-for-uint16_t.patch
@@ -0,0 +1,35 @@
+From 960d10e89cf60d39998dae6fdcd4f0866b753a79 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Mon, 23 Jan 2023 12:31:35 -0800
+Subject: [PATCH] add missing <cstdint> for uint16_t
+
+This fixes build problems with gcc 13 snapshot [1]
+
+Fixes
+| include/apt-pkg/pkgcache.h:257:23: warning: cast from 'char*' to 'const uint16_t*' {aka 'const short unsigned int*'} increases required alignment of target type [-Wcast-align]
+| 257 | uint16_t len = *reinterpret_cast<const uint16_t*>(name - sizeof(uint16_t));
+| | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+[1] https://www.gnu.org/software/gcc/gcc-13/porting_to.html
+
+Upstream-Status: Submitted [https://salsa.debian.org/apt-team/apt/-/merge_requests/276]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ apt-pkg/contrib/mmap.cc | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/apt-pkg/contrib/mmap.cc b/apt-pkg/contrib/mmap.cc
+index 642e20473..0568e1cd0 100644
+--- a/apt-pkg/contrib/mmap.cc
++++ b/apt-pkg/contrib/mmap.cc
+@@ -23,6 +23,7 @@
+ #include <apt-pkg/macros.h>
+ #include <apt-pkg/mmap.h>
+
++#include <cstdint>
+ #include <cstring>
+ #include <string>
+ #include <errno.h>
+--
+2.39.1
+
diff --git a/meta/recipes-devtools/apt/apt_2.4.5.bb b/meta/recipes-devtools/apt/apt_2.4.5.bb
index 95c25e3036..9ceabcc186 100644
--- a/meta/recipes-devtools/apt/apt_2.4.5.bb
+++ b/meta/recipes-devtools/apt/apt_2.4.5.bb
@@ -13,6 +13,7 @@ SRC_URI = "${DEBIAN_MIRROR}/main/a/apt/${BPN}_${PV}.tar.xz \
file://0001-cmake-Do-not-build-po-files.patch \
file://0001-Hide-fstatat64-and-prlimit64-defines-on-musl.patch \
file://0001-aptwebserver.cc-Include-array.patch \
+ file://0001-add-missing-cstdint-for-uint16_t.patch \
"
SRC_URI:append:class-native = " \
@@ -117,6 +118,7 @@ do_install:append:class-native() {
do_install:append:class-nativesdk() {
customize_apt_conf_sample
+ rm -rf ${D}${localstatedir}/log
}
do_install:append:class-target() {
@@ -132,5 +134,5 @@ do_install:append:class-target() {
do_install:append() {
# Avoid non-reproducible -src package
- sed -i -e "s,${B},,g" ${B}/apt-pkg/tagfile-keys.cc
+ sed -i -e "s,${B}/include/,,g" ${B}/apt-pkg/tagfile-keys.cc
}
diff --git a/meta/recipes-devtools/autoconf/autoconf/0001-Port-to-compilers-that-moan-about-K-R-func-decls.patch b/meta/recipes-devtools/autoconf/autoconf/0001-Port-to-compilers-that-moan-about-K-R-func-decls.patch
new file mode 100644
index 0000000000..4f15bf96c3
--- /dev/null
+++ b/meta/recipes-devtools/autoconf/autoconf/0001-Port-to-compilers-that-moan-about-K-R-func-decls.patch
@@ -0,0 +1,138 @@
+From 7a3bbca81b803ba116b83c82de378e840cc35f81 Mon Sep 17 00:00:00 2001
+From: Paul Eggert <eggert@cs.ucla.edu>
+Date: Thu, 1 Sep 2022 16:19:50 -0500
+Subject: [PATCH] Port to compilers that moan about K&R func decls
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* lib/autoconf/c.m4 (AC_LANG_CALL, AC_LANG_FUNC_LINK_TRY):
+Use '(void)' rather than '()' in function prototypes, as the latter
+provokes fatal errors in some compilers nowadays.
+* lib/autoconf/functions.m4 (AC_FUNC_STRTOD):
+* tests/fortran.at (AC_F77_DUMMY_MAIN usage):
+* tests/semantics.at (AC_CHECK_DECLS):
+Don’t use () in a function decl.
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/autoconf.git/commit/?id=8b5e2016c7ed2d67f31b03a3d2e361858ff5299b]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ doc/autoconf.texi | 7 +++----
+ lib/autoconf/c.m4 | 6 +++---
+ lib/autoconf/functions.m4 | 3 ---
+ tests/fortran.at | 8 ++++----
+ tests/semantics.at | 2 +-
+ 5 files changed, 11 insertions(+), 15 deletions(-)
+
+--- a/doc/autoconf.texi
++++ b/doc/autoconf.texi
+@@ -5465,9 +5465,7 @@ the @samp{#undef malloc}):
+ #include <config.h>
+ #undef malloc
+
+-#include <sys/types.h>
+-
+-void *malloc ();
++#include <stdlib.h>
+
+ /* Allocate an N-byte block of memory from the heap.
+ If N is zero, allocate a 1-byte block. */
+@@ -8295,7 +8293,7 @@ needed:
+ # ifdef __cplusplus
+ extern "C"
+ # endif
+- int F77_DUMMY_MAIN () @{ return 1; @}
++ int F77_DUMMY_MAIN (void) @{ return 1; @}
+ #endif
+ @end example
+
+--- a/lib/autoconf/c.m4
++++ b/lib/autoconf/c.m4
+@@ -127,7 +127,7 @@ m4_if([$2], [main], ,
+ [/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+-char $2 ();])], [return $2 ();])])
++char $2 (void);])], [return $2 ();])])
+
+
+ # AC_LANG_FUNC_LINK_TRY(C)(FUNCTION)
+@@ -151,7 +151,7 @@ m4_define([AC_LANG_FUNC_LINK_TRY(C)],
+ #define $1 innocuous_$1
+
+ /* System header to define __stub macros and hopefully few prototypes,
+- which can conflict with char $1 (); below. */
++ which can conflict with char $1 (void); below. */
+
+ #include <limits.h>
+ #undef $1
+@@ -162,7 +162,7 @@ m4_define([AC_LANG_FUNC_LINK_TRY(C)],
+ #ifdef __cplusplus
+ extern "C"
+ #endif
+-char $1 ();
++char $1 (void);
+ /* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+--- a/lib/autoconf/functions.m4
++++ b/lib/autoconf/functions.m4
+@@ -1601,9 +1601,6 @@ AC_DEFUN([AC_FUNC_STRTOD],
+ AC_CACHE_CHECK(for working strtod, ac_cv_func_strtod,
+ [AC_RUN_IFELSE([AC_LANG_SOURCE([[
+ ]AC_INCLUDES_DEFAULT[
+-#ifndef strtod
+-double strtod ();
+-#endif
+ int
+ main (void)
+ {
+--- a/tests/fortran.at
++++ b/tests/fortran.at
+@@ -233,7 +233,7 @@ void FOOBAR_F77 (double *x, double *y);
+ # ifdef __cplusplus
+ extern "C"
+ # endif
+- int F77_DUMMY_MAIN () { return 1; }
++ int F77_DUMMY_MAIN (void) { return 1; }
+ #endif
+
+ int main(int argc, char *argv[])
+@@ -315,7 +315,7 @@ void FOOBAR_FC(double *x, double *y);
+ # ifdef __cplusplus
+ extern "C"
+ # endif
+- int FC_DUMMY_MAIN () { return 1; }
++ int FC_DUMMY_MAIN (void) { return 1; }
+ #endif
+
+ int main (int argc, char *argv[])
+@@ -561,7 +561,7 @@ void @foobar@ (int *x);
+ # ifdef __cplusplus
+ extern "C"
+ # endif
+- int F77_DUMMY_MAIN () { return 1; }
++ int F77_DUMMY_MAIN (void) { return 1; }
+ #endif
+
+ int main(int argc, char *argv[])
+@@ -637,7 +637,7 @@ void @foobar@ (int *x);
+ # ifdef __cplusplus
+ extern "C"
+ # endif
+- int FC_DUMMY_MAIN () { return 1; }
++ int FC_DUMMY_MAIN (void) { return 1; }
+ #endif
+
+ int main(int argc, char *argv[])
+--- a/tests/semantics.at
++++ b/tests/semantics.at
+@@ -207,7 +207,7 @@ AT_CHECK_MACRO([AC_CHECK_DECLS],
+ [[extern int yes;
+ enum { myenum };
+ extern struct mystruct_s { int x[20]; } mystruct;
+- extern int myfunc();
++ extern int myfunc (int);
+ #define mymacro1(arg) arg
+ #define mymacro2]])
+ # Ensure we can detect missing declarations of functions whose
diff --git a/meta/recipes-devtools/autoconf/autoconf_2.71.bb b/meta/recipes-devtools/autoconf/autoconf_2.71.bb
index 799191e2ca..97c241a3f5 100644
--- a/meta/recipes-devtools/autoconf/autoconf_2.71.bb
+++ b/meta/recipes-devtools/autoconf/autoconf_2.71.bb
@@ -18,6 +18,7 @@ SRC_URI = "${GNU_MIRROR}/autoconf/${BP}.tar.gz \
file://preferbash.patch \
file://autotest-automake-result-format.patch \
file://man-host-perl.patch \
+ file://0001-Port-to-compilers-that-moan-about-K-R-func-decls.patch \
"
SRC_URI:append:class-native = " file://no-man.patch"
diff --git a/meta/recipes-devtools/automake/automake/buildtest.patch b/meta/recipes-devtools/automake/automake/buildtest.patch
index b88b9e8693..c43a4ac8f3 100644
--- a/meta/recipes-devtools/automake/automake/buildtest.patch
+++ b/meta/recipes-devtools/automake/automake/buildtest.patch
@@ -36,7 +36,7 @@ index e0db651..de137fa 100644
-check-TESTS: $(TESTS)
+AM_RECURSIVE_TARGETS += buildtest runtest
+
-+buildtest-TESTS: $(TESTS)
++buildtest-TESTS: $(TESTS) $(check_PROGRAMS)
+
+check-TESTS: buildtest-TESTS
+ $(MAKE) $(AM_MAKEFLAGS) runtest-TESTS
diff --git a/meta/recipes-devtools/binutils/binutils-2.38.inc b/meta/recipes-devtools/binutils/binutils-2.38.inc
index dc0a2a4054..bbe7bb57b2 100644
--- a/meta/recipes-devtools/binutils/binutils-2.38.inc
+++ b/meta/recipes-devtools/binutils/binutils-2.38.inc
@@ -18,7 +18,7 @@ SRCBRANCH ?= "binutils-2_38-branch"
UPSTREAM_CHECK_GITTAGREGEX = "binutils-(?P<pver>\d+_(\d_?)*)"
-SRCREV ?= "134f17ef688ba4c72a6c4e57af7382882cc1a705"
+SRCREV ?= "ea5fe5d01e5a182ee7a0eddb54a702109a9f5931"
BINUTILS_GIT_URI ?= "git://sourceware.org/git/binutils-gdb.git;branch=${SRCBRANCH};protocol=git"
SRC_URI = "\
${BINUTILS_GIT_URI} \
@@ -32,5 +32,44 @@ SRC_URI = "\
file://0011-sync-with-OE-libtool-changes.patch \
file://0012-Check-for-clang-before-checking-gcc-version.patch \
file://0013-Avoid-as-info-race-condition.patch \
+ file://0014-CVE-2019-1010204.patch \
+ file://0015-CVE-2022-38533.patch \
+ file://0016-CVE-2022-38126.patch \
+ file://0017-CVE-2022-38127-1.patch \
+ file://0017-CVE-2022-38127-2.patch \
+ file://0017-CVE-2022-38127-3.patch \
+ file://0017-CVE-2022-38127-4.patch \
+ file://0018-CVE-2022-38128-1.patch \
+ file://0018-CVE-2022-38128-2.patch \
+ file://0018-CVE-2022-38128-3.patch \
+ file://0019-CVE-2022-4285.patch \
+ file://0020-CVE-2023-22608-1.patch \
+ file://0020-CVE-2023-22608-2.patch \
+ file://0020-CVE-2023-22608-3.patch \
+ file://0021-CVE-2023-1579-1.patch \
+ file://0021-CVE-2023-1579-2.patch \
+ file://0021-CVE-2023-1579-3.patch \
+ file://0021-CVE-2023-1579-4.patch \
+ file://0022-CVE-2023-25584-1.patch \
+ file://0022-CVE-2023-25584-2.patch \
+ file://0022-CVE-2023-25584-3.patch \
+ file://0023-CVE-2023-25585.patch \
+ file://0026-CVE-2023-1972.patch \
+ file://0025-CVE-2023-25588.patch \
+ file://0027-CVE-2022-47008.patch \
+ file://0028-CVE-2022-47011.patch \
+ file://0029-CVE-2022-48065-1.patch \
+ file://0029-CVE-2022-48065-2.patch \
+ file://0029-CVE-2022-48065-3.patch \
+ file://0030-CVE-2022-44840.patch \
+ file://0031-CVE-2022-45703-1.patch \
+ file://0031-CVE-2022-45703-2.patch \
+ file://0031-CVE-2022-47695.patch \
+ file://CVE-2022-48063.patch \
+ file://0032-CVE-2022-47010.patch \
+ file://0033-CVE-2022-47007.patch \
+ file://0034-CVE-2022-48064.patch \
+ file://0035-CVE-2023-39129.patch \
+ file://0036-CVE-2023-39130.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch b/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch
index 59a97c13c7..8a5f4a8d79 100644
--- a/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch
+++ b/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch
@@ -65,7 +65,7 @@ index 121c25d948f..34cbc60e5e9 100644
info.path = NULL;
info.len = info.alloc = 0;
- tmppath = concat (ld_sysroot, prefix, "/etc/ld.so.conf",
-+ tmppath = concat (ld_sysconfdir, "/etc/ld.so.conf",
++ tmppath = concat (ld_sysconfdir, "/ld.so.conf",
(const char *) NULL);
if (!ldelf_parse_ld_so_conf (&info, tmppath))
{
diff --git a/meta/recipes-devtools/binutils/binutils/0014-CVE-2019-1010204.patch b/meta/recipes-devtools/binutils/binutils/0014-CVE-2019-1010204.patch
new file mode 100644
index 0000000000..dad4a62038
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0014-CVE-2019-1010204.patch
@@ -0,0 +1,49 @@
+From 2a4fc266dbf77ed7ab83da16468e9ba627b8bc2d Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Mon, 27 Jun 2022 13:07:40 +0100
+Subject: [PATCH] Have gold's File_read::do_read() function check the start
+ parameter
+
+ PR 23765
+ * fileread.cc (File_read::do_read): Check start parameter before
+ computing number of bytes to read.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=2a4fc266dbf77ed7ab83da16468e9ba627b8bc2d]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ gold/ChangeLog | 6 ++++++
+ gold/fileread.cc | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/gold/ChangeLog b/gold/ChangeLog
+index 5103dab7b67..8557dc6db7f 100644
+--- a/gold/ChangeLog
++++ b/gold/ChangeLog
+@@ -1,3 +1,9 @@
++2022-06-27 Nick Clifton <nickc@redhat.com>
++
++ PR 23765
++ * fileread.cc (File_read::do_read): Check start parameter before
++ computing number of bytes to read.
++
+ 2022-02-17 Nick Clifton <nickc@redhat.com>
+
+ * po/sr.po: Updated Serbian translation.
+diff --git a/gold/fileread.cc b/gold/fileread.cc
+index 2b653f78c2e..af2df215468 100644
+--- a/gold/fileread.cc
++++ b/gold/fileread.cc
+@@ -385,6 +385,12 @@ File_read::do_read(off_t start, section_
+ ssize_t bytes;
+ if (this->whole_file_view_ != NULL)
+ {
++ // See PR 23765 for an example of a testcase that triggers this error.
++ if (((ssize_t) start) < 0)
++ gold_fatal(_("%s: read failed, starting offset (%#llx) less than zero"),
++ this->filename().c_str(),
++ static_cast<long long>(start));
++
+ bytes = this->size_ - start;
+ if (static_cast<section_size_type>(bytes) >= size)
+ {
diff --git a/meta/recipes-devtools/binutils/binutils/0015-CVE-2022-38533.patch b/meta/recipes-devtools/binutils/binutils/0015-CVE-2022-38533.patch
new file mode 100644
index 0000000000..5d9ac2cb1f
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0015-CVE-2022-38533.patch
@@ -0,0 +1,36 @@
+From ef186fe54aa6d281a3ff8a9528417e5cc614c797 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Sat, 13 Aug 2022 15:32:47 +0930
+Subject: [PATCH] PR29482 - strip: heap-buffer-overflow
+
+ PR 29482
+ * coffcode.h (coff_set_section_contents): Sanity check _LIB.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=ef186fe54aa6d281a3ff8a9528417e5cc614c797]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+
+---
+ bfd/coffcode.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/bfd/coffcode.h b/bfd/coffcode.h
+index 67aaf158ca1..52027981c3f 100644
+--- a/bfd/coffcode.h
++++ b/bfd/coffcode.h
+@@ -4302,10 +4302,13 @@ coff_set_section_contents (bfd * abfd,
+
+ rec = (bfd_byte *) location;
+ recend = rec + count;
+- while (rec < recend)
++ while (recend - rec >= 4)
+ {
++ size_t len = bfd_get_32 (abfd, rec);
++ if (len == 0 || len > (size_t) (recend - rec) / 4)
++ break;
++ rec += len * 4;
+ ++section->lma;
+- rec += bfd_get_32 (abfd, rec) * 4;
+ }
+
+ BFD_ASSERT (rec == recend);
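
The hardened loop above follows a general pattern: never advance through length-prefixed records using a length read from untrusted input without first checking it against the remaining buffer. A generic, stand-alone C sketch of that pattern (little-endian reader and names are illustrative; the real code uses bfd_get_32() and the target's byte order):

    /* Generic bounds-checked traversal of length-prefixed records, mirroring
     * the hardened loop in the patch above. */
    #include <stddef.h>
    #include <stdint.h>

    static uint32_t read_u32_le(const unsigned char *p) {
            return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
                   ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
    }

    size_t count_records(const unsigned char *rec, size_t count) {
            const unsigned char *recend = rec + count;
            size_t n = 0;

            while ((size_t) (recend - rec) >= 4) {
                    size_t len = read_u32_le(rec);   /* length in 4-byte words */
                    if (len == 0 || len > (size_t) (recend - rec) / 4)
                            break;                   /* corrupt input: stop */
                    rec += len * 4;
                    n++;
            }
            return n;
    }
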
diff --git a/meta/recipes-devtools/binutils/binutils/0016-CVE-2022-38126.patch b/meta/recipes-devtools/binutils/binutils/0016-CVE-2022-38126.patch
new file mode 100644
index 0000000000..8200e28a81
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0016-CVE-2022-38126.patch
@@ -0,0 +1,34 @@
+From e3e5ae049371a27fd1737aba946fe26d06e029b5 Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Mon, 27 Jun 2022 13:43:02 +0100
+Subject: [PATCH] Replace a run-time assertion failure with a warning message
+ when parsing corrupt DWARF data.
+
+ PR 29289
+ * dwarf.c (display_debug_names): Replace assert with a warning
+ message.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=e3e5ae049371a27fd1737aba946fe26d06e029b5]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/dwarf.c | 7 ++++++-
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index 37b477b886d..b99c56987da 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -9802,7 +9802,12 @@ display_debug_names (struct dwarf_sectio
+ printf (_("Out of %lu items there are %zu bucket clashes"
+ " (longest of %zu entries).\n"),
+ (unsigned long) name_count, hash_clash_count, longest_clash);
+- assert (name_count == buckets_filled + hash_clash_count);
++
++ if (name_count != buckets_filled + hash_clash_count)
++ warn (_("The name_count (%lu) is not the same as the used bucket_count (%lu) + the hash clash count (%lu)"),
++ (unsigned long) name_count,
++ (unsigned long) buckets_filled,
++ (unsigned long) hash_clash_count);
+
+ struct abbrev_lookup_entry
+ {
diff --git a/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-1.patch b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-1.patch
new file mode 100644
index 0000000000..9bbf1d6453
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-1.patch
@@ -0,0 +1,1224 @@
+From 19c26da69d68d5d863f37c06ad73ab6292d02ffa Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Wed, 6 Apr 2022 14:43:37 +0100
+Subject: [PATCH] Add code to display the contents of .debug_loclists sections
+ which contain offset entry tables.
+
+ PR 28981
+ * dwarf.c (fetch_indexed_value): Rename to fetch_indexed_addr and
+ return the address, rather than a string.
+ (fetch_indexed_value): New function - returns a value indexed by a
+ DW_FORM_loclistx or DW_FORM_rnglistx form.
+ (read_and_display_attr_value): Add support for DW_FORM_loclistx
+ and DW_FORM_rnglistx.
+ (process_debug_info): Load the loclists and rnglists sections.
+ (display_loclists_list): Add support for DW_LLE_base_addressx,
+ DW_LLE_startx_endx, DW_LLE_startx_length and
+ DW_LLE_default_location.
+ (display_offset_entry_loclists): New function. Displays a
+ .debug_loclists section that contains offset entry tables.
+ (display_debug_loc): Call the new function.
+ (display_debug_rnglists_list): Add support for
+ DW_RLE_base_addressx, DW_RLE_startx_endx and DW_RLE_startx_length.
+ (display_debug_ranges): Display the contents of the section's
+ header.
+ * dwarf.h (struct debug_info): Add loclists_base field.
+ * testsuite/binutils-all/dw5.W: Update expected output.
+ * testsuite/binutils-all/x86-64/pr26808.dump: Likewise.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=19c26da69d68d5d863f37c06ad73ab6292d02ffa]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/ChangeLog | 24 +
+ binutils/dwarf.c | 513 +++++++++++++++---
+ binutils/dwarf.h | 4 +
+ binutils/testsuite/binutils-all/dw5.W | 2 +-
+ .../binutils-all/x86-64/pr26808.dump | 82 +--
+ gas/ChangeLog | 5 +
+ gas/testsuite/gas/elf/dwarf-5-irp.d | 2 +-
+ 7 files changed, 517 insertions(+), 115 deletions(-)
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index 15b3c81a138..bc862f77c04 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -240,7 +240,7 @@ static const char *
+ dwarf_vmatoa_1 (const char *fmtch, dwarf_vma value, unsigned num_bytes)
+ {
+ /* As dwarf_vmatoa is used more then once in a printf call
+- for output, we are cycling through an fixed array of pointers
++ for output, we are cycling through a fixed array of pointers
+ for return address. */
+ static int buf_pos = 0;
+ static struct dwarf_vmatoa_buf
+@@ -796,24 +796,70 @@ fetch_indexed_string (dwarf_vma idx, str
+ return ret;
+ }
+
+-static const char *
+-fetch_indexed_value (dwarf_vma offset, dwarf_vma bytes)
++static dwarf_vma
++fetch_indexed_addr (dwarf_vma offset, uint32_t num_bytes)
+ {
+ struct dwarf_section *section = &debug_displays [debug_addr].section;
+
+ if (section->start == NULL)
+- return (_("<no .debug_addr section>"));
++ {
++ warn (_("<no .debug_addr section>"));
++ return 0;
++ }
+
+- if (offset + bytes > section->size)
++ if (offset + num_bytes > section->size)
+ {
+ warn (_("Offset into section %s too big: 0x%s\n"),
+ section->name, dwarf_vmatoa ("x", offset));
+- return "<offset too big>";
++ return 0;
+ }
+
+- return dwarf_vmatoa ("x", byte_get (section->start + offset, bytes));
++ return byte_get (section->start + offset, num_bytes);
+ }
+
++/* Fetch a value from a debug section that has been indexed by
++ something in another section (eg DW_FORM_loclistx).
++ Returns 0 if the value could not be found. */
++
++static dwarf_vma
++fetch_indexed_value (dwarf_vma index,
++ enum dwarf_section_display_enum sec_enum)
++{
++ struct dwarf_section *section = &debug_displays [sec_enum].section;
++
++ if (section->start == NULL)
++ {
++ warn (_("Unable to locate %s section\n"), section->uncompressed_name);
++ return 0;
++ }
++
++ uint32_t pointer_size, bias;
++
++ if (byte_get (section->start, 4) == 0xffffffff)
++ {
++ pointer_size = 8;
++ bias = 20;
++ }
++ else
++ {
++ pointer_size = 4;
++ bias = 12;
++ }
++
++ dwarf_vma offset = index * pointer_size;
++
++ /* Offsets are biased by the size of the section header. */
++ offset += bias;
++
++ if (offset + pointer_size > section->size)
++ {
++ warn (_("Offset into section %s too big: 0x%s\n"),
++ section->name, dwarf_vmatoa ("x", offset));
++ return 0;
++ }
++
++ return byte_get (section->start + offset, pointer_size);
++}
+
+ /* FIXME: There are better and more efficient ways to handle
+ these structures. For now though, I just want something that
+@@ -1999,6 +2045,8 @@ skip_attr_bytes (unsigned long form,
+ case DW_FORM_strx:
+ case DW_FORM_GNU_addr_index:
+ case DW_FORM_addrx:
++ case DW_FORM_loclistx:
++ case DW_FORM_rnglistx:
+ READ_ULEB (uvalue, data, end);
+ break;
+
+@@ -2410,9 +2458,6 @@ read_and_display_attr_value (unsigned lo
+
+ switch (form)
+ {
+- default:
+- break;
+-
+ case DW_FORM_ref_addr:
+ if (dwarf_version == 2)
+ SAFE_BYTE_GET_AND_INC (uvalue, data, pointer_size, end);
+@@ -2496,6 +2541,8 @@ read_and_display_attr_value (unsigned lo
+ case DW_FORM_udata:
+ case DW_FORM_GNU_addr_index:
+ case DW_FORM_addrx:
++ case DW_FORM_loclistx:
++ case DW_FORM_rnglistx:
+ READ_ULEB (uvalue, data, end);
+ break;
+
+@@ -2515,6 +2562,9 @@ read_and_display_attr_value (unsigned lo
+ case DW_FORM_implicit_const:
+ uvalue = implicit_const;
+ break;
++
++ default:
++ break;
+ }
+
+ switch (form)
+@@ -2710,6 +2760,8 @@ read_and_display_attr_value (unsigned lo
+ case DW_FORM_addrx2:
+ case DW_FORM_addrx3:
+ case DW_FORM_addrx4:
++ case DW_FORM_loclistx:
++ case DW_FORM_rnglistx:
+ if (!do_loc)
+ {
+ dwarf_vma base;
+@@ -2728,11 +2780,11 @@ read_and_display_attr_value (unsigned lo
+ /* We have already displayed the form name. */
+ printf (_("%c(index: 0x%s): %s"), delimiter,
+ dwarf_vmatoa ("x", uvalue),
+- fetch_indexed_value (offset, pointer_size));
++ dwarf_vmatoa ("x", fetch_indexed_addr (offset, pointer_size)));
+ else
+ printf (_("%c(addr_index: 0x%s): %s"), delimiter,
+ dwarf_vmatoa ("x", uvalue),
+- fetch_indexed_value (offset, pointer_size));
++ dwarf_vmatoa ("x", fetch_indexed_addr (offset, pointer_size)));
+ }
+ break;
+
+@@ -2754,6 +2806,13 @@ read_and_display_attr_value (unsigned lo
+ {
+ switch (attribute)
+ {
++ case DW_AT_loclists_base:
++ if (debug_info_p->loclists_base)
++ warn (_("CU @ 0x%s has multiple loclists_base values"),
++ dwarf_vmatoa ("x", debug_info_p->cu_offset));
++ debug_info_p->loclists_base = uvalue;
++ break;
++
+ case DW_AT_frame_base:
+ have_frame_base = 1;
+ /* Fall through. */
+@@ -2776,7 +2835,8 @@ read_and_display_attr_value (unsigned lo
+ case DW_AT_GNU_call_site_target_clobbered:
+ if ((dwarf_version < 4
+ && (form == DW_FORM_data4 || form == DW_FORM_data8))
+- || form == DW_FORM_sec_offset)
++ || form == DW_FORM_sec_offset
++ || form == DW_FORM_loclistx)
+ {
+ /* Process location list. */
+ unsigned int lmax = debug_info_p->max_loc_offsets;
+@@ -2796,11 +2856,17 @@ read_and_display_attr_value (unsigned lo
+ lmax, sizeof (*debug_info_p->have_frame_base));
+ debug_info_p->max_loc_offsets = lmax;
+ }
+- if (this_set != NULL)
++
++ if (form == DW_FORM_loclistx)
++ uvalue = fetch_indexed_value (uvalue, loclists);
++ else if (this_set != NULL)
+ uvalue += this_set->section_offsets [DW_SECT_LOC];
++
+ debug_info_p->have_frame_base [num] = have_frame_base;
+ if (attribute != DW_AT_GNU_locviews)
+ {
++ uvalue += debug_info_p->loclists_base;
++
+ /* Corrupt DWARF info can produce more offsets than views.
+ See PR 23062 for an example. */
+ if (debug_info_p->num_loc_offsets
+@@ -2844,7 +2910,8 @@ read_and_display_attr_value (unsigned lo
+ case DW_AT_ranges:
+ if ((dwarf_version < 4
+ && (form == DW_FORM_data4 || form == DW_FORM_data8))
+- || form == DW_FORM_sec_offset)
++ || form == DW_FORM_sec_offset
++ || form == DW_FORM_rnglistx)
+ {
+ /* Process range list. */
+ unsigned int lmax = debug_info_p->max_range_lists;
+@@ -2858,6 +2925,10 @@ read_and_display_attr_value (unsigned lo
+ lmax, sizeof (*debug_info_p->range_lists));
+ debug_info_p->max_range_lists = lmax;
+ }
++
++ if (form == DW_FORM_rnglistx)
++ uvalue = fetch_indexed_value (uvalue, rnglists);
++
+ debug_info_p->range_lists [num] = uvalue;
+ debug_info_p->num_range_lists++;
+ }
+@@ -3231,6 +3302,7 @@ read_and_display_attr_value (unsigned lo
+ have_frame_base = 1;
+ /* Fall through. */
+ case DW_AT_location:
++ case DW_AT_loclists_base:
+ case DW_AT_string_length:
+ case DW_AT_return_addr:
+ case DW_AT_data_member_location:
+@@ -3248,7 +3320,8 @@ read_and_display_attr_value (unsigned lo
+ case DW_AT_GNU_call_site_target_clobbered:
+ if ((dwarf_version < 4
+ && (form == DW_FORM_data4 || form == DW_FORM_data8))
+- || form == DW_FORM_sec_offset)
++ || form == DW_FORM_sec_offset
++ || form == DW_FORM_loclistx)
+ printf (_(" (location list)"));
+ /* Fall through. */
+ case DW_AT_allocated:
+@@ -3517,6 +3590,9 @@ process_debug_info (struct dwarf_section
+ }
+
+ load_debug_section_with_follow (abbrev_sec, file);
++ load_debug_section_with_follow (loclists, file);
++ load_debug_section_with_follow (rnglists, file);
++
+ if (debug_displays [abbrev_sec].section.start == NULL)
+ {
+ warn (_("Unable to locate %s section!\n"),
+@@ -3729,6 +3805,7 @@ process_debug_info (struct dwarf_section
+ debug_information [unit].have_frame_base = NULL;
+ debug_information [unit].max_loc_offsets = 0;
+ debug_information [unit].num_loc_offsets = 0;
++ debug_information [unit].loclists_base = 0;
+ debug_information [unit].range_lists = NULL;
+ debug_information [unit].max_range_lists= 0;
+ debug_information [unit].num_range_lists = 0;
+@@ -6465,20 +6542,21 @@ display_loc_list (struct dwarf_section *
+ /* Display a location list from a normal (ie, non-dwo) .debug_loclists section. */
+
+ static void
+-display_loclists_list (struct dwarf_section *section,
+- unsigned char **start_ptr,
+- unsigned int debug_info_entry,
+- dwarf_vma offset,
+- dwarf_vma base_address,
+- unsigned char **vstart_ptr,
+- int has_frame_base)
+-{
+- unsigned char *start = *start_ptr, *vstart = *vstart_ptr;
+- unsigned char *section_end = section->start + section->size;
+- dwarf_vma cu_offset;
+- unsigned int pointer_size;
+- unsigned int offset_size;
+- int dwarf_version;
++display_loclists_list (struct dwarf_section * section,
++ unsigned char ** start_ptr,
++ unsigned int debug_info_entry,
++ dwarf_vma offset,
++ dwarf_vma base_address,
++ unsigned char ** vstart_ptr,
++ int has_frame_base)
++{
++ unsigned char * start = *start_ptr;
++ unsigned char * vstart = *vstart_ptr;
++ unsigned char * section_end = section->start + section->size;
++ dwarf_vma cu_offset;
++ unsigned int pointer_size;
++ unsigned int offset_size;
++ unsigned int dwarf_version;
+
+ /* Initialize it due to a false compiler warning. */
+ dwarf_vma begin = -1, vbegin = -1;
+@@ -6544,27 +6622,59 @@ display_loclists_list (struct dwarf_sect
+ case DW_LLE_end_of_list:
+ printf (_("<End of list>\n"));
+ break;
++
++ case DW_LLE_base_addressx:
++ READ_ULEB (base_address, start, section_end);
++ print_dwarf_vma (base_address, pointer_size);
++ printf (_("(index into .debug_addr) "));
++ base_address = fetch_indexed_addr (base_address, pointer_size);
++ print_dwarf_vma (base_address, pointer_size);
++ printf (_("(base address)\n"));
++ break;
++
++ case DW_LLE_startx_endx:
++ READ_ULEB (begin, start, section_end);
++ begin = fetch_indexed_addr (begin, pointer_size);
++ READ_ULEB (end, start, section_end);
++ end = fetch_indexed_addr (end, pointer_size);
++ break;
++
++ case DW_LLE_startx_length:
++ READ_ULEB (begin, start, section_end);
++ begin = fetch_indexed_addr (begin, pointer_size);
++ READ_ULEB (end, start, section_end);
++ end += begin;
++ break;
++
++ case DW_LLE_default_location:
++ begin = end = 0;
++ break;
++
+ case DW_LLE_offset_pair:
+ READ_ULEB (begin, start, section_end);
+ begin += base_address;
+ READ_ULEB (end, start, section_end);
+ end += base_address;
+ break;
++
++ case DW_LLE_base_address:
++ SAFE_BYTE_GET_AND_INC (base_address, start, pointer_size,
++ section_end);
++ print_dwarf_vma (base_address, pointer_size);
++ printf (_("(base address)\n"));
++ break;
++
+ case DW_LLE_start_end:
+ SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, section_end);
+ SAFE_BYTE_GET_AND_INC (end, start, pointer_size, section_end);
+ break;
++
+ case DW_LLE_start_length:
+ SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, section_end);
+ READ_ULEB (end, start, section_end);
+ end += begin;
+ break;
+- case DW_LLE_base_address:
+- SAFE_BYTE_GET_AND_INC (base_address, start, pointer_size,
+- section_end);
+- print_dwarf_vma (base_address, pointer_size);
+- printf (_("(base address)\n"));
+- break;
++
+ #ifdef DW_LLE_view_pair
+ case DW_LLE_view_pair:
+ if (vstart)
+@@ -6578,15 +6688,17 @@ display_loclists_list (struct dwarf_sect
+ printf (_("views for:\n"));
+ continue;
+ #endif
++
+ default:
+ error (_("Invalid location list entry type %d\n"), llet);
+ return;
+ }
++
+ if (llet == DW_LLE_end_of_list)
+ break;
+- if (llet != DW_LLE_offset_pair
+- && llet != DW_LLE_start_end
+- && llet != DW_LLE_start_length)
++
++ if (llet == DW_LLE_base_address
++ || llet == DW_LLE_base_addressx)
+ continue;
+
+ if (start == section_end)
+@@ -6828,6 +6940,218 @@ loc_offsets_compar (const void *ap, cons
+ }
+
+ static int
++display_offset_entry_loclists (struct dwarf_section *section)
++{
++ unsigned char * start = section->start;
++ unsigned char * const end = start + section->size;
++
++ introduce (section, false);
++
++ do
++ {
++ dwarf_vma length;
++ unsigned short version;
++ unsigned char address_size;
++ unsigned char segment_selector_size;
++ uint32_t offset_entry_count;
++ uint32_t i;
++ bool is_64bit;
++
++ printf (_("Table at Offset 0x%lx\n"), (long)(start - section->start));
++
++ SAFE_BYTE_GET_AND_INC (length, start, 4, end);
++ if (length == 0xffffffff)
++ {
++ is_64bit = true;
++ SAFE_BYTE_GET_AND_INC (length, start, 8, end);
++ }
++ else
++ is_64bit = false;
++
++ SAFE_BYTE_GET_AND_INC (version, start, 2, end);
++ SAFE_BYTE_GET_AND_INC (address_size, start, 1, end);
++ SAFE_BYTE_GET_AND_INC (segment_selector_size, start, 1, end);
++ SAFE_BYTE_GET_AND_INC (offset_entry_count, start, 4, end);
++
++ printf (_(" Length: 0x%s\n"), dwarf_vmatoa ("x", length));
++ printf (_(" DWARF version: %u\n"), version);
++ printf (_(" Address size: %u\n"), address_size);
++ printf (_(" Segment size: %u\n"), segment_selector_size);
++ printf (_(" Offset entries: %u\n"), offset_entry_count);
++
++ if (version < 5)
++ {
++ warn (_("The %s section contains a corrupt or "
++ "unsupported version number: %d.\n"),
++ section->name, version);
++ return 0;
++ }
++
++ if (segment_selector_size != 0)
++ {
++ warn (_("The %s section contains an "
++ "unsupported segment selector size: %d.\n"),
++ section->name, segment_selector_size);
++ return 0;
++ }
++
++ if (offset_entry_count == 0)
++ {
++ warn (_("The %s section contains a table without offset\n"),
++ section->name);
++ return 0;
++ }
++
++ printf (_("\n Offset Entries starting at 0x%lx:\n"),
++ (long)(start - section->start));
++
++ if (is_64bit)
++ {
++ for (i = 0; i < offset_entry_count; i++)
++ {
++ dwarf_vma entry;
++
++ SAFE_BYTE_GET_AND_INC (entry, start, 8, end);
++ printf (_(" [%6u] 0x%s\n"), i, dwarf_vmatoa ("x", entry));
++ }
++ }
++ else
++ {
++ for (i = 0; i < offset_entry_count; i++)
++ {
++ uint32_t entry;
++
++ SAFE_BYTE_GET_AND_INC (entry, start, 4, end);
++ printf (_(" [%6u] 0x%x\n"), i, entry);
++ }
++ }
++
++ putchar ('\n');
++
++ uint32_t j;
++
++ for (j = 1, i = 0; i < offset_entry_count;)
++ {
++ unsigned char lle;
++ dwarf_vma base_address = 0;
++ dwarf_vma begin;
++ dwarf_vma finish;
++ dwarf_vma off = start - section->start;
++
++ if (j != i)
++ {
++ printf (_(" Offset Entry %u\n"), i);
++ j = i;
++ }
++
++ printf (" ");
++ print_dwarf_vma (off, 4);
++
++ SAFE_BYTE_GET_AND_INC (lle, start, 1, end);
++
++ switch (lle)
++ {
++ case DW_LLE_end_of_list:
++ printf (_("<End of list>\n\n"));
++ i ++;
++ continue;
++
++ case DW_LLE_base_addressx:
++ READ_ULEB (base_address, start, end);
++ print_dwarf_vma (base_address, address_size);
++ printf (_("(index into .debug_addr) "));
++ base_address = fetch_indexed_addr (base_address, address_size);
++ print_dwarf_vma (base_address, address_size);
++ printf (_("(base address)\n"));
++ continue;
++
++ case DW_LLE_startx_endx:
++ READ_ULEB (begin, start, end);
++ begin = fetch_indexed_addr (begin, address_size);
++ READ_ULEB (finish, start, end);
++ finish = fetch_indexed_addr (finish, address_size);
++ break;
++
++ case DW_LLE_startx_length:
++ READ_ULEB (begin, start, end);
++ begin = fetch_indexed_addr (begin, address_size);
++ READ_ULEB (finish, start, end);
++ finish += begin;
++ break;
++
++ case DW_LLE_offset_pair:
++ READ_ULEB (begin, start, end);
++ begin += base_address;
++ READ_ULEB (finish, start, end);
++ finish += base_address;
++ break;
++
++ case DW_LLE_default_location:
++ begin = finish = 0;
++ break;
++
++ case DW_LLE_base_address:
++ SAFE_BYTE_GET_AND_INC (base_address, start, address_size, end);
++ print_dwarf_vma (base_address, address_size);
++ printf (_("(base address)\n"));
++ continue;
++
++ case DW_LLE_start_end:
++ SAFE_BYTE_GET_AND_INC (begin, start, address_size, end);
++ SAFE_BYTE_GET_AND_INC (finish, start, address_size, end);
++ break;
++
++ case DW_LLE_start_length:
++ SAFE_BYTE_GET_AND_INC (begin, start, address_size, end);
++ READ_ULEB (finish, start, end);
++ finish += begin;
++ break;
++
++ default:
++ error (_("Invalid location list entry type %d\n"), lle);
++ return 0;
++ }
++
++ if (start == end)
++ {
++ warn (_("Location list starting at offset 0x%lx is not terminated.\n"),
++ (unsigned long) off);
++ break;
++ }
++
++ print_dwarf_vma (begin, address_size);
++ print_dwarf_vma (finish, address_size);
++
++ if (begin == finish)
++ fputs (_(" (start == end)"), stdout);
++ else if (begin > finish)
++ fputs (_(" (start > end)"), stdout);
++
++ /* Read the counted location descriptions. */
++ READ_ULEB (length, start, end);
++
++ if (length > (size_t) (end - start))
++ {
++ warn (_("Location list starting at offset 0x%lx is not terminated.\n"),
++ (unsigned long) off);
++ break;
++ }
++
++ putchar (' ');
++ (void) decode_location_expression (start, address_size, address_size,
++ version, length, 0, section);
++ start += length;
++ putchar ('\n');
++ }
++
++ putchar ('\n');
++ }
++ while (start < end);
++
++ return 1;
++}
++
++static int
+ display_debug_loc (struct dwarf_section *section, void *file)
+ {
+ unsigned char *start = section->start, *vstart = NULL;
+@@ -6893,13 +7217,9 @@ display_debug_loc (struct dwarf_section
+ }
+
+ SAFE_BYTE_GET_AND_INC (offset_entry_count, hdrptr, 4, end);
++
+ if (offset_entry_count != 0)
+- {
+- warn (_("The %s section contains "
+- "unsupported offset entry count: %d.\n"),
+- section->name, offset_entry_count);
+- return 0;
+- }
++ return display_offset_entry_loclists (section);
+
+ expected_start = hdrptr - section_begin;
+ }
+@@ -6959,9 +7279,10 @@ display_debug_loc (struct dwarf_section
+ if (debug_information [first].num_loc_offsets > 0
+ && debug_information [first].loc_offsets [0] != expected_start
+ && debug_information [first].loc_views [0] != expected_start)
+- warn (_("Location lists in %s section start at 0x%s\n"),
++ warn (_("Location lists in %s section start at 0x%s rather than 0x%s\n"),
+ section->name,
+- dwarf_vmatoa ("x", debug_information [first].loc_offsets [0]));
++ dwarf_vmatoa ("x", debug_information [first].loc_offsets [0]),
++ dwarf_vmatoa ("x", expected_start));
+
+ if (!locs_sorted)
+ array = (unsigned int *) xcmalloc (num_loc_list, sizeof (unsigned int));
+@@ -7639,24 +7960,44 @@ display_debug_rnglists_list (unsigned ch
+ case DW_RLE_end_of_list:
+ printf (_("<End of list>\n"));
+ break;
+- case DW_RLE_base_address:
+- SAFE_BYTE_GET_AND_INC (base_address, start, pointer_size, finish);
++ case DW_RLE_base_addressx:
++ READ_ULEB (base_address, start, finish);
++ print_dwarf_vma (base_address, pointer_size);
++ printf (_("(base address index) "));
++ base_address = fetch_indexed_addr (base_address, pointer_size);
+ print_dwarf_vma (base_address, pointer_size);
+ printf (_("(base address)\n"));
+ break;
+- case DW_RLE_start_length:
+- SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, finish);
++ case DW_RLE_startx_endx:
++ READ_ULEB (begin, start, finish);
++ READ_ULEB (end, start, finish);
++ begin = fetch_indexed_addr (begin, pointer_size);
++ end = fetch_indexed_addr (begin, pointer_size);
++ break;
++ case DW_RLE_startx_length:
++ READ_ULEB (begin, start, finish);
+ READ_ULEB (length, start, finish);
++ begin = fetch_indexed_addr (begin, pointer_size);
+ end = begin + length;
+ break;
+ case DW_RLE_offset_pair:
+ READ_ULEB (begin, start, finish);
+ READ_ULEB (end, start, finish);
+ break;
++ case DW_RLE_base_address:
++ SAFE_BYTE_GET_AND_INC (base_address, start, pointer_size, finish);
++ print_dwarf_vma (base_address, pointer_size);
++ printf (_("(base address)\n"));
++ break;
+ case DW_RLE_start_end:
+ SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, finish);
+ SAFE_BYTE_GET_AND_INC (end, start, pointer_size, finish);
+ break;
++ case DW_RLE_start_length:
++ SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, finish);
++ READ_ULEB (length, start, finish);
++ end = begin + length;
++ break;
+ default:
+ error (_("Invalid range list entry type %d\n"), rlet);
+ rlet = DW_RLE_end_of_list;
+@@ -7664,7 +8005,7 @@ display_debug_rnglists_list (unsigned ch
+ }
+ if (rlet == DW_RLE_end_of_list)
+ break;
+- if (rlet == DW_RLE_base_address)
++ if (rlet == DW_RLE_base_address || rlet == DW_RLE_base_addressx)
+ continue;
+
+ /* Only a DW_RLE_offset_pair needs the base address added. */
+@@ -7709,6 +8050,8 @@ display_debug_ranges (struct dwarf_secti
+ return 0;
+ }
+
++ introduce (section, false);
++
+ if (is_rnglists)
+ {
+ dwarf_vma initial_length;
+@@ -7745,19 +8088,19 @@ display_debug_ranges (struct dwarf_secti
+ }
+ }
+
+- /* Get and check the version number. */
++ /* Get the other fields in the header. */
+ SAFE_BYTE_GET_AND_INC (version, start, 2, finish);
+-
+- if (version != 5)
+- {
+- warn (_("Only DWARF version 5 debug_rnglists info "
+- "is currently supported.\n"));
+- return 0;
+- }
+-
+ SAFE_BYTE_GET_AND_INC (address_size, start, 1, finish);
+-
+ SAFE_BYTE_GET_AND_INC (segment_selector_size, start, 1, finish);
++ SAFE_BYTE_GET_AND_INC (offset_entry_count, start, 4, finish);
++
++ printf (_(" Length: 0x%s\n"), dwarf_vmatoa ("x", initial_length));
++ printf (_(" DWARF version: %u\n"), version);
++ printf (_(" Address size: %u\n"), address_size);
++ printf (_(" Segment size: %u\n"), segment_selector_size);
++ printf (_(" Offset entries: %u\n"), offset_entry_count);
++
++ /* Check the fields. */
+ if (segment_selector_size != 0)
+ {
+ warn (_("The %s section contains "
+@@ -7766,16 +8109,39 @@ display_debug_ranges (struct dwarf_secti
+ return 0;
+ }
+
+- SAFE_BYTE_GET_AND_INC (offset_entry_count, start, 4, finish);
+- if (offset_entry_count != 0)
++ if (version < 5)
+ {
+- warn (_("The %s section contains "
+- "unsupported offset entry count: %u.\n"),
+- section->name, offset_entry_count);
++ warn (_("Only DWARF version 5+ debug_rnglists info "
++ "is currently supported.\n"));
+ return 0;
+ }
+- }
+
++ if (offset_entry_count != 0)
++ {
++ printf (_("\n Offsets starting at 0x%lx:\n"), (long)(start - section->start));
++ if (offset_size == 8)
++ {
++ for (i = 0; i < offset_entry_count; i++)
++ {
++ dwarf_vma entry;
++
++ SAFE_BYTE_GET_AND_INC (entry, start, 8, finish);
++ printf (_(" [%6u] 0x%s\n"), i, dwarf_vmatoa ("x", entry));
++ }
++ }
++ else
++ {
++ for (i = 0; i < offset_entry_count; i++)
++ {
++ uint32_t entry;
++
++ SAFE_BYTE_GET_AND_INC (entry, start, 4, finish);
++ printf (_(" [%6u] 0x%x\n"), i, entry);
++ }
++ }
++ }
++ }
++
+ if (load_debug_info (file) == 0)
+ {
+ warn (_("Unable to load/parse the .debug_info section, so cannot interpret the %s section.\n"),
+@@ -7834,8 +8200,7 @@ display_debug_ranges (struct dwarf_secti
+ warn (_("Range lists in %s section start at 0x%lx\n"),
+ section->name, (unsigned long) range_entries[0].ranges_offset);
+
+- introduce (section, false);
+-
++ putchar ('\n');
+ printf (_(" Offset Begin End\n"));
+
+ for (i = 0; i < num_range_list; i++)
+@@ -7895,8 +8260,12 @@ display_debug_ranges (struct dwarf_secti
+ start = next;
+ last_start = next;
+
+- (is_rnglists ? display_debug_rnglists_list : display_debug_ranges_list)
+- (start, finish, pointer_size, offset, base_address);
++ if (is_rnglists)
++ display_debug_rnglists_list
++ (start, finish, pointer_size, offset, base_address);
++ else
++ display_debug_ranges_list
++ (start, finish, pointer_size, offset, base_address);
+ }
+ putchar ('\n');
+
+diff --git a/binutils/dwarf.h b/binutils/dwarf.h
+index 4fc62abfa4c..ccce2461c81 100644
+--- a/binutils/dwarf.h
++++ b/binutils/dwarf.h
+@@ -181,9 +181,13 @@ typedef struct
+ /* This is an array of offsets to the location view table. */
+ dwarf_vma * loc_views;
+ int * have_frame_base;
++
++ /* Information for associating location lists with CUs. */
+ unsigned int num_loc_offsets;
+ unsigned int max_loc_offsets;
+ unsigned int num_loc_views;
++ dwarf_vma loclists_base;
++
+ /* List of .debug_ranges offsets seen in this .debug_info. */
+ dwarf_vma * range_lists;
+ unsigned int num_range_lists;
+diff --git a/binutils/testsuite/binutils-all/dw5.W b/binutils/testsuite/binutils-all/dw5.W
+index ebab8b7d3b0..bfcdac175ba 100644
+--- a/binutils/testsuite/binutils-all/dw5.W
++++ b/binutils/testsuite/binutils-all/dw5.W
+@@ -281,7 +281,7 @@ Contents of the .debug_loclists section:
+ 00000039 <End of list>
+
+ Contents of the .debug_rnglists section:
+-
++#...
+ Offset Begin End
+ 0000000c 0000000000001234 0000000000001236
+ 00000016 0000000000001234 0000000000001239
+diff --git a/binutils/testsuite/binutils-all/x86-64/pr26808.dump b/binutils/testsuite/binutils-all/x86-64/pr26808.dump
+index f64f9d008f9..7ef73b24dc9 100644
+--- a/binutils/testsuite/binutils-all/x86-64/pr26808.dump
++++ b/binutils/testsuite/binutils-all/x86-64/pr26808.dump
+@@ -30,13 +30,13 @@ Contents of the .debug_info.dwo section:
+ <a5> DW_AT_decl_file : 1
+ <a6> DW_AT_decl_line : 30
+ <a7> DW_AT_type : <0x90>
+- <ab> DW_AT_low_pc : (addr_index: 0x0): <no .debug_addr section>
++ <ab> DW_AT_low_pc : (addr_index: 0x0): 0
+ <ac> DW_AT_high_pc : 0x304
+ <b4> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <b6> DW_AT_GNU_all_tail_call_sites: 1
+ <b6> DW_AT_sibling : <0x11b>
+ <2><ba>: Abbrev Number: 14 (DW_TAG_lexical_block)
+- <bb> DW_AT_low_pc : (addr_index: 0x1): <no .debug_addr section>
++ <bb> DW_AT_low_pc : (addr_index: 0x1): 0
+ <bc> DW_AT_high_pc : 0x2fa
+ <3><c4>: Abbrev Number: 15 (DW_TAG_variable)
+ <c5> DW_AT_name : c1
+@@ -56,7 +56,7 @@ Contents of the .debug_info.dwo section:
+ <ff> DW_AT_artificial : 1
+ <ff> DW_AT_location : 2 byte block: fb 2 (DW_OP_GNU_addr_index <0x2>)
+ <3><102>: Abbrev Number: 14 (DW_TAG_lexical_block)
+- <103> DW_AT_low_pc : (addr_index: 0x3): <no .debug_addr section>
++ <103> DW_AT_low_pc : (addr_index: 0x3): 0
+ <104> DW_AT_high_pc : 0x2f
+ <4><10c>: Abbrev Number: 17 (DW_TAG_variable)
+ <10d> DW_AT_name : i
+@@ -274,7 +274,7 @@ Contents of the .debug_info.dwo section:
+ <2dd> DW_AT_decl_file : 1
+ <2de> DW_AT_decl_line : 70
+ <2df> DW_AT_linkage_name: _Z4f13iv
+- <2e8> DW_AT_low_pc : (addr_index: 0x0): <no .debug_addr section>
++ <2e8> DW_AT_low_pc : (addr_index: 0x0): 0
+ <2e9> DW_AT_high_pc : 0x6
+ <2f1> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <2f3> DW_AT_GNU_all_call_sites: 1
+@@ -282,7 +282,7 @@ Contents of the .debug_info.dwo section:
+ <2f4> DW_AT_specification: <0x219>
+ <2f8> DW_AT_decl_file : 2
+ <2f9> DW_AT_decl_line : 30
+- <2fa> DW_AT_low_pc : (addr_index: 0x1): <no .debug_addr section>
++ <2fa> DW_AT_low_pc : (addr_index: 0x1): 0
+ <2fb> DW_AT_high_pc : 0x20
+ <303> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <305> DW_AT_object_pointer: <0x30d>
+@@ -300,7 +300,7 @@ Contents of the .debug_info.dwo section:
+ <31d> DW_AT_specification: <0x223>
+ <321> DW_AT_decl_file : 2
+ <322> DW_AT_decl_line : 38
+- <323> DW_AT_low_pc : (addr_index: 0x2): <no .debug_addr section>
++ <323> DW_AT_low_pc : (addr_index: 0x2): 0
+ <324> DW_AT_high_pc : 0x18
+ <32c> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <32e> DW_AT_object_pointer: <0x336>
+@@ -316,7 +316,7 @@ Contents of the .debug_info.dwo section:
+ <341> DW_AT_specification: <0x22d>
+ <345> DW_AT_decl_file : 2
+ <346> DW_AT_decl_line : 46
+- <347> DW_AT_low_pc : (addr_index: 0x3): <no .debug_addr section>
++ <347> DW_AT_low_pc : (addr_index: 0x3): 0
+ <348> DW_AT_high_pc : 0x18
+ <350> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <352> DW_AT_object_pointer: <0x35a>
+@@ -332,7 +332,7 @@ Contents of the .debug_info.dwo section:
+ <365> DW_AT_specification: <0x237>
+ <369> DW_AT_decl_file : 2
+ <36a> DW_AT_decl_line : 54
+- <36b> DW_AT_low_pc : (addr_index: 0x4): <no .debug_addr section>
++ <36b> DW_AT_low_pc : (addr_index: 0x4): 0
+ <36c> DW_AT_high_pc : 0x16
+ <374> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <376> DW_AT_object_pointer: <0x37e>
+@@ -348,7 +348,7 @@ Contents of the .debug_info.dwo section:
+ <389> DW_AT_specification: <0x26b>
+ <38d> DW_AT_decl_file : 2
+ <38e> DW_AT_decl_line : 62
+- <38f> DW_AT_low_pc : (addr_index: 0x5): <no .debug_addr section>
++ <38f> DW_AT_low_pc : (addr_index: 0x5): 0
+ <390> DW_AT_high_pc : 0x16
+ <398> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <39a> DW_AT_object_pointer: <0x3a2>
+@@ -366,7 +366,7 @@ Contents of the .debug_info.dwo section:
+ <3b2> DW_AT_specification: <0x275>
+ <3b6> DW_AT_decl_file : 2
+ <3b7> DW_AT_decl_line : 72
+- <3b8> DW_AT_low_pc : (addr_index: 0x6): <no .debug_addr section>
++ <3b8> DW_AT_low_pc : (addr_index: 0x6): 0
+ <3b9> DW_AT_high_pc : 0x1b
+ <3c1> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <3c3> DW_AT_object_pointer: <0x3cb>
+@@ -382,7 +382,7 @@ Contents of the .debug_info.dwo section:
+ <3d6> DW_AT_specification: <0x27f>
+ <3da> DW_AT_decl_file : 2
+ <3db> DW_AT_decl_line : 82
+- <3dc> DW_AT_low_pc : (addr_index: 0x7): <no .debug_addr section>
++ <3dc> DW_AT_low_pc : (addr_index: 0x7): 0
+ <3dd> DW_AT_high_pc : 0x1b
+ <3e5> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <3e7> DW_AT_object_pointer: <0x3ef>
+@@ -398,7 +398,7 @@ Contents of the .debug_info.dwo section:
+ <3fa> DW_AT_specification: <0x289>
+ <3fe> DW_AT_decl_file : 2
+ <3ff> DW_AT_decl_line : 92
+- <400> DW_AT_low_pc : (addr_index: 0x8): <no .debug_addr section>
++ <400> DW_AT_low_pc : (addr_index: 0x8): 0
+ <401> DW_AT_high_pc : 0x19
+ <409> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <40b> DW_AT_object_pointer: <0x413>
+@@ -414,7 +414,7 @@ Contents of the .debug_info.dwo section:
+ <41e> DW_AT_specification: <0x2ae>
+ <422> DW_AT_decl_file : 2
+ <423> DW_AT_decl_line : 102
+- <424> DW_AT_low_pc : (addr_index: 0x9): <no .debug_addr section>
++ <424> DW_AT_low_pc : (addr_index: 0x9): 0
+ <425> DW_AT_high_pc : 0x19
+ <42d> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <42f> DW_AT_object_pointer: <0x437>
+@@ -432,7 +432,7 @@ Contents of the .debug_info.dwo section:
+ <447> DW_AT_specification: <0x2b8>
+ <44b> DW_AT_decl_file : 2
+ <44c> DW_AT_decl_line : 112
+- <44d> DW_AT_low_pc : (addr_index: 0xa): <no .debug_addr section>
++ <44d> DW_AT_low_pc : (addr_index: 0xa): 0
+ <44e> DW_AT_high_pc : 0x1f
+ <456> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <458> DW_AT_object_pointer: <0x460>
+@@ -451,7 +451,7 @@ Contents of the .debug_info.dwo section:
+ <471> DW_AT_decl_line : 120
+ <472> DW_AT_linkage_name: _Z4f11av
+ <47b> DW_AT_type : <0x242>
+- <47f> DW_AT_low_pc : (addr_index: 0xb): <no .debug_addr section>
++ <47f> DW_AT_low_pc : (addr_index: 0xb): 0
+ <480> DW_AT_high_pc : 0xb
+ <488> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <48a> DW_AT_GNU_all_call_sites: 1
+@@ -459,7 +459,7 @@ Contents of the .debug_info.dwo section:
+ <48b> DW_AT_specification: <0x2c2>
+ <48f> DW_AT_decl_file : 2
+ <490> DW_AT_decl_line : 126
+- <491> DW_AT_low_pc : (addr_index: 0xc): <no .debug_addr section>
++ <491> DW_AT_low_pc : (addr_index: 0xc): 0
+ <492> DW_AT_high_pc : 0x20
+ <49a> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <49c> DW_AT_object_pointer: <0x4a4>
+@@ -478,7 +478,7 @@ Contents of the .debug_info.dwo section:
+ <4b4> DW_AT_decl_line : 134
+ <4b5> DW_AT_linkage_name: _Z3t12v
+ <4bd> DW_AT_type : <0x249>
+- <4c1> DW_AT_low_pc : (addr_index: 0xd): <no .debug_addr section>
++ <4c1> DW_AT_low_pc : (addr_index: 0xd): 0
+ <4c2> DW_AT_high_pc : 0x19
+ <4ca> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <4cc> DW_AT_GNU_all_tail_call_sites: 1
+@@ -489,7 +489,7 @@ Contents of the .debug_info.dwo section:
+ <4d2> DW_AT_decl_line : 142
+ <4d3> DW_AT_linkage_name: _Z3t13v
+ <4db> DW_AT_type : <0x249>
+- <4df> DW_AT_low_pc : (addr_index: 0xe): <no .debug_addr section>
++ <4df> DW_AT_low_pc : (addr_index: 0xe): 0
+ <4e0> DW_AT_high_pc : 0x14
+ <4e8> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <4ea> DW_AT_GNU_all_tail_call_sites: 1
+@@ -500,13 +500,13 @@ Contents of the .debug_info.dwo section:
+ <4f0> DW_AT_decl_line : 150
+ <4f1> DW_AT_linkage_name: _Z3t14v
+ <4f9> DW_AT_type : <0x249>
+- <4fd> DW_AT_low_pc : (addr_index: 0xf): <no .debug_addr section>
++ <4fd> DW_AT_low_pc : (addr_index: 0xf): 0
+ <4fe> DW_AT_high_pc : 0x61
+ <506> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <508> DW_AT_GNU_all_tail_call_sites: 1
+ <508> DW_AT_sibling : <0x532>
+ <2><50c>: Abbrev Number: 24 (DW_TAG_lexical_block)
+- <50d> DW_AT_low_pc : (addr_index: 0x10): <no .debug_addr section>
++ <50d> DW_AT_low_pc : (addr_index: 0x10): 0
+ <50e> DW_AT_high_pc : 0x57
+ <3><516>: Abbrev Number: 25 (DW_TAG_variable)
+ <517> DW_AT_name : s1
+@@ -538,13 +538,13 @@ Contents of the .debug_info.dwo section:
+ <54b> DW_AT_decl_line : 163
+ <54c> DW_AT_linkage_name: _Z3t15v
+ <554> DW_AT_type : <0x249>
+- <558> DW_AT_low_pc : (addr_index: 0x11): <no .debug_addr section>
++ <558> DW_AT_low_pc : (addr_index: 0x11): 0
+ <559> DW_AT_high_pc : 0x5d
+ <561> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <563> DW_AT_GNU_all_tail_call_sites: 1
+ <563> DW_AT_sibling : <0x58d>
+ <2><567>: Abbrev Number: 24 (DW_TAG_lexical_block)
+- <568> DW_AT_low_pc : (addr_index: 0x12): <no .debug_addr section>
++ <568> DW_AT_low_pc : (addr_index: 0x12): 0
+ <569> DW_AT_high_pc : 0x53
+ <3><571>: Abbrev Number: 25 (DW_TAG_variable)
+ <572> DW_AT_name : s1
+@@ -576,7 +576,7 @@ Contents of the .debug_info.dwo section:
+ <5a9> DW_AT_decl_line : 176
+ <5aa> DW_AT_linkage_name: _Z3t16v
+ <5b2> DW_AT_type : <0x249>
+- <5b6> DW_AT_low_pc : (addr_index: 0x13): <no .debug_addr section>
++ <5b6> DW_AT_low_pc : (addr_index: 0x13): 0
+ <5b7> DW_AT_high_pc : 0x13
+ <5bf> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <5c1> DW_AT_GNU_all_tail_call_sites: 1
+@@ -587,13 +587,13 @@ Contents of the .debug_info.dwo section:
+ <5c7> DW_AT_decl_line : 184
+ <5c8> DW_AT_linkage_name: _Z3t17v
+ <5d0> DW_AT_type : <0x249>
+- <5d4> DW_AT_low_pc : (addr_index: 0x14): <no .debug_addr section>
++ <5d4> DW_AT_low_pc : (addr_index: 0x14): 0
+ <5d5> DW_AT_high_pc : 0x5f
+ <5dd> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <5df> DW_AT_GNU_all_call_sites: 1
+ <5df> DW_AT_sibling : <0x612>
+ <2><5e3>: Abbrev Number: 24 (DW_TAG_lexical_block)
+- <5e4> DW_AT_low_pc : (addr_index: 0x15): <no .debug_addr section>
++ <5e4> DW_AT_low_pc : (addr_index: 0x15): 0
+ <5e5> DW_AT_high_pc : 0x59
+ <3><5ed>: Abbrev Number: 25 (DW_TAG_variable)
+ <5ee> DW_AT_name : c
+@@ -602,7 +602,7 @@ Contents of the .debug_info.dwo section:
+ <5f2> DW_AT_type : <0x53d>
+ <5f6> DW_AT_location : 2 byte block: 91 6f (DW_OP_fbreg: -17)
+ <3><5f9>: Abbrev Number: 24 (DW_TAG_lexical_block)
+- <5fa> DW_AT_low_pc : (addr_index: 0x16): <no .debug_addr section>
++ <5fa> DW_AT_low_pc : (addr_index: 0x16): 0
+ <5fb> DW_AT_high_pc : 0x50
+ <4><603>: Abbrev Number: 25 (DW_TAG_variable)
+ <604> DW_AT_name : i
+@@ -620,13 +620,13 @@ Contents of the .debug_info.dwo section:
+ <618> DW_AT_decl_line : 199
+ <619> DW_AT_linkage_name: _Z3t18v
+ <621> DW_AT_type : <0x249>
+- <625> DW_AT_low_pc : (addr_index: 0x17): <no .debug_addr section>
++ <625> DW_AT_low_pc : (addr_index: 0x17): 0
+ <626> DW_AT_high_pc : 0x5f
+ <62e> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <630> DW_AT_GNU_all_tail_call_sites: 1
+ <630> DW_AT_sibling : <0x67a>
+ <2><634>: Abbrev Number: 24 (DW_TAG_lexical_block)
+- <635> DW_AT_low_pc : (addr_index: 0x18): <no .debug_addr section>
++ <635> DW_AT_low_pc : (addr_index: 0x18): 0
+ <636> DW_AT_high_pc : 0x55
+ <3><63e>: Abbrev Number: 25 (DW_TAG_variable)
+ <63f> DW_AT_name : c
+@@ -635,7 +635,7 @@ Contents of the .debug_info.dwo section:
+ <643> DW_AT_type : <0x53d>
+ <647> DW_AT_location : 2 byte block: 91 6f (DW_OP_fbreg: -17)
+ <3><64a>: Abbrev Number: 24 (DW_TAG_lexical_block)
+- <64b> DW_AT_low_pc : (addr_index: 0x19): <no .debug_addr section>
++ <64b> DW_AT_low_pc : (addr_index: 0x19): 0
+ <64c> DW_AT_high_pc : 0x4c
+ <4><654>: Abbrev Number: 25 (DW_TAG_variable)
+ <655> DW_AT_name : i
+@@ -644,7 +644,7 @@ Contents of the .debug_info.dwo section:
+ <659> DW_AT_type : <0x242>
+ <65d> DW_AT_location : 2 byte block: 91 68 (DW_OP_fbreg: -24)
+ <4><660>: Abbrev Number: 24 (DW_TAG_lexical_block)
+- <661> DW_AT_low_pc : (addr_index: 0x1a): <no .debug_addr section>
++ <661> DW_AT_low_pc : (addr_index: 0x1a): 0
+ <662> DW_AT_high_pc : 0x34
+ <5><66a>: Abbrev Number: 25 (DW_TAG_variable)
+ <66b> DW_AT_name : s
+@@ -786,7 +786,7 @@ Contents of the .debug_info.dwo section:
+ <7d3> DW_AT_decl_line : 32
+ <7d4> DW_AT_linkage_name: _Z4t16av
+ <7dd> DW_AT_type : <0x7c4>
+- <7e1> DW_AT_low_pc : (addr_index: 0x0): <no .debug_addr section>
++ <7e1> DW_AT_low_pc : (addr_index: 0x0): 0
+ <7e2> DW_AT_high_pc : 0x13
+ <7ea> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <7ec> DW_AT_GNU_all_tail_call_sites: 1
+@@ -878,14 +878,14 @@ Contents of the .debug_info.dwo section:
+ <908> DW_AT_decl_file : 1
+ <909> DW_AT_decl_line : 70
+ <90a> DW_AT_linkage_name: _Z4f13iv
+- <913> DW_AT_low_pc : (addr_index: 0x0): <no .debug_addr section>
++ <913> DW_AT_low_pc : (addr_index: 0x0): 0
+ <914> DW_AT_high_pc : 0x6
+ <91c> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <91e> DW_AT_GNU_all_call_sites: 1
+ <1><91e>: Abbrev Number: 17 (DW_TAG_subprogram)
+ <91f> DW_AT_specification: <0x8a8>
+ <923> DW_AT_decl_file : 2
+- <924> DW_AT_low_pc : (addr_index: 0x1): <no .debug_addr section>
++ <924> DW_AT_low_pc : (addr_index: 0x1): 0
+ <925> DW_AT_high_pc : 0xf
+ <92d> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <92f> DW_AT_object_pointer: <0x937>
+@@ -903,7 +903,7 @@ Contents of the .debug_info.dwo section:
+ <94b> DW_AT_specification: <0x89b>
+ <94f> DW_AT_decl_file : 2
+ <950> DW_AT_decl_line : 36
+- <951> DW_AT_low_pc : (addr_index: 0x2): <no .debug_addr section>
++ <951> DW_AT_low_pc : (addr_index: 0x2): 0
+ <952> DW_AT_high_pc : 0x20
+ <95a> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <95c> DW_AT_object_pointer: <0x964>
+@@ -922,7 +922,7 @@ Contents of the .debug_info.dwo section:
+ <978> DW_AT_decl_line : 72
+ <979> DW_AT_linkage_name: _Z3f10v
+ <981> DW_AT_type : <0x8b7>
+- <985> DW_AT_low_pc : (addr_index: 0x3): <no .debug_addr section>
++ <985> DW_AT_low_pc : (addr_index: 0x3): 0
+ <986> DW_AT_high_pc : 0xb
+ <98e> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <990> DW_AT_GNU_all_call_sites: 1
+@@ -933,7 +933,7 @@ Contents of the .debug_info.dwo section:
+ <997> DW_AT_decl_line : 80
+ <998> DW_AT_linkage_name: _Z4f11bPFivE
+ <9a5> DW_AT_type : <0x8b7>
+- <9a9> DW_AT_low_pc : (addr_index: 0x4): <no .debug_addr section>
++ <9a9> DW_AT_low_pc : (addr_index: 0x4): 0
+ <9aa> DW_AT_high_pc : 0x14
+ <9b2> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <9b4> DW_AT_GNU_all_tail_call_sites: 1
+@@ -954,7 +954,7 @@ Contents of the .debug_info.dwo section:
+ <9d3> DW_AT_specification: <0x8e0>
+ <9d7> DW_AT_decl_file : 2
+ <9d8> DW_AT_decl_line : 88
+- <9d9> DW_AT_low_pc : (addr_index: 0x5): <no .debug_addr section>
++ <9d9> DW_AT_low_pc : (addr_index: 0x5): 0
+ <9da> DW_AT_high_pc : 0xf
+ <9e2> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <9e4> DW_AT_object_pointer: <0x9ec>
+@@ -976,7 +976,7 @@ Contents of the .debug_info.dwo section:
+ <a06> DW_AT_decl_line : 96
+ <a07> DW_AT_linkage_name: _Z3f13v
+ <a0f> DW_AT_type : <0xa1e>
+- <a13> DW_AT_low_pc : (addr_index: 0x6): <no .debug_addr section>
++ <a13> DW_AT_low_pc : (addr_index: 0x6): 0
+ <a14> DW_AT_high_pc : 0xb
+ <a1c> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <a1e> DW_AT_GNU_all_call_sites: 1
+@@ -990,7 +990,7 @@ Contents of the .debug_info.dwo section:
+ <a2a> DW_AT_decl_line : 104
+ <a2b> DW_AT_linkage_name: _Z3f14v
+ <a33> DW_AT_type : <0xa42>
+- <a37> DW_AT_low_pc : (addr_index: 0x7): <no .debug_addr section>
++ <a37> DW_AT_low_pc : (addr_index: 0x7): 0
+ <a38> DW_AT_high_pc : 0xb
+ <a40> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <a42> DW_AT_GNU_all_call_sites: 1
+@@ -1010,7 +1010,7 @@ Contents of the .debug_info.dwo section:
+ <a5b> DW_AT_decl_line : 112
+ <a5c> DW_AT_linkage_name: _Z3f15v
+ <a64> DW_AT_type : <0xa73>
+- <a68> DW_AT_low_pc : (addr_index: 0x8): <no .debug_addr section>
++ <a68> DW_AT_low_pc : (addr_index: 0x8): 0
+ <a69> DW_AT_high_pc : 0xb
+ <a71> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <a73> DW_AT_GNU_all_call_sites: 1
+@@ -1030,7 +1030,7 @@ Contents of the .debug_info.dwo section:
+ <a8f> DW_AT_decl_line : 127
+ <a90> DW_AT_linkage_name: _Z3f18i
+ <a98> DW_AT_type : <0xa42>
+- <a9c> DW_AT_low_pc : (addr_index: 0x9): <no .debug_addr section>
++ <a9c> DW_AT_low_pc : (addr_index: 0x9): 0
+ <a9d> DW_AT_high_pc : 0x44
+ <aa5> DW_AT_frame_base : 1 byte block: 9c (DW_OP_call_frame_cfa)
+ <aa7> DW_AT_GNU_all_call_sites: 1
diff --git a/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-2.patch b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-2.patch
new file mode 100644
index 0000000000..0583bfcfab
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-2.patch
@@ -0,0 +1,188 @@
+From ec41dd75c866599fc03c390c6afb5736c159c0ff Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Tue, 21 Jun 2022 16:37:27 +0100
+Subject: [PATCH] Binutils support for dwarf-5 (location and range lists
+ related)
+
+ * dwarf.h (struct debug_info): Add rnglists_base field.
+ * dwarf.c (read_and_display_attr_value): Read attribute DW_AT_rnglists_base.
+ (display_debug_rnglists_list): While handling DW_RLE_base_addressx,
+ DW_RLE_startx_endx, DW_RLE_startx_length items, pass the proper parameter
+ value to fetch_indexed_addr(), i.e. fetch the proper entry in .debug_addr section.
+ (display_debug_ranges): Add rnglists_base to the .debug_rnglists base address.
+ (load_separate_debug_files): Load .debug_addr section, if exists.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=ec41dd75c866599fc03c390c6afb5736c159c0ff]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/ChangeLog | 10 +++++++++
+ binutils/dwarf.c | 53 ++++++++++++++++++++++++++++++++++------------
+ binutils/dwarf.h | 1 +
+ 3 files changed, 51 insertions(+), 13 deletions(-)
+
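+ (A rough sketch of the index translation this backport introduces, not part of the patch itself: a DW_RLE_*x entry carries an index into .debug_addr, which must be scaled by the address size and moved past the section's unit header before fetch_indexed_addr() reads it. The helper name below is made up for illustration; the 8/16-byte header lengths mirror the hunks that follow.)
+
+ /* Illustrative only -- not part of the backport.  Map a DW_RLE_base_addressx
+    or DW_RLE_startx_* index to the byte offset that fetch_indexed_addr()
+    reads from .debug_addr.  */
+ static dwarf_vma
+ debug_addr_index_to_offset (dwarf_vma index, unsigned int pointer_size,
+                             unsigned int offset_size)
+ {
+   /* The .debug_addr unit header is 8 bytes for 32-bit DWARF and 16 bytes
+      for 64-bit DWARF; the address array starts right after it.  */
+   unsigned int hdr_len = (offset_size == 4) ? 8 : 16;
+
+   return index * pointer_size + hdr_len;
+ }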
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index cb2523af1f3..30b64ac68a8 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -2812,7 +2812,12 @@ read_and_display_attr_value (unsigned lo
+ dwarf_vmatoa ("x", debug_info_p->cu_offset));
+ debug_info_p->loclists_base = uvalue;
+ break;
+-
++ case DW_AT_rnglists_base:
++ if (debug_info_p->rnglists_base)
++ warn (_("CU @ 0x%s has multiple rnglists_base values"),
++ dwarf_vmatoa ("x", debug_info_p->cu_offset));
++ debug_info_p->rnglists_base = uvalue;
++ break;
+ case DW_AT_frame_base:
+ have_frame_base = 1;
+ /* Fall through. */
+@@ -3303,6 +3308,7 @@ read_and_display_attr_value (unsigned lo
+ /* Fall through. */
+ case DW_AT_location:
+ case DW_AT_loclists_base:
++ case DW_AT_rnglists_base:
+ case DW_AT_string_length:
+ case DW_AT_return_addr:
+ case DW_AT_data_member_location:
+@@ -3322,7 +3328,10 @@ read_and_display_attr_value (unsigned lo
+ && (form == DW_FORM_data4 || form == DW_FORM_data8))
+ || form == DW_FORM_sec_offset
+ || form == DW_FORM_loclistx)
+- printf (_(" (location list)"));
++ {
++ if (attribute != DW_AT_rnglists_base)
++ printf (_(" (location list)"));
++ }
+ /* Fall through. */
+ case DW_AT_allocated:
+ case DW_AT_associated:
+@@ -3809,6 +3818,7 @@ process_debug_info (struct dwarf_section
+ debug_information [unit].range_lists = NULL;
+ debug_information [unit].max_range_lists= 0;
+ debug_information [unit].num_range_lists = 0;
++ debug_information [unit].rnglists_base = 0;
+ }
+
+ if (!do_loc && dwarf_start_die == 0)
+@@ -7932,9 +7942,16 @@ display_debug_rnglists_list (unsigned ch
+ unsigned char * finish,
+ unsigned int pointer_size,
+ dwarf_vma offset,
+- dwarf_vma base_address)
++ dwarf_vma base_address,
++ unsigned int offset_size)
+ {
+ unsigned char *next = start;
++ unsigned int debug_addr_section_hdr_len;
++
++ if (offset_size == 4)
++ debug_addr_section_hdr_len = 8;
++ else
++ debug_addr_section_hdr_len = 16;
+
+ while (1)
+ {
+@@ -7964,20 +7981,24 @@ display_debug_rnglists_list (unsigned ch
+ READ_ULEB (base_address, start, finish);
+ print_dwarf_vma (base_address, pointer_size);
+ printf (_("(base address index) "));
+- base_address = fetch_indexed_addr (base_address, pointer_size);
++ base_address = fetch_indexed_addr ((base_address * pointer_size)
++ + debug_addr_section_hdr_len, pointer_size);
+ print_dwarf_vma (base_address, pointer_size);
+ printf (_("(base address)\n"));
+ break;
+ case DW_RLE_startx_endx:
+ READ_ULEB (begin, start, finish);
+ READ_ULEB (end, start, finish);
+- begin = fetch_indexed_addr (begin, pointer_size);
+- end = fetch_indexed_addr (begin, pointer_size);
++ begin = fetch_indexed_addr ((begin * pointer_size)
++ + debug_addr_section_hdr_len, pointer_size);
++ end = fetch_indexed_addr ((begin * pointer_size)
++ + debug_addr_section_hdr_len, pointer_size);
+ break;
+ case DW_RLE_startx_length:
+ READ_ULEB (begin, start, finish);
+ READ_ULEB (length, start, finish);
+- begin = fetch_indexed_addr (begin, pointer_size);
++ begin = fetch_indexed_addr ((begin * pointer_size)
++ + debug_addr_section_hdr_len, pointer_size);
+ end = begin + length;
+ break;
+ case DW_RLE_offset_pair:
+@@ -8003,6 +8024,7 @@ display_debug_rnglists_list (unsigned ch
+ rlet = DW_RLE_end_of_list;
+ break;
+ }
++
+ if (rlet == DW_RLE_end_of_list)
+ break;
+ if (rlet == DW_RLE_base_address || rlet == DW_RLE_base_addressx)
+@@ -8043,6 +8065,7 @@ display_debug_ranges (struct dwarf_secti
+ /* Initialize it due to a false compiler warning. */
+ unsigned char address_size = 0;
+ dwarf_vma last_offset = 0;
++ unsigned int offset_size = 0;
+
+ if (bytes == 0)
+ {
+@@ -8054,10 +8077,10 @@ display_debug_ranges (struct dwarf_secti
+
+ if (is_rnglists)
+ {
+- dwarf_vma initial_length;
+- unsigned char segment_selector_size;
+- unsigned int offset_size, offset_entry_count;
+- unsigned short version;
++ dwarf_vma initial_length;
++ unsigned char segment_selector_size;
++ unsigned int offset_entry_count;
++ unsigned short version;
+
+ /* Get and check the length of the block. */
+ SAFE_BYTE_GET_AND_INC (initial_length, start, 4, finish);
+@@ -8230,7 +8253,8 @@ display_debug_ranges (struct dwarf_secti
+ (unsigned long) offset, i);
+ continue;
+ }
+- next = section_begin + offset;
++
++ next = section_begin + offset + debug_info_p->rnglists_base;
+
+ /* If multiple DWARF entities reference the same range then we will
+ have multiple entries in the `range_entries' list for the same
+@@ -8262,7 +8286,7 @@ display_debug_ranges (struct dwarf_secti
+
+ if (is_rnglists)
+ display_debug_rnglists_list
+- (start, finish, pointer_size, offset, base_address);
++ (start, finish, pointer_size, offset, base_address, offset_size);
+ else
+ display_debug_ranges_list
+ (start, finish, pointer_size, offset, base_address);
+@@ -11911,6 +11935,9 @@ load_separate_debug_files (void * file,
+ && load_debug_section (abbrev, file)
+ && load_debug_section (info, file))
+ {
++ /* Load the .debug_addr section, if it exists. */
++ load_debug_section (debug_addr, file);
++
+ free_dwo_info ();
+
+ if (process_debug_info (& debug_displays[info].section, file, abbrev,
+diff --git a/binutils/dwarf.h b/binutils/dwarf.h
+index 040e674c6ce..8a89c08e7c2 100644
+--- a/binutils/dwarf.h
++++ b/binutils/dwarf.h
+@@ -192,6 +192,7 @@ typedef struct
+ dwarf_vma * range_lists;
+ unsigned int num_range_lists;
+ unsigned int max_range_lists;
++ dwarf_vma rnglists_base;
+ }
+ debug_info;
+
diff --git a/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-3.patch b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-3.patch
new file mode 100644
index 0000000000..56331b1128
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-3.patch
@@ -0,0 +1,211 @@
+From f18acc9c4e5d18f4783f3a7d59e3ec95d7af0199 Mon Sep 17 00:00:00 2001
+From: "Kumar N, Bhuvanendra" <Kavitha.Natarajan@amd.com>
+Date: Wed, 22 Jun 2022 17:07:25 +0100
+Subject: [PATCH] Binutils support for split-dwarf and dwarf-5
+
+ * dwarf.c (fetch_indexed_string): Added new parameter
+ str_offsets_base to calculate the string offset.
+ (read_and_display_attr_value): Read DW_AT_str_offsets_base
+ attribute.
+ (process_debug_info): While allocating memory and initializing
+ debug_information, do it for do_debug_info also, if its true.
+ (load_separate_debug_files): Load .debug_str_offsets if exists.
+ * dwarf.h (struct debug_info): Add str_offsets_base field.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=f18acc9c4e5d18f4783f3a7d59e3ec95d7af0199]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/ChangeLog | 13 ++++++++++-
+ binutils/dwarf.c | 57 ++++++++++++++++++++++++++++++++++------------
+ binutils/dwarf.h | 1 +
+ 3 files changed, 56 insertions(+), 15 deletions(-)
+
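+ (A hedged sketch of the DW_AT_str_offsets_base handling this backport adds, not taken from the patch; the helper name is hypothetical. DW_FORM_strx values index the offset array in .debug_str_offsets, and the base attribute points just past that section's header, so the base is rebased by the header length before being added to the entry offset, matching the hunk below.)
+
+ /* Illustrative only; mirrors the adjustment made in the patch.  The
+    .debug_str_offsets header is 8 bytes for 32-bit DWARF and 16 bytes
+    for 64-bit DWARF.  */
+ static dwarf_vma
+ rebase_str_offsets_base (dwarf_vma str_offsets_base, dwarf_vma offset_size)
+ {
+   if (str_offsets_base > 0)
+     str_offsets_base -= (offset_size == 8) ? 16 : 8;
+   return str_offsets_base;
+ }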
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index f9c46cf54dd..d9a3144023c 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -687,8 +687,11 @@ fetch_indirect_line_string (dwarf_vma of
+ }
+
+ static const char *
+-fetch_indexed_string (dwarf_vma idx, struct cu_tu_set *this_set,
+- dwarf_vma offset_size, bool dwo)
++fetch_indexed_string (dwarf_vma idx,
++ struct cu_tu_set * this_set,
++ dwarf_vma offset_size,
++ bool dwo,
++ dwarf_vma str_offsets_base)
+ {
+ enum dwarf_section_display_enum str_sec_idx = dwo ? str_dwo : str;
+ enum dwarf_section_display_enum idx_sec_idx = dwo ? str_index_dwo : str_index;
+@@ -776,7 +779,15 @@ fetch_indexed_string (dwarf_vma idx, str
+ return _("<index offset is too big>");
+ }
+
+- str_offset = byte_get (curr + index_offset, offset_size);
++ if (str_offsets_base > 0)
++ {
++ if (offset_size == 8)
++ str_offsets_base -= 16;
++ else
++ str_offsets_base -= 8;
++ }
++
++ str_offset = byte_get (curr + index_offset + str_offsets_base, offset_size);
+ str_offset -= str_section->address;
+ if (str_offset >= str_section->size)
+ {
+@@ -2721,11 +2732,13 @@ read_and_display_attr_value (unsigned lo
+ /* We have already displayed the form name. */
+ printf (_("%c(offset: 0x%s): %s"), delimiter,
+ dwarf_vmatoa ("x", uvalue),
+- fetch_indexed_string (uvalue, this_set, offset_size, dwo));
++ fetch_indexed_string (uvalue, this_set, offset_size, dwo,
++ debug_info_p->str_offsets_base));
+ else
+ printf (_("%c(indexed string: 0x%s): %s"), delimiter,
+ dwarf_vmatoa ("x", uvalue),
+- fetch_indexed_string (uvalue, this_set, offset_size, dwo));
++ fetch_indexed_string (uvalue, this_set, offset_size, dwo,
++ debug_info_p->str_offsets_base));
+ }
+ break;
+
+@@ -2800,7 +2813,7 @@ read_and_display_attr_value (unsigned lo
+ break;
+ }
+
+- if ((do_loc || do_debug_loc || do_debug_ranges)
++ if ((do_loc || do_debug_loc || do_debug_ranges || do_debug_info)
+ && num_debug_info_entries == 0
+ && debug_info_p != NULL)
+ {
+@@ -2818,6 +2831,13 @@ read_and_display_attr_value (unsigned lo
+ dwarf_vmatoa ("x", debug_info_p->cu_offset));
+ debug_info_p->rnglists_base = uvalue;
+ break;
++ case DW_AT_str_offsets_base:
++ if (debug_info_p->str_offsets_base)
++ warn (_("CU @ 0x%s has multiple str_offsets_base values"),
++ dwarf_vmatoa ("x", debug_info_p->cu_offset));
++ debug_info_p->str_offsets_base = uvalue;
++ break;
++
+ case DW_AT_frame_base:
+ have_frame_base = 1;
+ /* Fall through. */
+@@ -2956,7 +2976,9 @@ read_and_display_attr_value (unsigned lo
+ case DW_FORM_strx2:
+ case DW_FORM_strx3:
+ case DW_FORM_strx4:
+- add_dwo_name (fetch_indexed_string (uvalue, this_set, offset_size, false), cu_offset);
++ add_dwo_name (fetch_indexed_string (uvalue, this_set, offset_size, false,
++ debug_info_p->str_offsets_base),
++ cu_offset);
+ break;
+ case DW_FORM_string:
+ add_dwo_name ((const char *) orig_data, cu_offset);
+@@ -2988,7 +3010,9 @@ read_and_display_attr_value (unsigned lo
+ case DW_FORM_strx2:
+ case DW_FORM_strx3:
+ case DW_FORM_strx4:
+- add_dwo_dir (fetch_indexed_string (uvalue, this_set, offset_size, false), cu_offset);
++ add_dwo_dir (fetch_indexed_string (uvalue, this_set, offset_size, false,
++ debug_info_p->str_offsets_base),
++ cu_offset);
+ break;
+ case DW_FORM_string:
+ add_dwo_dir ((const char *) orig_data, cu_offset);
+@@ -3309,6 +3333,7 @@ read_and_display_attr_value (unsigned lo
+ case DW_AT_location:
+ case DW_AT_loclists_base:
+ case DW_AT_rnglists_base:
++ case DW_AT_str_offsets_base:
+ case DW_AT_string_length:
+ case DW_AT_return_addr:
+ case DW_AT_data_member_location:
+@@ -3329,7 +3354,8 @@ read_and_display_attr_value (unsigned lo
+ || form == DW_FORM_sec_offset
+ || form == DW_FORM_loclistx)
+ {
+- if (attribute != DW_AT_rnglists_base)
++ if (attribute != DW_AT_rnglists_base
++ && attribute != DW_AT_str_offsets_base)
+ printf (_(" (location list)"));
+ }
+ /* Fall through. */
+@@ -3562,7 +3588,7 @@ process_debug_info (struct dwarf_section
+ return false;
+ }
+
+- if ((do_loc || do_debug_loc || do_debug_ranges)
++ if ((do_loc || do_debug_loc || do_debug_ranges || do_debug_info)
+ && num_debug_info_entries == 0
+ && ! do_types)
+ {
+@@ -3797,7 +3823,7 @@ process_debug_info (struct dwarf_section
+ continue;
+ }
+
+- if ((do_loc || do_debug_loc || do_debug_ranges)
++ if ((do_loc || do_debug_loc || do_debug_ranges || do_debug_info)
+ && num_debug_info_entries == 0
+ && alloc_num_debug_info_entries > unit
+ && ! do_types)
+@@ -3819,6 +3845,7 @@ process_debug_info (struct dwarf_section
+ debug_information [unit].max_range_lists= 0;
+ debug_information [unit].num_range_lists = 0;
+ debug_information [unit].rnglists_base = 0;
++ debug_information [unit].str_offsets_base = 0;
+ }
+
+ if (!do_loc && dwarf_start_die == 0)
+@@ -4089,7 +4116,7 @@ process_debug_info (struct dwarf_section
+
+ /* Set num_debug_info_entries here so that it can be used to check if
+ we need to process .debug_loc and .debug_ranges sections. */
+- if ((do_loc || do_debug_loc || do_debug_ranges)
++ if ((do_loc || do_debug_loc || do_debug_ranges || do_debug_info)
+ && num_debug_info_entries == 0
+ && ! do_types)
+ {
+@@ -6237,7 +6264,7 @@ display_debug_macro (struct dwarf_sectio
+ READ_ULEB (lineno, curr, end);
+ READ_ULEB (offset, curr, end);
+ string = (const unsigned char *)
+- fetch_indexed_string (offset, NULL, offset_size, false);
++ fetch_indexed_string (offset, NULL, offset_size, false, 0);
+ if (op == DW_MACRO_define_strx)
+ printf (" DW_MACRO_define_strx ");
+ else
+@@ -7851,7 +7878,7 @@ display_debug_str_offsets (struct dwarf_
+ SAFE_BYTE_GET_AND_INC (offset, curr, entry_length, entries_end);
+ if (dwo)
+ string = (const unsigned char *)
+- fetch_indexed_string (idx, NULL, entry_length, dwo);
++ fetch_indexed_string (idx, NULL, entry_length, dwo, 0);
+ else
+ string = fetch_indirect_string (offset);
+
+@@ -11937,6 +11964,8 @@ load_separate_debug_files (void * file,
+ {
+ /* Load the .debug_addr section, if it exists. */
+ load_debug_section (debug_addr, file);
++ /* Load the .debug_str_offsets section, if it exists. */
++ load_debug_section (str_index, file);
+
+ free_dwo_info ();
+
+diff --git a/binutils/dwarf.h b/binutils/dwarf.h
+index 8a89c08e7c2..adbf20f9a28 100644
+--- a/binutils/dwarf.h
++++ b/binutils/dwarf.h
+@@ -193,6 +193,7 @@ typedef struct
+ unsigned int num_range_lists;
+ unsigned int max_range_lists;
+ dwarf_vma rnglists_base;
++ dwarf_vma str_offsets_base;
+ }
+ debug_info;
+
diff --git a/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-4.patch b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-4.patch
new file mode 100644
index 0000000000..e59b19c184
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0017-CVE-2022-38127-4.patch
@@ -0,0 +1,43 @@
+From e98e7d9a70dcc987bff0e925f20b78cd4a2979ed Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Mon, 27 Jun 2022 13:30:35 +0100
+Subject: [PATCH] Fix NULL pointer indirection when parsing corrupt DWARF data.
+
+ PR 29290
+ * dwarf.c (read_and_display_attr_value): Check that debug_info_p
+ is set before dereferencing it.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=e98e7d9a70dcc987bff0e925f20b78cd4a2979ed]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/dwarf.c | 11 +++++------
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index bcabb61b871..37b477b886d 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -2727,18 +2727,17 @@ read_and_display_attr_value (unsigned lo
+ {
+ const char *suffix = strrchr (section->name, '.');
+ bool dwo = suffix && strcmp (suffix, ".dwo") == 0;
++ const char *strng;
+
++ strng = fetch_indexed_string (uvalue, this_set, offset_size, dwo,
++ debug_info_p ? debug_info_p->str_offsets_base : 0);
+ if (do_wide)
+ /* We have already displayed the form name. */
+ printf (_("%c(offset: 0x%s): %s"), delimiter,
+- dwarf_vmatoa ("x", uvalue),
+- fetch_indexed_string (uvalue, this_set, offset_size, dwo,
+- debug_info_p->str_offsets_base));
++ dwarf_vmatoa ("x", uvalue), strng);
+ else
+ printf (_("%c(indexed string: 0x%s): %s"), delimiter,
+- dwarf_vmatoa ("x", uvalue),
+- fetch_indexed_string (uvalue, this_set, offset_size, dwo,
+- debug_info_p->str_offsets_base));
++ dwarf_vmatoa ("x", uvalue), strng);
+ }
+ break;
+
diff --git a/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-1.patch b/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-1.patch
new file mode 100644
index 0000000000..0a490d86b3
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-1.patch
@@ -0,0 +1,350 @@
+From f07c08e115e27cddf5a0030dc6332bbee1bd9c6a Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 21 Jul 2022 08:38:14 +0930
+Subject: [PATCH] binutils/dwarf.c: abbrev caching
+
+I'm inclined to think that abbrev caching is counter-productive. The
+time taken to search the list of abbrevs converted to internal form is
+non-zero, and it's easy to decode the raw abbrevs. It's especially
+silly to cache empty lists of decoded abbrevs (happens with zero
+padding in .debug_abbrev), or abbrevs as they are displayed when there
+is no further use of those abbrevs. This patch stops caching in those
+cases.
+
+ * dwarf.c (record_abbrev_list_for_cu): Add free_list param.
+ Put abbrevs on abbrev_lists here.
+ (new_abbrev_list): Delete function.
+ (process_abbrev_set): Return newly allocated list. Move
+ abbrev base, offset and size checking to..
+ (find_and_process_abbrev_set): ..here, new function. Handle
+ lookup of cached abbrevs here, and calculate start and end
+ for process_abbrev_set. Return free_list if newly alloc'd.
+ (process_debug_info): Consolidate cached list lookup, new list
+ alloc and processing into find_and_process_abbrev_set call.
+ Free list when not cached.
+ (display_debug_abbrev): Similarly.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=f07c08e115e27cddf5a0030dc6332bbee1bd9c6a]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/dwarf.c | 208 +++++++++++++++++++++++++----------------------
+ 1 file changed, 110 insertions(+), 98 deletions(-)
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index 267ed3bb382..2fc352f74c5 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -882,8 +882,15 @@ static unsigned long next_free_abbrev_m
+ #define ABBREV_MAP_ENTRIES_INCREMENT 8
+
+ static void
+-record_abbrev_list_for_cu (dwarf_vma start, dwarf_vma end, abbrev_list * list)
++record_abbrev_list_for_cu (dwarf_vma start, dwarf_vma end,
++ abbrev_list *list, abbrev_list *free_list)
+ {
++ if (free_list != NULL)
++ {
++ list->next = abbrev_lists;
++ abbrev_lists = list;
++ }
++
+ if (cu_abbrev_map == NULL)
+ {
+ num_abbrev_map_entries = INITIAL_NUM_ABBREV_MAP_ENTRIES;
+@@ -936,20 +943,6 @@ free_all_abbrevs (void)
+ }
+
+ static abbrev_list *
+-new_abbrev_list (dwarf_vma abbrev_base, dwarf_vma abbrev_offset)
+-{
+- abbrev_list * list = (abbrev_list *) xcalloc (sizeof * list, 1);
+-
+- list->abbrev_base = abbrev_base;
+- list->abbrev_offset = abbrev_offset;
+-
+- list->next = abbrev_lists;
+- abbrev_lists = list;
+-
+- return list;
+-}
+-
+-static abbrev_list *
+ find_abbrev_list_by_abbrev_offset (dwarf_vma abbrev_base,
+ dwarf_vma abbrev_offset)
+ {
+@@ -966,7 +959,7 @@ find_abbrev_list_by_abbrev_offset (dwarf
+ /* Find the abbreviation map for the CU that includes OFFSET.
+ OFFSET is an absolute offset from the start of the .debug_info section. */
+ /* FIXME: This function is going to slow down readelf & objdump.
+- Consider using a better algorithm to mitigate this effect. */
++ Not caching abbrevs is likely the answer. */
+
+ static abbrev_map *
+ find_abbrev_map_by_offset (dwarf_vma offset)
+@@ -1033,40 +1026,18 @@ add_abbrev_attr (unsigned long attrib
+ list->last_abbrev->last_attr = attr;
+ }
+
+-/* Processes the (partial) contents of a .debug_abbrev section.
+- Returns NULL if the end of the section was encountered.
+- Returns the address after the last byte read if the end of
+- an abbreviation set was found. */
++/* Return processed (partial) contents of a .debug_abbrev section.
++ Returns NULL on errors. */
+
+-static unsigned char *
++static abbrev_list *
+ process_abbrev_set (struct dwarf_section *section,
+- dwarf_vma abbrev_base,
+- dwarf_vma abbrev_size,
+- dwarf_vma abbrev_offset,
+- abbrev_list *list)
++ unsigned char *start,
++ unsigned char *end)
+ {
+- if (abbrev_base >= section->size
+- || abbrev_size > section->size - abbrev_base)
+- {
+- /* PR 17531: file:4bcd9ce9. */
+- warn (_("Debug info is corrupted, abbrev size (%lx) is larger than "
+- "abbrev section size (%lx)\n"),
+- (unsigned long) (abbrev_base + abbrev_size),
+- (unsigned long) section->size);
+- return NULL;
+- }
+- if (abbrev_offset >= abbrev_size)
+- {
+- warn (_("Debug info is corrupted, abbrev offset (%lx) is larger than "
+- "abbrev section size (%lx)\n"),
+- (unsigned long) abbrev_offset,
+- (unsigned long) abbrev_size);
+- return NULL;
+- }
++ abbrev_list *list = xmalloc (sizeof (*list));
++ list->first_abbrev = NULL;
++ list->last_abbrev = NULL;
+
+- unsigned char *start = section->start + abbrev_base;
+- unsigned char *end = start + abbrev_size;
+- start += abbrev_offset;
+ while (start < end)
+ {
+ unsigned long entry;
+@@ -1079,14 +1050,18 @@ process_abbrev_set (struct dwarf_section
+ /* A single zero is supposed to end the set according
+ to the standard. If there's more, then signal that to
+ the caller. */
+- if (start == end)
+- return NULL;
+- if (entry == 0)
+- return start;
++ if (start == end || entry == 0)
++ {
++ list->start_of_next_abbrevs = start != end ? start : NULL;
++ return list;
++ }
+
+ READ_ULEB (tag, start, end);
+ if (start == end)
+- return NULL;
++ {
++ free (list);
++ return NULL;
++ }
+
+ children = *start++;
+
+@@ -1121,9 +1096,67 @@ process_abbrev_set (struct dwarf_section
+ /* Report the missing single zero which ends the section. */
+ error (_(".debug_abbrev section not zero terminated\n"));
+
++ free (list);
+ return NULL;
+ }
+
++/* Return a sequence of abbrevs in SECTION starting at ABBREV_BASE
++ plus ABBREV_OFFSET and finishing at ABBREV_BASE + ABBREV_SIZE.
++ If FREE_LIST is non-NULL search the already decoded abbrevs on
++ abbrev_lists first and if found set *FREE_LIST to NULL. If
++ searching doesn't find a matching abbrev, set *FREE_LIST to the
++ newly allocated list. If FREE_LIST is NULL, no search is done and
++ the returned abbrev_list is always newly allocated. */
++
++static abbrev_list *
++find_and_process_abbrev_set (struct dwarf_section *section,
++ dwarf_vma abbrev_base,
++ dwarf_vma abbrev_size,
++ dwarf_vma abbrev_offset,
++ abbrev_list **free_list)
++{
++ if (free_list)
++ *free_list = NULL;
++
++ if (abbrev_base >= section->size
++ || abbrev_size > section->size - abbrev_base)
++ {
++ /* PR 17531: file:4bcd9ce9. */
++ warn (_("Debug info is corrupted, abbrev size (%lx) is larger than "
++ "abbrev section size (%lx)\n"),
++ (unsigned long) (abbrev_base + abbrev_size),
++ (unsigned long) section->size);
++ return NULL;
++ }
++ if (abbrev_offset >= abbrev_size)
++ {
++ warn (_("Debug info is corrupted, abbrev offset (%lx) is larger than "
++ "abbrev section size (%lx)\n"),
++ (unsigned long) abbrev_offset,
++ (unsigned long) abbrev_size);
++ return NULL;
++ }
++
++ unsigned char *start = section->start + abbrev_base + abbrev_offset;
++ unsigned char *end = section->start + abbrev_base + abbrev_size;
++ abbrev_list *list = NULL;
++ if (free_list)
++ list = find_abbrev_list_by_abbrev_offset (abbrev_base, abbrev_offset);
++ if (list == NULL)
++ {
++ list = process_abbrev_set (section, start, end);
++ if (list)
++ {
++ list->abbrev_base = abbrev_base;
++ list->abbrev_offset = abbrev_offset;
++ list->next = NULL;
++ }
++ if (free_list)
++ *free_list = list;
++ }
++ return list;
++}
++
+ static const char *
+ get_TAG_name (unsigned long tag)
+ {
+@@ -3670,7 +3703,6 @@ process_debug_info (struct dwarf_section
+ dwarf_vma cu_offset;
+ unsigned int offset_size;
+ struct cu_tu_set * this_set;
+- abbrev_list * list;
+ unsigned char *end_cu;
+
+ hdrptr = start;
+@@ -3726,22 +3758,18 @@ process_debug_info (struct dwarf_section
+ abbrev_size = this_set->section_sizes [DW_SECT_ABBREV];
+ }
+
+- list = find_abbrev_list_by_abbrev_offset (abbrev_base,
+- compunit.cu_abbrev_offset);
+- if (list == NULL)
+- {
+- unsigned char * next;
+-
+- list = new_abbrev_list (abbrev_base,
+- compunit.cu_abbrev_offset);
+- next = process_abbrev_set (&debug_displays[abbrev_sec].section,
+- abbrev_base, abbrev_size,
+- compunit.cu_abbrev_offset, list);
+- list->start_of_next_abbrevs = next;
+- }
+-
++ abbrev_list *list;
++ abbrev_list *free_list;
++ list = find_and_process_abbrev_set (&debug_displays[abbrev_sec].section,
++ abbrev_base, abbrev_size,
++ compunit.cu_abbrev_offset,
++ &free_list);
+ start = end_cu;
+- record_abbrev_list_for_cu (cu_offset, start - section_begin, list);
++ if (list != NULL && list->first_abbrev != NULL)
++ record_abbrev_list_for_cu (cu_offset, start - section_begin,
++ list, free_list);
++ else if (free_list != NULL)
++ free_abbrev_list (free_list);
+ }
+
+ for (start = section_begin, unit = 0; start < end; unit++)
+@@ -3757,7 +3785,6 @@ process_debug_info (struct dwarf_section
+ struct cu_tu_set *this_set;
+ dwarf_vma abbrev_base;
+ size_t abbrev_size;
+- abbrev_list * list = NULL;
+ unsigned char *end_cu;
+
+ hdrptr = start;
+@@ -3936,20 +3963,10 @@ process_debug_info (struct dwarf_section
+ }
+
+ /* Process the abbrevs used by this compilation unit. */
+- list = find_abbrev_list_by_abbrev_offset (abbrev_base,
+- compunit.cu_abbrev_offset);
+- if (list == NULL)
+- {
+- unsigned char *next;
+-
+- list = new_abbrev_list (abbrev_base,
+- compunit.cu_abbrev_offset);
+- next = process_abbrev_set (&debug_displays[abbrev_sec].section,
+- abbrev_base, abbrev_size,
+- compunit.cu_abbrev_offset, list);
+- list->start_of_next_abbrevs = next;
+- }
+-
++ abbrev_list *list;
++ list = find_and_process_abbrev_set (&debug_displays[abbrev_sec].section,
++ abbrev_base, abbrev_size,
++ compunit.cu_abbrev_offset, NULL);
+ level = 0;
+ last_level = level;
+ saved_level = -1;
+@@ -4128,6 +4145,8 @@ process_debug_info (struct dwarf_section
+ if (entry->children)
+ ++level;
+ }
++ if (list != NULL)
++ free_abbrev_list (list);
+ }
+
+ /* Set num_debug_info_entries here so that it can be used to check if
+@@ -6353,24 +6372,15 @@ display_debug_abbrev (struct dwarf_secti
+
+ do
+ {
+- abbrev_list * list;
+- dwarf_vma offset;
+-
+- offset = start - section->start;
+- list = find_abbrev_list_by_abbrev_offset (0, offset);
++ dwarf_vma offset = start - section->start;
++ abbrev_list *list = find_and_process_abbrev_set (section, 0,
++ section->size, offset,
++ NULL);
+ if (list == NULL)
+- {
+- list = new_abbrev_list (0, offset);
+- start = process_abbrev_set (section, 0, section->size, offset, list);
+- list->start_of_next_abbrevs = start;
+- }
+- else
+- start = list->start_of_next_abbrevs;
+-
+- if (list->first_abbrev == NULL)
+- continue;
++ break;
+
+- printf (_(" Number TAG (0x%lx)\n"), (long) offset);
++ if (list->first_abbrev)
++ printf (_(" Number TAG (0x%lx)\n"), (long) offset);
+
+ for (entry = list->first_abbrev; entry; entry = entry->next)
+ {
+@@ -6391,6 +6401,8 @@ display_debug_abbrev (struct dwarf_secti
+ putchar ('\n');
+ }
+ }
++ start = list->start_of_next_abbrevs;
++ free_abbrev_list (list);
+ }
+ while (start);
+
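The hunks above replace the old new_abbrev_list()/process_abbrev_set() pair with find_and_process_abbrev_set(), which hands any freshly allocated list back through *free_list so the caller can either record it for the CU or free it. A minimal stand-alone C sketch of that caller-side ownership pattern (illustrative only; the names are invented and this is not binutils code):

#include <stdio.h>
#include <stdlib.h>

struct abbrevs
{
  int fresh;
};

/* Return a shared object for key 0, otherwise allocate a new one and
   hand ownership back through *free_list.  */
static struct abbrevs *
lookup_or_build (int key, struct abbrevs **free_list)
{
  static struct abbrevs cached = { 0 };

  *free_list = NULL;
  if (key == 0)
    return &cached;            /* cached entry, caller must not free it */

  struct abbrevs *a = calloc (1, sizeof *a);
  if (a != NULL)
    {
      a->fresh = 1;
      *free_list = a;          /* caller now owns this allocation */
    }
  return a;
}

int
main (void)
{
  for (int key = 0; key < 2; key++)
    {
      struct abbrevs *free_list;
      struct abbrevs *a = lookup_or_build (key, &free_list);

      if (a != NULL)
        printf ("key %d: fresh=%d\n", key, a->fresh);
      free (free_list);        /* no-op when the result came from the cache */
    }
  return 0;
}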
diff --git a/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-2.patch b/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-2.patch
new file mode 100644
index 0000000000..b867b04e96
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-2.patch
@@ -0,0 +1,436 @@
+From 175b91507b83ad42607d2f6dadaf55b7b511bdbe Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 20 Jul 2022 18:28:50 +0930
+Subject: [PATCH] miscellaneous dwarf.c tidies
+
+ * dwarf.c: Leading and trailing whitespace fixes.
+ (free_abbrev_list): New function.
+ (free_all_abbrevs): Use the above. Free cu_abbrev_map here too.
+ (process_abbrev_set): Print actual section name on error.
+ (get_type_abbrev_from_form): Add overflow check.
+ (free_debug_memory): Don't free cu_abbrev_map here..
+ (process_debug_info): ..or here. Warn on another case of not
+ finding a needed abbrev.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=175b91507b83ad42607d2f6dadaf55b7b511bdbe]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/dwarf.c | 216 +++++++++++++++++++++++------------------------
+ 1 file changed, 106 insertions(+), 110 deletions(-)
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index 2b1eec49422..267ed3bb382 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -954,38 +954,41 @@ record_abbrev_list_for_cu (dwarf_vma sta
+ next_free_abbrev_map_entry ++;
+ }
+
+-static void
+-free_all_abbrevs (void)
++static abbrev_list *
++free_abbrev_list (abbrev_list *list)
+ {
+- abbrev_list * list;
++ abbrev_entry *abbrv = list->first_abbrev;
+
+- for (list = abbrev_lists; list != NULL;)
++ while (abbrv)
+ {
+- abbrev_list * next = list->next;
+- abbrev_entry * abbrv;
++ abbrev_attr *attr = abbrv->first_attr;
+
+- for (abbrv = list->first_abbrev; abbrv != NULL;)
++ while (attr)
+ {
+- abbrev_entry * next_abbrev = abbrv->next;
+- abbrev_attr * attr;
+-
+- for (attr = abbrv->first_attr; attr;)
+- {
+- abbrev_attr *next_attr = attr->next;
+-
+- free (attr);
+- attr = next_attr;
+- }
+-
+- free (abbrv);
+- abbrv = next_abbrev;
++ abbrev_attr *next_attr = attr->next;
++ free (attr);
++ attr = next_attr;
+ }
+
+- free (list);
+- list = next;
++ abbrev_entry *next_abbrev = abbrv->next;
++ free (abbrv);
++ abbrv = next_abbrev;
+ }
+
+- abbrev_lists = NULL;
++ abbrev_list *next = list->next;
++ free (list);
++ return next;
++}
++
++static void
++free_all_abbrevs (void)
++{
++ while (abbrev_lists)
++ abbrev_lists = free_abbrev_list (abbrev_lists);
++
++ free (cu_abbrev_map);
++ cu_abbrev_map = NULL;
++ next_free_abbrev_map_entry = 0;
+ }
+
+ static abbrev_list *
+@@ -1017,7 +1020,7 @@ find_abbrev_map_by_offset (dwarf_vma off
+ && cu_abbrev_map[i].end > offset)
+ return cu_abbrev_map + i;
+
+- return NULL;
++ return NULL;
+ }
+
+ static void
+@@ -1140,7 +1143,7 @@ process_abbrev_set (struct dwarf_section
+ }
+
+ /* Report the missing single zero which ends the section. */
+- error (_(".debug_abbrev section not zero terminated\n"));
++ error (_("%s section not zero terminated\n"), section->name);
+
+ free (list);
+ return NULL;
+@@ -1917,7 +1920,7 @@ fetch_alt_indirect_string (dwarf_vma off
+ dwarf_vmatoa ("x", offset));
+ return _("<offset is too big>");
+ }
+-
++
+ static const char *
+ get_AT_name (unsigned long attribute)
+ {
+@@ -2199,7 +2202,8 @@ get_type_abbrev_from_form (unsigned long
+ case DW_FORM_ref4:
+ case DW_FORM_ref8:
+ case DW_FORM_ref_udata:
+- if (uvalue + cu_offset > (size_t) (cu_end - section->start))
++ if (uvalue + cu_offset < uvalue
++ || uvalue + cu_offset > (size_t) (cu_end - section->start))
+ {
+ warn (_("Unable to resolve ref form: uvalue %lx + cu_offset %lx > CU size %lx\n"),
+ uvalue, (long) cu_offset, (long) (cu_end - section->start));
+@@ -2236,7 +2240,7 @@ get_type_abbrev_from_form (unsigned long
+ else
+ *map_return = NULL;
+ }
+-
++
+ READ_ULEB (abbrev_number, data, section->start + section->size);
+
+ for (entry = map->list->first_abbrev; entry != NULL; entry = entry->next)
+@@ -2837,7 +2841,7 @@ read_and_display_attr_value (unsigned lo
+ if (!do_loc)
+ printf ("%c<0x%s>", delimiter, dwarf_vmatoa ("x", uvalue + cu_offset));
+ break;
+-
++
+ default:
+ warn (_("Unrecognized form: 0x%lx\n"), form);
+ /* What to do? Consume a byte maybe? */
+@@ -3009,7 +3013,7 @@ read_and_display_attr_value (unsigned lo
+ case DW_FORM_strx3:
+ case DW_FORM_strx4:
+ add_dwo_name (fetch_indexed_string (uvalue, this_set, offset_size, false,
+- debug_info_p->str_offsets_base),
++ debug_info_p->str_offsets_base),
+ cu_offset);
+ break;
+ case DW_FORM_string:
+@@ -3043,7 +3047,7 @@ read_and_display_attr_value (unsigned lo
+ case DW_FORM_strx3:
+ case DW_FORM_strx4:
+ add_dwo_dir (fetch_indexed_string (uvalue, this_set, offset_size, false,
+- debug_info_p->str_offsets_base),
++ debug_info_p->str_offsets_base),
+ cu_offset);
+ break;
+ case DW_FORM_string:
+@@ -3671,11 +3675,8 @@ process_debug_info (struct dwarf_section
+ introduce (section, false);
+
+ free_all_abbrevs ();
+- free (cu_abbrev_map);
+- cu_abbrev_map = NULL;
+- next_free_abbrev_map_entry = 0;
+
+- /* In order to be able to resolve DW_FORM_ref_attr forms we need
++ /* In order to be able to resolve DW_FORM_ref_addr forms we need
+ to load *all* of the abbrevs for all CUs in this .debug_info
+ section. This does effectively mean that we (partially) read
+ every CU header twice. */
+@@ -4029,12 +4030,11 @@ process_debug_info (struct dwarf_section
+
+ /* Scan through the abbreviation list until we reach the
+ correct entry. */
+- if (list == NULL)
+- continue;
+-
+- for (entry = list->first_abbrev; entry != NULL; entry = entry->next)
+- if (entry->number == abbrev_number)
+- break;
++ entry = NULL;
++ if (list != NULL)
++ for (entry = list->first_abbrev; entry != NULL; entry = entry->next)
++ if (entry->number == abbrev_number)
++ break;
+
+ if (entry == NULL)
+ {
+@@ -4442,7 +4442,7 @@ display_debug_sup (struct dwarf_section
+
+ SAFE_BYTE_GET_AND_INC (is_supplementary, start, 1, end);
+ if (is_supplementary != 0 && is_supplementary != 1)
+- warn (_("corrupt .debug_sup section: is_supplementary not 0 or 1\n"));
++ warn (_("corrupt .debug_sup section: is_supplementary not 0 or 1\n"));
+
+ sup_filename = start;
+ if (is_supplementary && sup_filename[0] != 0)
+@@ -5621,7 +5621,7 @@ display_debug_lines_decoded (struct dwar
+ printf ("%s %11d %#18" DWARF_VMA_FMT "x",
+ newFileName, state_machine_regs.line,
+ state_machine_regs.address);
+- }
++ }
+ else
+ {
+ if (xop == -DW_LNE_end_sequence)
+@@ -6075,7 +6075,7 @@ display_debug_macro (struct dwarf_sectio
+ load_debug_section_with_follow (str, file);
+ load_debug_section_with_follow (line, file);
+ load_debug_section_with_follow (str_index, file);
+-
++
+ introduce (section, false);
+
+ while (curr < end)
+@@ -6519,7 +6519,7 @@ display_loc_list (struct dwarf_section *
+
+ /* Check base address specifiers. */
+ if (is_max_address (begin, pointer_size)
+- && !is_max_address (end, pointer_size))
++ && !is_max_address (end, pointer_size))
+ {
+ base_address = end;
+ print_dwarf_vma (begin, pointer_size);
+@@ -6697,7 +6697,7 @@ display_loclists_list (struct dwarf_sect
+ case DW_LLE_default_location:
+ begin = end = 0;
+ break;
+-
++
+ case DW_LLE_offset_pair:
+ READ_ULEB (begin, start, section_end);
+ begin += base_address;
+@@ -6993,7 +6993,7 @@ display_offset_entry_loclists (struct dw
+ unsigned char * start = section->start;
+ unsigned char * const end = start + section->size;
+
+- introduce (section, false);
++ introduce (section, false);
+
+ do
+ {
+@@ -7042,14 +7042,14 @@ display_offset_entry_loclists (struct dw
+ section->name, segment_selector_size);
+ return 0;
+ }
+-
++
+ if (offset_entry_count == 0)
+ {
+ warn (_("The %s section contains a table without offset\n"),
+ section->name);
+ return 0;
+ }
+-
++
+ printf (_("\n Offset Entries starting at 0x%lx:\n"),
+ (long)(start - section->start));
+
+@@ -8295,12 +8295,12 @@ display_debug_ranges (struct dwarf_secti
+ next = section_begin + offset + debug_info_p->rnglists_base;
+
+ /* If multiple DWARF entities reference the same range then we will
+- have multiple entries in the `range_entries' list for the same
+- offset. Thanks to the sort above these will all be consecutive in
+- the `range_entries' list, so we can easily ignore duplicates
+- here. */
++ have multiple entries in the `range_entries' list for the same
++ offset. Thanks to the sort above these will all be consecutive in
++ the `range_entries' list, so we can easily ignore duplicates
++ here. */
+ if (i > 0 && last_offset == offset)
+- continue;
++ continue;
+ last_offset = offset;
+
+ if (dwarf_check != 0 && i > 0)
+@@ -10336,7 +10336,7 @@ display_debug_names (struct dwarf_sectio
+ break;
+ if (tagno >= 0)
+ printf ("%s<%lu>",
+- (tagno == 0 && second_abbrev_tag == 0 ? " " : "\n\t"),
++ (tagno == 0 && second_abbrev_tag == 0 ? " " : "\n\t"),
+ (unsigned long) abbrev_tag);
+
+ for (entry = abbrev_lookup;
+@@ -10901,7 +10901,7 @@ process_cu_tu_index (struct dwarf_sectio
+ Check for integer overflow (can occur when size_t is 32-bit)
+ with overlarge ncols or nused values. */
+ if (nused == -1u
+- || _mul_overflow ((size_t) ncols, 4, &temp)
++ || _mul_overflow ((size_t) ncols, 4, &temp)
+ || _mul_overflow ((size_t) nused + 1, temp, &total)
+ || total > (size_t) (limit - ppool))
+ {
+@@ -10909,7 +10909,7 @@ process_cu_tu_index (struct dwarf_sectio
+ section->name);
+ return 0;
+ }
+-
++
+ if (do_display)
+ {
+ printf (_(" Offset table\n"));
+@@ -11413,8 +11413,8 @@ add_separate_debug_file (const char * fi
+
+ static bool
+ debuginfod_fetch_separate_debug_info (struct dwarf_section * section,
+- char ** filename,
+- void * file)
++ char ** filename,
++ void * file)
+ {
+ size_t build_id_len;
+ unsigned char * build_id;
+@@ -11432,14 +11432,14 @@ debuginfod_fetch_separate_debug_info (st
+
+ filelen = strnlen ((const char *)section->start, section->size);
+ if (filelen == section->size)
+- /* Corrupt debugaltlink. */
+- return false;
++ /* Corrupt debugaltlink. */
++ return false;
+
+ build_id = section->start + filelen + 1;
+ build_id_len = section->size - (filelen + 1);
+
+ if (build_id_len == 0)
+- return false;
++ return false;
+ }
+ else
+ return false;
+@@ -11451,25 +11451,25 @@ debuginfod_fetch_separate_debug_info (st
+
+ client = debuginfod_begin ();
+ if (client == NULL)
+- return false;
++ return false;
+
+ /* Query debuginfod servers for the target file. If found its path
+- will be stored in filename. */
++ will be stored in filename. */
+ fd = debuginfod_find_debuginfo (client, build_id, build_id_len, filename);
+ debuginfod_end (client);
+
+ /* Only free build_id if we allocated space for a hex string
+- in get_build_id (). */
++ in get_build_id (). */
+ if (build_id_len == 0)
+- free (build_id);
++ free (build_id);
+
+ if (fd >= 0)
+- {
+- /* File successfully retrieved. Close fd since we want to
+- use open_debug_file () on filename instead. */
+- close (fd);
+- return true;
+- }
++ {
++ /* File successfully retrieved. Close fd since we want to
++ use open_debug_file () on filename instead. */
++ close (fd);
++ return true;
++ }
+ }
+
+ return false;
+@@ -11482,7 +11482,7 @@ load_separate_debug_info (const char *
+ parse_func_type parse_func,
+ check_func_type check_func,
+ void * func_data,
+- void * file ATTRIBUTE_UNUSED)
++ void * file ATTRIBUTE_UNUSED)
+ {
+ const char * separate_filename;
+ char * debug_filename;
+@@ -11597,11 +11597,11 @@ load_separate_debug_info (const char *
+ & tmp_filename,
+ file))
+ {
+- /* File successfully downloaded from server, replace
+- debug_filename with the file's path. */
+- free (debug_filename);
+- debug_filename = tmp_filename;
+- goto found;
++ /* File successfully downloaded from server, replace
++ debug_filename with the file's path. */
++ free (debug_filename);
++ debug_filename = tmp_filename;
++ goto found;
+ }
+ }
+ #endif
+@@ -11766,12 +11766,12 @@ load_build_id_debug_file (const char * m
+ /* In theory we should extract the contents of the section into
+ a note structure and then check the fields. For now though
+ just use hard coded offsets instead:
+-
++
+ Field Bytes Contents
+ NSize 0...3 4
+ DSize 4...7 8+
+ Type 8..11 3 (NT_GNU_BUILD_ID)
+- Name 12.15 GNU\0
++ Name 12.15 GNU\0
+ Data 16.... */
+
+ /* FIXME: Check the name size, name and type fields. */
+@@ -11783,7 +11783,7 @@ load_build_id_debug_file (const char * m
+ warn (_(".note.gnu.build-id data size is too small\n"));
+ return;
+ }
+-
++
+ if (build_id_size > (section->size - 16))
+ {
+ warn (_(".note.gnu.build-id data size is too bug\n"));
+@@ -12075,10 +12075,6 @@ free_debug_memory (void)
+
+ free_all_abbrevs ();
+
+- free (cu_abbrev_map);
+- cu_abbrev_map = NULL;
+- next_free_abbrev_map_entry = 0;
+-
+ free (shndx_pool);
+ shndx_pool = NULL;
+ shndx_pool_size = 0;
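The rework above introduces free_abbrev_list(), which frees one list and returns its successor so that free_all_abbrevs() reduces to a simple loop. A small stand-alone C sketch of that idiom (illustrative only, not the binutils code):

#include <stdlib.h>

struct node
{
  struct node *next;
};

/* Free NODE and return its successor so the caller can keep iterating.  */
static struct node *
free_node (struct node *node)
{
  struct node *next = node->next;
  free (node);
  return next;
}

int
main (void)
{
  /* Build a short list, then drain it the way the patched
     free_all_abbrevs() drains abbrev_lists.  */
  struct node *head = NULL;
  for (int i = 0; i < 3; i++)
    {
      struct node *n = calloc (1, sizeof *n);
      if (n == NULL)
        break;
      n->next = head;
      head = n;
    }

  while (head)
    head = free_node (head);

  return 0;
}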
diff --git a/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-3.patch b/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-3.patch
new file mode 100644
index 0000000000..04d06ed6b6
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0018-CVE-2022-38128-3.patch
@@ -0,0 +1,95 @@
+From 695c6dfe7e85006b98c8b746f3fd5f913c94ebff Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 21 Jul 2022 09:56:15 +0930
+Subject: [PATCH] PR29370, infinite loop in display_debug_abbrev
+
+The PR29370 testcase is a fuzzed object file with multiple
+.trace_abbrev sections. Multiple .trace_abbrev or .debug_abbrev
+sections are not a violation of the DWARF standard. The DWARF5
+standard even gives an example of multiple .debug_abbrev sections
+contained in groups. Caching and lookup of processed abbrevs thus
+needs to be done by section and offset rather than base and offset.
+(Why base anyway?) Or, since section contents are kept, by a pointer
+into the contents.
+
+ PR 29370
+ * dwarf.c (struct abbrev_list): Replace abbrev_base and
+ abbrev_offset with raw field.
+ (find_abbrev_list_by_abbrev_offset): Delete.
+ (find_abbrev_list_by_raw_abbrev): New function.
+ (process_abbrev_set): Set list->raw and list->next.
+ (find_and_process_abbrev_set): Replace abbrev list lookup with
+ new function. Don't set list abbrev_base, abbrev_offset or next.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=patch;h=695c6dfe7e85006b98c8b746f3fd5f913c94ebff]
+
+Signed-off-by: Pgowda <pgowda.cve@gmail.com>
+---
+ binutils/dwarf.c | 19 ++++++-------------
+ 1 file changed, 6 insertions(+), 13 deletions(-)
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index 2fc352f74c5..99fb3566994 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -856,8 +856,7 @@ typedef struct abbrev_list
+ {
+ abbrev_entry * first_abbrev;
+ abbrev_entry * last_abbrev;
+- dwarf_vma abbrev_base;
+- dwarf_vma abbrev_offset;
++ unsigned char * raw;
+ struct abbrev_list * next;
+ unsigned char * start_of_next_abbrevs;
+ }
+@@ -946,14 +945,12 @@ free_all_abbrevs (void)
+ }
+
+ static abbrev_list *
+-find_abbrev_list_by_abbrev_offset (dwarf_vma abbrev_base,
+- dwarf_vma abbrev_offset)
++find_abbrev_list_by_raw_abbrev (unsigned char *raw)
+ {
+ abbrev_list * list;
+
+ for (list = abbrev_lists; list != NULL; list = list->next)
+- if (list->abbrev_base == abbrev_base
+- && list->abbrev_offset == abbrev_offset)
++ if (list->raw == raw)
+ return list;
+
+ return NULL;
+@@ -1040,6 +1037,7 @@ process_abbrev_set (struct dwarf_section
+ abbrev_list *list = xmalloc (sizeof (*list));
+ list->first_abbrev = NULL;
+ list->last_abbrev = NULL;
++ list->raw = start;
+
+ while (start < end)
+ {
+@@ -1055,6 +1053,7 @@ process_abbrev_set (struct dwarf_section
+ the caller. */
+ if (start == end || entry == 0)
+ {
++ list->next = NULL;
+ list->start_of_next_abbrevs = start != end ? start : NULL;
+ return list;
+ }
+@@ -1144,16 +1143,10 @@ find_and_process_abbrev_set (struct dwar
+ unsigned char *end = section->start + abbrev_base + abbrev_size;
+ abbrev_list *list = NULL;
+ if (free_list)
+- list = find_abbrev_list_by_abbrev_offset (abbrev_base, abbrev_offset);
++ list = find_abbrev_list_by_raw_abbrev (start);
+ if (list == NULL)
+ {
+ list = process_abbrev_set (section, start, end);
+- if (list)
+- {
+- list->abbrev_base = abbrev_base;
+- list->abbrev_offset = abbrev_offset;
+- list->next = NULL;
+- }
+ if (free_list)
+ *free_list = list;
+ }
diff --git a/meta/recipes-devtools/binutils/binutils/0019-CVE-2022-4285.patch b/meta/recipes-devtools/binutils/binutils/0019-CVE-2022-4285.patch
new file mode 100644
index 0000000000..e5e404982e
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0019-CVE-2022-4285.patch
@@ -0,0 +1,37 @@
+From 5c831a3c7f3ca98d6aba1200353311e1a1f84c70 Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Wed, 19 Oct 2022 15:09:12 +0100
+Subject: [PATCH] Fix an illegal memory access when parsing an ELF file
+ containing corrupt symbol version information.
+
+ PR 29699
+ * elf.c (_bfd_elf_slurp_version_tables): Fail if the sh_info field
+ of the section header is zero.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=5c831a3c7f3ca98d6aba1200353311e1a1f84c70]
+CVE: CVE-2022-4285
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+---
+ bfd/ChangeLog | 6 ++++++
+ bfd/elf.c | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/bfd/elf.c b/bfd/elf.c
+index fe00e0f9189..7cd7febcf95 100644
+--- a/bfd/elf.c
++++ b/bfd/elf.c
+@@ -8918,7 +8918,9 @@ _bfd_elf_slurp_version_tables (bfd *abfd, bool default_imported_symver)
+ bfd_set_error (bfd_error_file_too_big);
+ goto error_return_verref;
+ }
+- elf_tdata (abfd)->verref = (Elf_Internal_Verneed *) bfd_alloc (abfd, amt);
++ if (amt == 0)
++ goto error_return_verref;
++ elf_tdata (abfd)->verref = (Elf_Internal_Verneed *) bfd_zalloc (abfd, amt);
+ if (elf_tdata (abfd)->verref == NULL)
+ goto error_return_verref;
+
+--
+2.31.1
+
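The fix refuses a version table whose computed size is zero and switches to a zero-initialising allocation, so later code never indexes into an empty or uninitialised buffer. A stand-alone C sketch of that guard pattern (hypothetical names, not the bfd implementation):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct verneed
{
  unsigned int vn_cnt;
};

/* Refuse to build the table when the header claims zero entries (or an
   impossibly large count), and zero-initialise what is allocated.  */
static struct verneed *
build_version_table (size_t count)
{
  if (count == 0 || count > SIZE_MAX / sizeof (struct verneed))
    return NULL;
  return calloc (count, sizeof (struct verneed));
}

int
main (void)
{
  struct verneed *tab = build_version_table (0);
  printf ("zero entries  -> %s\n", tab ? "allocated" : "rejected");

  tab = build_version_table (4);
  printf ("four entries  -> %s\n", tab ? "allocated" : "rejected");
  free (tab);
  return 0;
}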
diff --git a/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-1.patch b/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-1.patch
new file mode 100644
index 0000000000..18d4ac5f9d
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-1.patch
@@ -0,0 +1,506 @@
+From 116aac1447ee92df25599859293752648e3c6ea0 Mon Sep 17 00:00:00 2001
+From: "Steinar H. Gunderson" <sesse@google.com>
+Date: Fri, 20 May 2022 16:10:34 +0200
+Subject: [PATCH] add a trie to map quickly from address range to compilation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+ unit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When using perf to profile large binaries, _bfd_dwarf2_find_nearest_line()
+becomes a hotspot, as perf wants to get line number information
+(for inline-detection purposes) for each and every sample. In Chromium
+in particular (the content_shell binary), this entails going through
+475k address ranges, which takes a long time when done repeatedly.
+
+Add a radix-256 trie over the address space to quickly map address to
+compilation unit spaces; for content_shell, which is 1.6 GB when some
+(but not full) debug information is turned on, we go from 6 ms to
+0.006 ms (6 µs) for each lookup from address to compilation unit, a 1000x
+speedup.
+
+There is a modest RAM increase of 180 MB in this binary (the existing
+linked list over ranges uses about 10 MB, and the entire perf job uses
+between 2-3 GB for a medium-size profile); for smaller binaries with few
+ranges, there should be hardly any extra RAM usage at all.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=b43771b045fb5616da3964f2994eefbe8ae70d32]
+
+CVE: CVE-2023-22608
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+
+---
+ bfd/dwarf2.c | 326 ++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 312 insertions(+), 14 deletions(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index fdf071c3..0ae50a37 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -82,6 +82,77 @@ struct adjusted_section
+ bfd_vma adj_vma;
+ };
+
++/* A trie to map quickly from address range to compilation unit.
++
++ This is a fairly standard radix-256 trie, used to quickly locate which
++ compilation unit any given address belongs to. Given that each compilation
++ unit may register hundreds of very small and unaligned ranges (which may
++ potentially overlap, due to inlining and other concerns), and a large
++ program may end up containing hundreds of thousands of such ranges, we cannot
++ scan through them linearly without undue slowdown.
++
++ We use a hybrid trie to avoid memory explosion: There are two types of trie
++ nodes, leaves and interior nodes. (Almost all nodes are leaves, so they
++ take up the bulk of the memory usage.) Leaves contain a simple array of
++ ranges (high/low address) and which compilation unit contains those ranges,
++ and when we get to a leaf, we scan through it linearly. Interior nodes
++ contain pointers to 256 other nodes, keyed by the next byte of the address.
++ So for a 64-bit address like 0x1234567abcd, we would start at the root and go
++ down child[0x00]->child[0x00]->child[0x01]->child[0x23]->child[0x45] etc.,
++ until we hit a leaf. (Nodes are, in general, leaves until they exceed the
++ default allocation of 16 elements, at which point they are converted to
++ interior node if possible.) This gives us near-constant lookup times;
++ the only thing that can be costly is if there are lots of overlapping ranges
++ within a single 256-byte segment of the binary, in which case we have to
++ scan through them all to find the best match.
++
++ For a binary with few ranges, we will in practice only have a single leaf
++ node at the root, containing a simple array. Thus, the scheme is efficient
++ for both small and large binaries.
++ */
++
++/* Experiments have shown 16 to be a memory-efficient default leaf size.
++ The only case where a leaf will hold more memory than this, is at the
++ bottomost level (covering 256 bytes in the binary), where we'll expand
++ the leaf to be able to hold more ranges if needed.
++ */
++#define TRIE_LEAF_SIZE 16
++
++/* All trie_node pointers will really be trie_leaf or trie_interior,
++ but they have this common head. */
++struct trie_node
++{
++ /* If zero, we are an interior node.
++ Otherwise, how many ranges we have room for in this leaf. */
++ unsigned int num_room_in_leaf;
++};
++
++struct trie_leaf
++{
++ struct trie_node head;
++ unsigned int num_stored_in_leaf;
++ struct {
++ struct comp_unit *unit;
++ bfd_vma low_pc, high_pc;
++ } ranges[TRIE_LEAF_SIZE];
++};
++
++struct trie_interior
++{
++ struct trie_node head;
++ struct trie_node *children[256];
++};
++
++static struct trie_node *alloc_trie_leaf (bfd *abfd)
++{
++ struct trie_leaf *leaf =
++ bfd_zalloc (abfd, sizeof (struct trie_leaf));
++ if (leaf == NULL)
++ return NULL;
++ leaf->head.num_room_in_leaf = TRIE_LEAF_SIZE;
++ return &leaf->head;
++}
++
+ struct dwarf2_debug_file
+ {
+ /* The actual bfd from which debug info was loaded. Might be
+@@ -139,6 +210,9 @@ struct dwarf2_debug_file
+ /* A list of all previously read comp_units. */
+ struct comp_unit *all_comp_units;
+
++ /* A list of all previously read comp_units with no ranges (yet). */
++ struct comp_unit *all_comp_units_without_ranges;
++
+ /* Last comp unit in list above. */
+ struct comp_unit *last_comp_unit;
+
+@@ -147,6 +221,9 @@ struct dwarf2_debug_file
+
+ /* Hash table to map offsets to decoded abbrevs. */
+ htab_t abbrev_offsets;
++
++ /* Root of a trie to map addresses to compilation units. */
++ struct trie_node *trie_root;
+ };
+
+ struct dwarf2_debug
+@@ -220,6 +297,11 @@ struct comp_unit
+ /* Chain the previously read compilation units. */
+ struct comp_unit *next_unit;
+
++ /* Chain the previously read compilation units that have no ranges yet.
++ We scan these separately when we have a trie over the ranges.
++ Unused if arange.high != 0. */
++ struct comp_unit *next_unit_without_ranges;
++
+ /* Likewise, chain the compilation unit read after this one.
+ The comp units are stored in reversed reading order. */
+ struct comp_unit *prev_unit;
+@@ -296,6 +378,10 @@ struct comp_unit
+
+ /* TRUE if symbols are cached in hash table for faster lookup by name. */
+ bool cached;
++
++ /* Used when iterating over trie leaves to know which units we have
++ already seen in this iteration. */
++ bool mark;
+ };
+
+ /* This data structure holds the information of an abbrev. */
+@@ -1766,9 +1852,189 @@ concat_filename (struct line_info_table *table, unsigned int file)
+ return strdup (filename);
+ }
+
++/* Number of bits in a bfd_vma. */
++#define VMA_BITS (8 * sizeof (bfd_vma))
++
++/* Check whether [low1, high1) can be combined with [low2, high2),
++ i.e., they touch or overlap. */
++static bool ranges_overlap (bfd_vma low1,
++ bfd_vma high1,
++ bfd_vma low2,
++ bfd_vma high2)
++{
++ if (low1 == low2 || high1 == high2)
++ return true;
++
++ /* Sort so that low1 is below low2. */
++ if (low1 > low2)
++ {
++ bfd_vma tmp;
++
++ tmp = low1;
++ low1 = low2;
++ low2 = tmp;
++
++ tmp = high1;
++ high1 = high2;
++ high2 = tmp;
++ }
++
++ /* We touch iff low2 == high1.
++ We overlap iff low2 is within [low1, high1). */
++ return (low2 <= high1);
++}
++
++/* Insert an address range in the trie mapping addresses to compilation units.
++ Will return the new trie node (usually the same as is being sent in, but
++ in case of a leaf-to-interior conversion, or expansion of a leaf, it may be
++ different), or NULL on failure.
++ */
++static struct trie_node *insert_arange_in_trie(bfd *abfd,
++ struct trie_node *trie,
++ bfd_vma trie_pc,
++ unsigned int trie_pc_bits,
++ struct comp_unit *unit,
++ bfd_vma low_pc,
++ bfd_vma high_pc)
++{
++ bfd_vma clamped_low_pc, clamped_high_pc;
++ int ch, from_ch, to_ch;
++ bool is_full_leaf = false;
++
++ /* See if we can extend any of the existing ranges. This merging
++ isn't perfect (if merging opens up the possibility of merging two existing
++ ranges, we won't find them), but it takes the majority of the cases. */
++ if (trie->num_room_in_leaf > 0)
++ {
++ struct trie_leaf *leaf = (struct trie_leaf *) trie;
++ unsigned int i;
++
++ for (i = 0; i < leaf->num_stored_in_leaf; ++i)
++ {
++ if (leaf->ranges[i].unit == unit &&
++ ranges_overlap(low_pc, high_pc,
++ leaf->ranges[i].low_pc, leaf->ranges[i].high_pc))
++ {
++ if (low_pc < leaf->ranges[i].low_pc)
++ leaf->ranges[i].low_pc = low_pc;
++ if (high_pc > leaf->ranges[i].high_pc)
++ leaf->ranges[i].high_pc = high_pc;
++ return trie;
++ }
++ }
++
++ is_full_leaf = leaf->num_stored_in_leaf == trie->num_room_in_leaf;
++ }
++
++ /* If we're a leaf with no more room and we're _not_ at the bottom,
++ convert to an interior node. */
++ if (is_full_leaf && trie_pc_bits < VMA_BITS)
++ {
++ const struct trie_leaf *leaf = (struct trie_leaf *) trie;
++ unsigned int i;
++
++ trie = bfd_zalloc (abfd, sizeof (struct trie_interior));
++ if (!trie)
++ return NULL;
++ is_full_leaf = false;
++
++ /* TODO: If we wanted to save a little more memory at the cost of
++ complexity, we could have reused the old leaf node as one of the
++ children of the new interior node, instead of throwing it away. */
++ for (i = 0; i < leaf->num_stored_in_leaf; ++i)
++ {
++ if (!insert_arange_in_trie (abfd, trie, trie_pc, trie_pc_bits,
++ leaf->ranges[i].unit, leaf->ranges[i].low_pc,
++ leaf->ranges[i].high_pc))
++ return NULL;
++ }
++ }
++
++ /* If we're a leaf with no more room and we _are_ at the bottom,
++ we have no choice but to just make it larger. */
++ if (is_full_leaf)
++ {
++ const struct trie_leaf *leaf = (struct trie_leaf *) trie;
++ unsigned int new_room_in_leaf = trie->num_room_in_leaf * 2;
++ struct trie_leaf *new_leaf;
++
++ new_leaf = bfd_zalloc (abfd,
++ sizeof (struct trie_leaf) +
++ (new_room_in_leaf - TRIE_LEAF_SIZE) * sizeof (leaf->ranges[0]));
++ new_leaf->head.num_room_in_leaf = new_room_in_leaf;
++ new_leaf->num_stored_in_leaf = leaf->num_stored_in_leaf;
++
++ memcpy (new_leaf->ranges,
++ leaf->ranges,
++ leaf->num_stored_in_leaf * sizeof (leaf->ranges[0]));
++ trie = &new_leaf->head;
++ is_full_leaf = false;
++
++ /* Now the insert below will go through. */
++ }
++
++ /* If we're a leaf (now with room), we can just insert at the end. */
++ if (trie->num_room_in_leaf > 0)
++ {
++ struct trie_leaf *leaf = (struct trie_leaf *) trie;
++
++ unsigned int i = leaf->num_stored_in_leaf++;
++ leaf->ranges[i].unit = unit;
++ leaf->ranges[i].low_pc = low_pc;
++ leaf->ranges[i].high_pc = high_pc;
++ return trie;
++ }
++
++ /* Now we are definitely an interior node, so recurse into all
++ the relevant buckets. */
++
++ /* Clamp the range to the current trie bucket. */
++ clamped_low_pc = low_pc;
++ clamped_high_pc = high_pc;
++ if (trie_pc_bits > 0)
++ {
++ bfd_vma bucket_high_pc =
++ trie_pc + ((bfd_vma)-1 >> trie_pc_bits); /* Inclusive. */
++ if (clamped_low_pc < trie_pc)
++ clamped_low_pc = trie_pc;
++ if (clamped_high_pc > bucket_high_pc)
++ clamped_high_pc = bucket_high_pc;
++ }
++
++ /* Insert the ranges in all buckets that it spans. */
++ from_ch = (clamped_low_pc >> (VMA_BITS - trie_pc_bits - 8)) & 0xff;
++ to_ch = ((clamped_high_pc - 1) >> (VMA_BITS - trie_pc_bits - 8)) & 0xff;
++ for (ch = from_ch; ch <= to_ch; ++ch)
++ {
++ struct trie_interior *interior = (struct trie_interior *) trie;
++ struct trie_node *child = interior->children[ch];
++
++ if (child == NULL)
++ {
++ child = alloc_trie_leaf (abfd);
++ if (!child)
++ return NULL;
++ }
++ child = insert_arange_in_trie (abfd,
++ child,
++ trie_pc + ((bfd_vma)ch << (VMA_BITS - trie_pc_bits - 8)),
++ trie_pc_bits + 8,
++ unit,
++ low_pc,
++ high_pc);
++ if (!child)
++ return NULL;
++
++ interior->children[ch] = child;
++ }
++
++ return trie;
++}
++
++
+ static bool
+-arange_add (const struct comp_unit *unit, struct arange *first_arange,
+- bfd_vma low_pc, bfd_vma high_pc)
++arange_add (struct comp_unit *unit, struct arange *first_arange,
++ struct trie_node **trie_root, bfd_vma low_pc, bfd_vma high_pc)
+ {
+ struct arange *arange;
+
+@@ -1776,6 +2042,19 @@ arange_add (const struct comp_unit *unit, struct arange *first_arange,
+ if (low_pc == high_pc)
+ return true;
+
++ if (trie_root != NULL)
++ {
++ *trie_root = insert_arange_in_trie (unit->file->bfd_ptr,
++ *trie_root,
++ 0,
++ 0,
++ unit,
++ low_pc,
++ high_pc);
++ if (*trie_root == NULL)
++ return false;
++ }
++
+ /* If the first arange is empty, use it. */
+ if (first_arange->high == 0)
+ {
+@@ -2410,7 +2689,8 @@ decode_line_info (struct comp_unit *unit)
+ low_pc = address;
+ if (address > high_pc)
+ high_pc = address;
+- if (!arange_add (unit, &unit->arange, low_pc, high_pc))
++ if (!arange_add (unit, &unit->arange, &unit->file->trie_root,
++ low_pc, high_pc))
+ goto line_fail;
+ break;
+ case DW_LNE_set_address:
+@@ -3134,7 +3414,7 @@ find_abstract_instance (struct comp_unit *unit,
+
+ static bool
+ read_ranges (struct comp_unit *unit, struct arange *arange,
+- bfd_uint64_t offset)
++ struct trie_node **trie_root, bfd_uint64_t offset)
+ {
+ bfd_byte *ranges_ptr;
+ bfd_byte *ranges_end;
+@@ -3169,7 +3449,7 @@ read_ranges (struct comp_unit *unit, struct arange *arange,
+ base_address = high_pc;
+ else
+ {
+- if (!arange_add (unit, arange,
++ if (!arange_add (unit, arange, trie_root,
+ base_address + low_pc, base_address + high_pc))
+ return false;
+ }
+@@ -3179,7 +3459,7 @@ read_ranges (struct comp_unit *unit, struct arange *arange,
+
+ static bool
+ read_rnglists (struct comp_unit *unit, struct arange *arange,
+- bfd_uint64_t offset)
++ struct trie_node **trie_root, bfd_uint64_t offset)
+ {
+ bfd_byte *rngs_ptr;
+ bfd_byte *rngs_end;
+@@ -3253,19 +3533,19 @@ read_rnglists (struct comp_unit *unit, struct arange *arange,
+ return false;
+ }
+
+- if (!arange_add (unit, arange, low_pc, high_pc))
++ if (!arange_add (unit, arange, trie_root, low_pc, high_pc))
+ return false;
+ }
+ }
+
+ static bool
+ read_rangelist (struct comp_unit *unit, struct arange *arange,
+- bfd_uint64_t offset)
++ struct trie_node **trie_root, bfd_uint64_t offset)
+ {
+ if (unit->version <= 4)
+- return read_ranges (unit, arange, offset);
++ return read_ranges (unit, arange, trie_root, offset);
+ else
+- return read_rnglists (unit, arange, offset);
++ return read_rnglists (unit, arange, trie_root, offset);
+ }
+
+ static struct funcinfo *
+@@ -3563,7 +3843,8 @@ scan_unit_for_symbols (struct comp_unit *unit)
+
+ case DW_AT_ranges:
+ if (is_int_form (&attr)
+- && !read_rangelist (unit, &func->arange, attr.u.val))
++ && !read_rangelist (unit, &func->arange,
++ &unit->file->trie_root, attr.u.val))
+ goto fail;
+ break;
+
+@@ -3679,7 +3960,8 @@ scan_unit_for_symbols (struct comp_unit *unit)
+
+ if (func && high_pc != 0)
+ {
+- if (!arange_add (unit, &func->arange, low_pc, high_pc))
++ if (!arange_add (unit, &func->arange, &unit->file->trie_root,
++ low_pc, high_pc))
+ goto fail;
+ }
+ }
+@@ -3874,7 +4156,8 @@ parse_comp_unit (struct dwarf2_debug *stash,
+
+ case DW_AT_ranges:
+ if (is_int_form (&attr)
+- && !read_rangelist (unit, &unit->arange, attr.u.val))
++ && !read_rangelist (unit, &unit->arange,
++ &unit->file->trie_root, attr.u.val))
+ return NULL;
+ break;
+
+@@ -3916,7 +4199,8 @@ parse_comp_unit (struct dwarf2_debug *stash,
+ high_pc += low_pc;
+ if (high_pc != 0)
+ {
+- if (!arange_add (unit, &unit->arange, low_pc, high_pc))
++ if (!arange_add (unit, &unit->arange, &unit->file->trie_root,
++ low_pc, high_pc))
+ return NULL;
+ }
+
+@@ -4747,6 +5031,14 @@ _bfd_dwarf2_slurp_debug_info (bfd *abfd, bfd *debug_bfd,
+ if (!stash->alt.abbrev_offsets)
+ return false;
+
++ stash->f.trie_root = alloc_trie_leaf (abfd);
++ if (!stash->f.trie_root)
++ return false;
++
++ stash->alt.trie_root = alloc_trie_leaf (abfd);
++ if (!stash->alt.trie_root)
++ return false;
++
+ *pinfo = stash;
+
+ if (debug_bfd == NULL)
+@@ -4918,6 +5210,12 @@ stash_comp_unit (struct dwarf2_debug *stash, struct dwarf2_debug_file *file)
+ each->next_unit = file->all_comp_units;
+ file->all_comp_units = each;
+
++ if (each->arange.high == 0)
++ {
++ each->next_unit_without_ranges = file->all_comp_units_without_ranges;
++ file->all_comp_units_without_ranges = each->next_unit_without_ranges;
++ }
++
+ file->info_ptr += length;
+ return each;
+ }
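As the commit message explains, each interior level of the radix-256 trie consumes one byte of the address, most significant byte first. The following stand-alone C snippet (illustrative only) prints the child indices visited for the example address quoted in the commit message, matching the shift used by insert_arange_in_trie() when bfd_vma is 64 bits wide:

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  /* The example address from the commit message above.  */
  uint64_t addr = 0x000001234567abcdULL;

  /* One byte of the address per interior level, most significant byte
     first.  */
  for (unsigned int level = 0; level < 8; level++)
    {
      unsigned int shift = 8 * (7 - level);
      unsigned int child = (unsigned int) ((addr >> shift) & 0xff);

      printf ("level %u -> child[0x%02x]\n", level, child);
    }
  return 0;
}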
diff --git a/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-2.patch b/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-2.patch
new file mode 100644
index 0000000000..a58b8dccdc
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-2.patch
@@ -0,0 +1,210 @@
+From 1e716c1b160d56c2ab8711e199cad5b4db47cedf Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Tue, 30 Aug 2022 16:01:20 +0100
+Subject: [PATCH] BFD library: Use entry 0 in directory and filename tables of
+
+ DWARF-5 debug info.
+
+ PR 29529
+ * dwarf2.c (struct line_info_table): Add new field:
+ use_dir_and_file_0.
+ (concat_filename): Use new field to help select the correct table
+ slot.
+ (read_formatted_entries): Do not skip entry 0.
+ (decode_line_info): Set new field depending upon the version of
+ DWARF being parsed. Initialise filename based upon the setting of
+ the new field.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=37833b966576c5d25e797ea3b6c33d0459a71892]
+CVE: CVE-2023-22608
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+
+---
+ bfd/dwarf2.c | 86 ++++++++++++++++++++----------
+ ld/testsuite/ld-x86-64/pr27587.err | 2 +-
+ 2 files changed, 59 insertions(+), 29 deletions(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 0ae50a37..b7839ad6 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -1571,6 +1571,7 @@ struct line_info_table
+ unsigned int num_files;
+ unsigned int num_dirs;
+ unsigned int num_sequences;
++ bool use_dir_and_file_0;
+ char * comp_dir;
+ char ** dirs;
+ struct fileinfo* files;
+@@ -1791,16 +1792,30 @@ concat_filename (struct line_info_table *table, unsigned int file)
+ {
+ char *filename;
+
+- if (table == NULL || file - 1 >= table->num_files)
++ /* Pre DWARF-5 entry 0 in the directory and filename tables was not used.
++ So in order to save space in the tables used here the info for, eg
++ directory 1 is stored in slot 0 of the directory table, directory 2
++ in slot 1 and so on.
++
++ Starting with DWARF-5 the 0'th entry is used so there is a one to one
++ mapping between DWARF slots and internal table entries. */
++ if (! table->use_dir_and_file_0)
+ {
+- /* FILE == 0 means unknown. */
+- if (file)
+- _bfd_error_handler
+- (_("DWARF error: mangled line number section (bad file number)"));
++ /* Pre DWARF-5, FILE == 0 means unknown. */
++ if (file == 0)
++ return strdup ("<unknown>");
++ -- file;
++ }
++
++ if (table == NULL || file >= table->num_files)
++ {
++ _bfd_error_handler
++ (_("DWARF error: mangled line number section (bad file number)"));
+ return strdup ("<unknown>");
+ }
+
+- filename = table->files[file - 1].name;
++ filename = table->files[file].name;
++
+ if (filename == NULL)
+ return strdup ("<unknown>");
+
+@@ -1811,12 +1826,17 @@ concat_filename (struct line_info_table *table, unsigned int file)
+ char *name;
+ size_t len;
+
+- if (table->files[file - 1].dir
++ if (table->files[file].dir
+ /* PR 17512: file: 0317e960. */
+- && table->files[file - 1].dir <= table->num_dirs
++ && table->files[file].dir <= table->num_dirs
+ /* PR 17512: file: 7f3d2e4b. */
+ && table->dirs != NULL)
+- subdir_name = table->dirs[table->files[file - 1].dir - 1];
++ {
++ if (table->use_dir_and_file_0)
++ subdir_name = table->dirs[table->files[file].dir];
++ else
++ subdir_name = table->dirs[table->files[file].dir - 1];
++ }
+
+ if (!subdir_name || !IS_ABSOLUTE_PATH (subdir_name))
+ dir_name = table->comp_dir;
+@@ -1857,10 +1877,12 @@ concat_filename (struct line_info_table *table, unsigned int file)
+
+ /* Check whether [low1, high1) can be combined with [low2, high2),
+ i.e., they touch or overlap. */
+-static bool ranges_overlap (bfd_vma low1,
+- bfd_vma high1,
+- bfd_vma low2,
+- bfd_vma high2)
++
++static bool
++ranges_overlap (bfd_vma low1,
++ bfd_vma high1,
++ bfd_vma low2,
++ bfd_vma high2)
+ {
+ if (low1 == low2 || high1 == high2)
+ return true;
+@@ -1887,15 +1909,16 @@ static bool ranges_overlap (bfd_vma low1,
+ /* Insert an address range in the trie mapping addresses to compilation units.
+ Will return the new trie node (usually the same as is being sent in, but
+ in case of a leaf-to-interior conversion, or expansion of a leaf, it may be
+- different), or NULL on failure.
+- */
+-static struct trie_node *insert_arange_in_trie(bfd *abfd,
+- struct trie_node *trie,
+- bfd_vma trie_pc,
+- unsigned int trie_pc_bits,
+- struct comp_unit *unit,
+- bfd_vma low_pc,
+- bfd_vma high_pc)
++ different), or NULL on failure. */
++
++static struct trie_node *
++insert_arange_in_trie (bfd *abfd,
++ struct trie_node *trie,
++ bfd_vma trie_pc,
++ unsigned int trie_pc_bits,
++ struct comp_unit *unit,
++ bfd_vma low_pc,
++ bfd_vma high_pc)
+ {
+ bfd_vma clamped_low_pc, clamped_high_pc;
+ int ch, from_ch, to_ch;
+@@ -2031,7 +2054,6 @@ static struct trie_node *insert_arange_in_trie(bfd *abfd,
+ return trie;
+ }
+
+-
+ static bool
+ arange_add (struct comp_unit *unit, struct arange *first_arange,
+ struct trie_node **trie_root, bfd_vma low_pc, bfd_vma high_pc)
+@@ -2412,10 +2434,8 @@ read_formatted_entries (struct comp_unit *unit, bfd_byte **bufp,
+ }
+ }
+
+- /* Skip the first "zero entry", which is the compilation dir/file. */
+- if (datai != 0)
+- if (!callback (table, fe.name, fe.dir, fe.time, fe.size))
+- return false;
++ if (!callback (table, fe.name, fe.dir, fe.time, fe.size))
++ return false;
+ }
+
+ *bufp = buf;
+@@ -2592,6 +2612,7 @@ decode_line_info (struct comp_unit *unit)
+ if (!read_formatted_entries (unit, &line_ptr, line_end, table,
+ line_info_add_file_name))
+ goto fail;
++ table->use_dir_and_file_0 = true;
+ }
+ else
+ {
+@@ -2614,6 +2635,7 @@ decode_line_info (struct comp_unit *unit)
+ if (!line_info_add_file_name (table, cur_file, dir, xtime, size))
+ goto fail;
+ }
++ table->use_dir_and_file_0 = false;
+ }
+
+ /* Read the statement sequences until there's nothing left. */
+@@ -2622,7 +2644,7 @@ decode_line_info (struct comp_unit *unit)
+ /* State machine registers. */
+ bfd_vma address = 0;
+ unsigned char op_index = 0;
+- char * filename = table->num_files ? concat_filename (table, 1) : NULL;
++ char * filename = NULL;
+ unsigned int line = 1;
+ unsigned int column = 0;
+ unsigned int discriminator = 0;
+@@ -2637,6 +2659,14 @@ decode_line_info (struct comp_unit *unit)
+ bfd_vma low_pc = (bfd_vma) -1;
+ bfd_vma high_pc = 0;
+
++ if (table->num_files)
++ {
++ if (table->use_dir_and_file_0)
++ filename = concat_filename (table, 0);
++ else
++ filename = concat_filename (table, 1);
++ }
++
+ /* Decode the table. */
+ while (!end_sequence && line_ptr < line_end)
+ {
+diff --git a/ld/testsuite/ld-x86-64/pr27587.err b/ld/testsuite/ld-x86-64/pr27587.err
+index fa870790..807750ca 100644
+--- a/ld/testsuite/ld-x86-64/pr27587.err
++++ b/ld/testsuite/ld-x86-64/pr27587.err
+@@ -1,3 +1,3 @@
+ #...
+-.*pr27587.i:4: undefined reference to `stack_size'
++.*pr27587/<artificial>:4: undefined reference to `stack_size'
+ #...
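The behavioural core of this change is that DWARF-5 line tables use entry 0 of the directory and file name tables, so the internal slot for a given DWARF file number now depends on the table flavour. A compact stand-alone C sketch of that mapping (hypothetical helper, not the bfd code):

#include <stdio.h>
#include <stdbool.h>

/* Map a DWARF file number to an internal table slot.  Pre-DWARF-5,
   file 0 means "unknown" and file N lives in slot N - 1; from DWARF-5
   on, the mapping is one-to-one.  Returns -1 for "unknown".  */
static int
file_to_slot (unsigned int file, bool use_dir_and_file_0)
{
  if (!use_dir_and_file_0)
    {
      if (file == 0)
        return -1;
      return (int) (file - 1);
    }
  return (int) file;
}

int
main (void)
{
  printf ("pre-DWARF-5 file 1 -> slot %d\n", file_to_slot (1, false));
  printf ("pre-DWARF-5 file 0 -> slot %d\n", file_to_slot (0, false));
  printf ("DWARF-5     file 0 -> slot %d\n", file_to_slot (0, true));
  return 0;
}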
diff --git a/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-3.patch b/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-3.patch
new file mode 100644
index 0000000000..a1b74248ce
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0020-CVE-2023-22608-3.patch
@@ -0,0 +1,32 @@
+From 4b8386a90802ed8e43eac2266f6e03c92b4462ed Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Fri, 23 Dec 2022 13:02:04 +0000
+Subject: [PATCH] Fix illegal memory access parsing corrupt DWARF information.
+
+ PR 29936
+ * dwarf2.c (concat_filename): Fix check for a directory index off
+ the end of the directory table.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=8af23b30edbaedf009bc9b243cd4dfa10ae1ac09]
+CVE: CVE-2023-22608
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+
+---
+ bfd/dwarf2.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index b7839ad6..8b07a24c 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -1828,7 +1828,8 @@ concat_filename (struct line_info_table *table, unsigned int file)
+
+ if (table->files[file].dir
+ /* PR 17512: file: 0317e960. */
+- && table->files[file].dir <= table->num_dirs
++ && table->files[file].dir
++ <= (table->use_dir_and_file_0 ? table->num_dirs - 1 : table->num_dirs)
+ /* PR 17512: file: 7f3d2e4b. */
+ && table->dirs != NULL)
+ {
diff --git a/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-1.patch b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-1.patch
new file mode 100644
index 0000000000..1e9c03e70e
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-1.patch
@@ -0,0 +1,459 @@
+From f67741e172bf342291fe3abd2b395899ce6433a0 Mon Sep 17 00:00:00 2001
+From: "Potharla, Rupesh" <Rupesh.Potharla@amd.com>
+Date: Tue, 24 May 2022 00:01:49 +0000
+Subject: [PATCH] bfd: Add Support for DW_FORM_strx* and DW_FORM_addrx*
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=f67741e172bf342291fe3abd2b395899ce6433a0]
+
+CVE: CVE-2023-1579
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+
+---
+ bfd/dwarf2.c | 282 ++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 268 insertions(+), 14 deletions(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index f6b0183720b..45e286754e4 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -189,6 +189,18 @@ struct dwarf2_debug_file
+ /* Length of the loaded .debug_str section. */
+ bfd_size_type dwarf_str_size;
+
++ /* Pointer to the .debug_str_offsets section loaded into memory. */
++ bfd_byte *dwarf_str_offsets_buffer;
++
++ /* Length of the loaded .debug_str_offsets section. */
++ bfd_size_type dwarf_str_offsets_size;
++
++ /* Pointer to the .debug_addr section loaded into memory. */
++ bfd_byte *dwarf_addr_buffer;
++
++ /* Length of the loaded .debug_addr section. */
++ bfd_size_type dwarf_addr_size;
++
+ /* Pointer to the .debug_line_str section loaded into memory. */
+ bfd_byte *dwarf_line_str_buffer;
+
+@@ -382,6 +394,12 @@ struct comp_unit
+ /* Used when iterating over trie leaves to know which units we have
+ already seen in this iteration. */
+ bool mark;
++
++ /* Base address of debug_addr section. */
++ size_t dwarf_addr_offset;
++
++ /* Base address of string offset table. */
++ size_t dwarf_str_offset;
+ };
+
+ /* This data structure holds the information of an abbrev. */
+@@ -424,6 +442,8 @@ const struct dwarf_debug_section dwarf_debug_sections[] =
+ { ".debug_static_vars", ".zdebug_static_vars" },
+ { ".debug_str", ".zdebug_str", },
+ { ".debug_str", ".zdebug_str", },
++ { ".debug_str_offsets", ".zdebug_str_offsets", },
++ { ".debug_addr", ".zdebug_addr", },
+ { ".debug_line_str", ".zdebug_line_str", },
+ { ".debug_types", ".zdebug_types" },
+ /* GNU DWARF 1 extensions */
+@@ -458,6 +478,8 @@ enum dwarf_debug_section_enum
+ debug_static_vars,
+ debug_str,
+ debug_str_alt,
++ debug_str_offsets,
++ debug_addr,
+ debug_line_str,
+ debug_types,
+ debug_sfnames,
+@@ -1307,12 +1329,92 @@ is_int_form (const struct attribute *attr)
+ }
+ }
+
++/* Returns true if the form is strx[1-4]. */
++
++static inline bool
++is_strx_form (enum dwarf_form form)
++{
++ return (form == DW_FORM_strx
++ || form == DW_FORM_strx1
++ || form == DW_FORM_strx2
++ || form == DW_FORM_strx3
++ || form == DW_FORM_strx4);
++}
++
++/* Return true if the form is addrx[1-4]. */
++
++static inline bool
++is_addrx_form (enum dwarf_form form)
++{
++ return (form == DW_FORM_addrx
++ || form == DW_FORM_addrx1
++ || form == DW_FORM_addrx2
++ || form == DW_FORM_addrx3
++ || form == DW_FORM_addrx4);
++}
++
++/* Returns the address in .debug_addr section using DW_AT_addr_base.
++ Used to implement DW_FORM_addrx*. */
++static bfd_vma
++read_indexed_address (bfd_uint64_t idx,
++ struct comp_unit *unit)
++{
++ struct dwarf2_debug *stash = unit->stash;
++ struct dwarf2_debug_file *file = unit->file;
++ size_t addr_base = unit->dwarf_addr_offset;
++ bfd_byte *info_ptr;
++
++ if (stash == NULL)
++ return 0;
++
++ if (!read_section (unit->abfd, &stash->debug_sections[debug_addr],
++ file->syms, 0,
++ &file->dwarf_addr_buffer, &file->dwarf_addr_size))
++ return 0;
++
++ info_ptr = file->dwarf_addr_buffer + addr_base + idx * unit->offset_size;
++
++ if (unit->offset_size == 4)
++ return bfd_get_32 (unit->abfd, info_ptr);
++ else
++ return bfd_get_64 (unit->abfd, info_ptr);
++}
++
++/* Returns the string using DW_AT_str_offsets_base.
++ Used to implement DW_FORM_strx*. */
+ static const char *
+-read_indexed_string (bfd_uint64_t idx ATTRIBUTE_UNUSED,
+- struct comp_unit * unit ATTRIBUTE_UNUSED)
++read_indexed_string (bfd_uint64_t idx,
++ struct comp_unit *unit)
+ {
+- /* FIXME: Add support for indexed strings. */
+- return "<indexed strings not yet supported>";
++ struct dwarf2_debug *stash = unit->stash;
++ struct dwarf2_debug_file *file = unit->file;
++ bfd_byte *info_ptr;
++ unsigned long str_offset;
++
++ if (stash == NULL)
++ return NULL;
++
++ if (!read_section (unit->abfd, &stash->debug_sections[debug_str],
++ file->syms, 0,
++ &file->dwarf_str_buffer, &file->dwarf_str_size))
++ return NULL;
++
++ if (!read_section (unit->abfd, &stash->debug_sections[debug_str_offsets],
++ file->syms, 0,
++ &file->dwarf_str_offsets_buffer,
++ &file->dwarf_str_offsets_size))
++ return NULL;
++
++ info_ptr = (file->dwarf_str_offsets_buffer
++ + unit->dwarf_str_offset
++ + idx * unit->offset_size);
++
++ if (unit->offset_size == 4)
++ str_offset = bfd_get_32 (unit->abfd, info_ptr);
++ else
++ str_offset = bfd_get_64 (unit->abfd, info_ptr);
++
++ return (const char *) file->dwarf_str_buffer + str_offset;
+ }
+
+ /* Read and fill in the value of attribute ATTR as described by FORM.
+@@ -1381,21 +1483,37 @@ read_attribute_value (struct attribute * attr,
+ case DW_FORM_ref1:
+ case DW_FORM_flag:
+ case DW_FORM_data1:
++ attr->u.val = read_1_byte (abfd, &info_ptr, info_ptr_end);
++ break;
+ case DW_FORM_addrx1:
+ attr->u.val = read_1_byte (abfd, &info_ptr, info_ptr_end);
++ /* dwarf_addr_offset value 0 indicates the attribute DW_AT_addr_base
++ is not yet read. */
++ if (unit->dwarf_addr_offset != 0)
++ attr->u.val = read_indexed_address (attr->u.val, unit);
+ break;
+ case DW_FORM_data2:
+- case DW_FORM_addrx2:
+ case DW_FORM_ref2:
+ attr->u.val = read_2_bytes (abfd, &info_ptr, info_ptr_end);
+ break;
++ case DW_FORM_addrx2:
++ attr->u.val = read_2_bytes (abfd, &info_ptr, info_ptr_end);
++ if (unit->dwarf_addr_offset != 0)
++ attr->u.val = read_indexed_address (attr->u.val, unit);
++ break;
+ case DW_FORM_addrx3:
+ attr->u.val = read_3_bytes (abfd, &info_ptr, info_ptr_end);
++ if (unit->dwarf_addr_offset != 0)
++ attr->u.val = read_indexed_address(attr->u.val, unit);
+ break;
+ case DW_FORM_ref4:
+ case DW_FORM_data4:
++ attr->u.val = read_4_bytes (abfd, &info_ptr, info_ptr_end);
++ break;
+ case DW_FORM_addrx4:
+ attr->u.val = read_4_bytes (abfd, &info_ptr, info_ptr_end);
++ if (unit->dwarf_addr_offset != 0)
++ attr->u.val = read_indexed_address (attr->u.val, unit);
+ break;
+ case DW_FORM_data8:
+ case DW_FORM_ref8:
+@@ -1416,24 +1534,31 @@ read_attribute_value (struct attribute * attr,
+ break;
+ case DW_FORM_strx1:
+ attr->u.val = read_1_byte (abfd, &info_ptr, info_ptr_end);
+- attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ /* dwarf_str_offset value 0 indicates the attribute DW_AT_str_offsets_base
++ is not yet read. */
++ if (unit->dwarf_str_offset != 0)
++ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
+ break;
+ case DW_FORM_strx2:
+ attr->u.val = read_2_bytes (abfd, &info_ptr, info_ptr_end);
+- attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ if (unit->dwarf_str_offset != 0)
++ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
+ break;
+ case DW_FORM_strx3:
+ attr->u.val = read_3_bytes (abfd, &info_ptr, info_ptr_end);
+- attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ if (unit->dwarf_str_offset != 0)
++ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
+ break;
+ case DW_FORM_strx4:
+ attr->u.val = read_4_bytes (abfd, &info_ptr, info_ptr_end);
+- attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ if (unit->dwarf_str_offset != 0)
++ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
+ break;
+ case DW_FORM_strx:
+ attr->u.val = _bfd_safe_read_leb128 (abfd, &info_ptr,
+ false, info_ptr_end);
+- attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ if (unit->dwarf_str_offset != 0)
++ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
+ break;
+ case DW_FORM_exprloc:
+ case DW_FORM_block:
+@@ -1455,9 +1580,14 @@ read_attribute_value (struct attribute * attr,
+ break;
+ case DW_FORM_ref_udata:
+ case DW_FORM_udata:
++ attr->u.val = _bfd_safe_read_leb128 (abfd, &info_ptr,
++ false, info_ptr_end);
++ break;
+ case DW_FORM_addrx:
+ attr->u.val = _bfd_safe_read_leb128 (abfd, &info_ptr,
+ false, info_ptr_end);
++ if (unit->dwarf_addr_offset != 0)
++ attr->u.val = read_indexed_address (attr->u.val, unit);
+ break;
+ case DW_FORM_indirect:
+ form = _bfd_safe_read_leb128 (abfd, &info_ptr,
+@@ -2396,6 +2526,11 @@ read_formatted_entries (struct comp_unit *unit, bfd_byte **bufp,
+ {
+ case DW_FORM_string:
+ case DW_FORM_line_strp:
++ case DW_FORM_strx:
++ case DW_FORM_strx1:
++ case DW_FORM_strx2:
++ case DW_FORM_strx3:
++ case DW_FORM_strx4:
+ *stringp = attr.u.str;
+ break;
+
+@@ -4031,6 +4166,80 @@ scan_unit_for_symbols (struct comp_unit *unit)
+ return false;
+ }
+
++/* Read the attributes of the form strx and addrx. */
++
++static void
++reread_attribute (struct comp_unit *unit,
++ struct attribute *attr,
++ bfd_vma *low_pc,
++ bfd_vma *high_pc,
++ bool *high_pc_relative,
++ bool compunit)
++{
++ if (is_strx_form (attr->form))
++ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ if (is_addrx_form (attr->form))
++ attr->u.val = read_indexed_address (attr->u.val, unit);
++
++ switch (attr->name)
++ {
++ case DW_AT_stmt_list:
++ unit->stmtlist = 1;
++ unit->line_offset = attr->u.val;
++ break;
++
++ case DW_AT_name:
++ if (is_str_form (attr))
++ unit->name = attr->u.str;
++ break;
++
++ case DW_AT_low_pc:
++ *low_pc = attr->u.val;
++ if (compunit)
++ unit->base_address = *low_pc;
++ break;
++
++ case DW_AT_high_pc:
++ *high_pc = attr->u.val;
++ *high_pc_relative = attr->form != DW_FORM_addr;
++ break;
++
++ case DW_AT_ranges:
++ if (!read_rangelist (unit, &unit->arange,
++ &unit->file->trie_root, attr->u.val))
++ return;
++ break;
++
++ case DW_AT_comp_dir:
++ {
++ char *comp_dir = attr->u.str;
++
++ if (!is_str_form (attr))
++ {
++ _bfd_error_handler
++ (_("DWARF error: DW_AT_comp_dir attribute encountered "
++ "with a non-string form"));
++ comp_dir = NULL;
++ }
++
++ if (comp_dir)
++ {
++ char *cp = strchr (comp_dir, ':');
++
++ if (cp && cp != comp_dir && cp[-1] == '.' && cp[1] == '/')
++ comp_dir = cp + 1;
++ }
++ unit->comp_dir = comp_dir;
++ break;
++ }
++
++ case DW_AT_language:
++ unit->lang = attr->u.val;
++ default:
++ break;
++ }
++}
++
+ /* Parse a DWARF2 compilation unit starting at INFO_PTR. UNIT_LENGTH
+ includes the compilation unit header that proceeds the DIE's, but
+ does not include the length field that precedes each compilation
+@@ -4064,6 +4273,10 @@ parse_comp_unit (struct dwarf2_debug *stash,
+ bfd *abfd = file->bfd_ptr;
+ bool high_pc_relative = false;
+ enum dwarf_unit_type unit_type;
++ struct attribute *str_addrp = NULL;
++ size_t str_count = 0;
++ size_t str_alloc = 0;
++ bool compunit_flag = false;
+
+ version = read_2_bytes (abfd, &info_ptr, end_ptr);
+ if (version < 2 || version > 5)
+@@ -4168,11 +4381,33 @@ parse_comp_unit (struct dwarf2_debug *stash,
+ unit->file = file;
+ unit->info_ptr_unit = info_ptr_unit;
+
++ if (abbrev->tag == DW_TAG_compile_unit)
++ compunit_flag = true;
++
+ for (i = 0; i < abbrev->num_attrs; ++i)
+ {
+ info_ptr = read_attribute (&attr, &abbrev->attrs[i], unit, info_ptr, end_ptr);
+ if (info_ptr == NULL)
+- return NULL;
++ goto err_exit;
++
++ /* Identify attributes of the form strx* and addrx* which come before
++ DW_AT_str_offsets_base and DW_AT_addr_base respectively in the CU.
++ Store the attributes in an array and process them later. */
++ if ((unit->dwarf_str_offset == 0 && is_strx_form (attr.form))
++ || (unit->dwarf_addr_offset == 0 && is_addrx_form (attr.form)))
++ {
++ if (str_count <= str_alloc)
++ {
++ str_alloc = 2 * str_alloc + 200;
++ str_addrp = bfd_realloc (str_addrp,
++ str_alloc * sizeof (*str_addrp));
++ if (str_addrp == NULL)
++ goto err_exit;
++ }
++ str_addrp[str_count] = attr;
++ str_count++;
++ continue;
++ }
+
+ /* Store the data if it is of an attribute we want to keep in a
+ partial symbol table. */
+@@ -4198,7 +4433,7 @@ parse_comp_unit (struct dwarf2_debug *stash,
+ /* If the compilation unit DIE has a DW_AT_low_pc attribute,
+ this is the base address to use when reading location
+ lists or range lists. */
+- if (abbrev->tag == DW_TAG_compile_unit)
++ if (compunit_flag)
+ unit->base_address = low_pc;
+ }
+ break;
+@@ -4215,7 +4450,7 @@ parse_comp_unit (struct dwarf2_debug *stash,
+ if (is_int_form (&attr)
+ && !read_rangelist (unit, &unit->arange,
+ &unit->file->trie_root, attr.u.val))
+- return NULL;
++ goto err_exit;
+ break;
+
+ case DW_AT_comp_dir:
+@@ -4248,21 +4483,40 @@ parse_comp_unit (struct dwarf2_debug *stash,
+ unit->lang = attr.u.val;
+ break;
+
++ case DW_AT_addr_base:
++ unit->dwarf_addr_offset = attr.u.val;
++ break;
++
++ case DW_AT_str_offsets_base:
++ unit->dwarf_str_offset = attr.u.val;
++ break;
++
+ default:
+ break;
+ }
+ }
++
++ for (i = 0; i < str_count; ++i)
++ reread_attribute (unit, &str_addrp[i], &low_pc, &high_pc,
++ &high_pc_relative, compunit_flag);
++
+ if (high_pc_relative)
+ high_pc += low_pc;
+ if (high_pc != 0)
+ {
+ if (!arange_add (unit, &unit->arange, &unit->file->trie_root,
+ low_pc, high_pc))
+- return NULL;
++ goto err_exit;
+ }
+
+ unit->first_child_die_ptr = info_ptr;
++
++ free (str_addrp);
+ return unit;
++
++ err_exit:
++ free (str_addrp);
++ return NULL;
+ }
+
+ /* Return TRUE if UNIT may contain the address given by ADDR. When
+--
+2.31.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-2.patch b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-2.patch
new file mode 100644
index 0000000000..be698ef5c1
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-2.patch
@@ -0,0 +1,2127 @@
+From 0e3c1eebb22e0ade28b619fb41f42d66ed6fb145 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 27 May 2022 12:37:21 +0930
+Subject: [PATCH] Remove use of bfd_uint64_t and similar
+
+Requiring C99 means that uses of bfd_uint64_t can be replaced with
+uint64_t, and similarly for bfd_int64_t, BFD_HOST_U_64_BIT, and
+BFD_HOST_64_BIT. This patch does that, removes #ifdef BFD_HOST_*
+and tidies a few places that print 64-bit values.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=0e3c1eebb22e0ade28b619fb41f42d66ed6fb145]
+
+CVE: CVE-2023-1579
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+
+---
+ bfd/aix386-core.c | 6 +--
+ bfd/bfd-in.h | 24 ++++++------
+ bfd/bfd-in2.h | 36 +++++++++---------
+ bfd/coff-rs6000.c | 10 +----
+ bfd/coff-x86_64.c | 2 +-
+ bfd/cpu-ia64-opc.c | 22 +++++------
+ bfd/dwarf2.c | 83 ++++++++++++++++++++---------------------
+ bfd/elf32-score.c | 16 ++++----
+ bfd/elf64-ia64-vms.c | 8 ++--
+ bfd/elflink.c | 16 +-------
+ bfd/elfxx-ia64.c | 6 +--
+ bfd/hppabsd-core.c | 6 +--
+ bfd/hpux-core.c | 6 +--
+ bfd/irix-core.c | 6 +--
+ bfd/libbfd.c | 65 +++++++++-----------------------
+ bfd/mach-o.c | 2 +-
+ bfd/mach-o.h | 8 ++--
+ bfd/netbsd-core.c | 6 +--
+ bfd/osf-core.c | 6 +--
+ bfd/ptrace-core.c | 6 +--
+ bfd/sco5-core.c | 6 +--
+ bfd/targets.c | 12 +++---
+ bfd/trad-core.c | 6 +--
+ bfd/vms-alpha.c | 2 +-
+ binutils/nm.c | 49 +++---------------------
+ binutils/od-macho.c | 50 ++++++++-----------------
+ binutils/prdbg.c | 39 +++----------------
+ binutils/readelf.c | 21 +++++------
+ gas/config/tc-arm.c | 28 ++++----------
+ gas/config/tc-csky.c | 10 ++---
+ gas/config/tc-sparc.c | 35 +++++++++--------
+ gas/config/tc-tilegx.c | 20 +++++-----
+ gas/config/tc-tilepro.c | 20 +++++-----
+ gas/config/tc-z80.c | 8 ++--
+ gas/config/te-vms.c | 2 +-
+ gas/config/te-vms.h | 2 +-
+ gdb/findcmd.c | 2 +-
+ gdb/tilegx-tdep.c | 2 +-
+ gprof/gmon_io.c | 44 ++++++----------------
+ include/elf/nfp.h | 2 +-
+ include/opcode/csky.h | 62 +++++++++++++++---------------
+ include/opcode/ia64.h | 2 +-
+ opcodes/csky-dis.c | 2 +-
+ opcodes/csky-opc.h | 4 +-
+ opcodes/ia64-dis.c | 2 +-
+ 45 files changed, 297 insertions(+), 475 deletions(-)
+
+diff --git a/bfd/aix386-core.c b/bfd/aix386-core.c
+index 3443e49ed46..977a6bd1fb4 100644
+--- a/bfd/aix386-core.c
++++ b/bfd/aix386-core.c
+@@ -220,9 +220,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_aix386_vec =
+ {
+diff --git a/bfd/bfd-in.h b/bfd/bfd-in.h
+index a1c4bf139fc..09c5728e944 100644
+--- a/bfd/bfd-in.h
++++ b/bfd/bfd-in.h
+@@ -116,10 +116,10 @@ typedef struct bfd bfd;
+ #error No 64 bit integer type available
+ #endif /* ! defined (BFD_HOST_64_BIT) */
+
+-typedef BFD_HOST_U_64_BIT bfd_vma;
+-typedef BFD_HOST_64_BIT bfd_signed_vma;
+-typedef BFD_HOST_U_64_BIT bfd_size_type;
+-typedef BFD_HOST_U_64_BIT symvalue;
++typedef uint64_t bfd_vma;
++typedef int64_t bfd_signed_vma;
++typedef uint64_t bfd_size_type;
++typedef uint64_t symvalue;
+
+ #if BFD_HOST_64BIT_LONG
+ #define BFD_VMA_FMT "l"
+@@ -447,10 +447,10 @@ extern bool bfd_record_phdr
+
+ /* Byte swapping routines. */
+
+-bfd_uint64_t bfd_getb64 (const void *);
+-bfd_uint64_t bfd_getl64 (const void *);
+-bfd_int64_t bfd_getb_signed_64 (const void *);
+-bfd_int64_t bfd_getl_signed_64 (const void *);
++uint64_t bfd_getb64 (const void *);
++uint64_t bfd_getl64 (const void *);
++int64_t bfd_getb_signed_64 (const void *);
++int64_t bfd_getl_signed_64 (const void *);
+ bfd_vma bfd_getb32 (const void *);
+ bfd_vma bfd_getl32 (const void *);
+ bfd_signed_vma bfd_getb_signed_32 (const void *);
+@@ -459,8 +459,8 @@ bfd_vma bfd_getb16 (const void *);
+ bfd_vma bfd_getl16 (const void *);
+ bfd_signed_vma bfd_getb_signed_16 (const void *);
+ bfd_signed_vma bfd_getl_signed_16 (const void *);
+-void bfd_putb64 (bfd_uint64_t, void *);
+-void bfd_putl64 (bfd_uint64_t, void *);
++void bfd_putb64 (uint64_t, void *);
++void bfd_putl64 (uint64_t, void *);
+ void bfd_putb32 (bfd_vma, void *);
+ void bfd_putl32 (bfd_vma, void *);
+ void bfd_putb24 (bfd_vma, void *);
+@@ -470,8 +470,8 @@ void bfd_putl16 (bfd_vma, void *);
+
+ /* Byte swapping routines which take size and endiannes as arguments. */
+
+-bfd_uint64_t bfd_get_bits (const void *, int, bool);
+-void bfd_put_bits (bfd_uint64_t, void *, int, bool);
++uint64_t bfd_get_bits (const void *, int, bool);
++void bfd_put_bits (uint64_t, void *, int, bool);
+
+
+ /* mmap hacks */
+diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h
+index 50e26fc691d..d50885e76cf 100644
+--- a/bfd/bfd-in2.h
++++ b/bfd/bfd-in2.h
+@@ -123,10 +123,10 @@ typedef struct bfd bfd;
+ #error No 64 bit integer type available
+ #endif /* ! defined (BFD_HOST_64_BIT) */
+
+-typedef BFD_HOST_U_64_BIT bfd_vma;
+-typedef BFD_HOST_64_BIT bfd_signed_vma;
+-typedef BFD_HOST_U_64_BIT bfd_size_type;
+-typedef BFD_HOST_U_64_BIT symvalue;
++typedef uint64_t bfd_vma;
++typedef int64_t bfd_signed_vma;
++typedef uint64_t bfd_size_type;
++typedef uint64_t symvalue;
+
+ #if BFD_HOST_64BIT_LONG
+ #define BFD_VMA_FMT "l"
+@@ -454,10 +454,10 @@ extern bool bfd_record_phdr
+
+ /* Byte swapping routines. */
+
+-bfd_uint64_t bfd_getb64 (const void *);
+-bfd_uint64_t bfd_getl64 (const void *);
+-bfd_int64_t bfd_getb_signed_64 (const void *);
+-bfd_int64_t bfd_getl_signed_64 (const void *);
++uint64_t bfd_getb64 (const void *);
++uint64_t bfd_getl64 (const void *);
++int64_t bfd_getb_signed_64 (const void *);
++int64_t bfd_getl_signed_64 (const void *);
+ bfd_vma bfd_getb32 (const void *);
+ bfd_vma bfd_getl32 (const void *);
+ bfd_signed_vma bfd_getb_signed_32 (const void *);
+@@ -466,8 +466,8 @@ bfd_vma bfd_getb16 (const void *);
+ bfd_vma bfd_getl16 (const void *);
+ bfd_signed_vma bfd_getb_signed_16 (const void *);
+ bfd_signed_vma bfd_getl_signed_16 (const void *);
+-void bfd_putb64 (bfd_uint64_t, void *);
+-void bfd_putl64 (bfd_uint64_t, void *);
++void bfd_putb64 (uint64_t, void *);
++void bfd_putl64 (uint64_t, void *);
+ void bfd_putb32 (bfd_vma, void *);
+ void bfd_putl32 (bfd_vma, void *);
+ void bfd_putb24 (bfd_vma, void *);
+@@ -477,8 +477,8 @@ void bfd_putl16 (bfd_vma, void *);
+
+ /* Byte swapping routines which take size and endiannes as arguments. */
+
+-bfd_uint64_t bfd_get_bits (const void *, int, bool);
+-void bfd_put_bits (bfd_uint64_t, void *, int, bool);
++uint64_t bfd_get_bits (const void *, int, bool);
++void bfd_put_bits (uint64_t, void *, int, bool);
+
+
+ /* mmap hacks */
+@@ -7416,9 +7416,9 @@ typedef struct bfd_target
+ /* Entries for byte swapping for data. These are different from the
+ other entry points, since they don't take a BFD as the first argument.
+ Certain other handlers could do the same. */
+- bfd_uint64_t (*bfd_getx64) (const void *);
+- bfd_int64_t (*bfd_getx_signed_64) (const void *);
+- void (*bfd_putx64) (bfd_uint64_t, void *);
++ uint64_t (*bfd_getx64) (const void *);
++ int64_t (*bfd_getx_signed_64) (const void *);
++ void (*bfd_putx64) (uint64_t, void *);
+ bfd_vma (*bfd_getx32) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_32) (const void *);
+ void (*bfd_putx32) (bfd_vma, void *);
+@@ -7427,9 +7427,9 @@ typedef struct bfd_target
+ void (*bfd_putx16) (bfd_vma, void *);
+
+ /* Byte swapping for the headers. */
+- bfd_uint64_t (*bfd_h_getx64) (const void *);
+- bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
+- void (*bfd_h_putx64) (bfd_uint64_t, void *);
++ uint64_t (*bfd_h_getx64) (const void *);
++ int64_t (*bfd_h_getx_signed_64) (const void *);
++ void (*bfd_h_putx64) (uint64_t, void *);
+ bfd_vma (*bfd_h_getx32) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
+ void (*bfd_h_putx32) (bfd_vma, void *);
+diff --git a/bfd/coff-rs6000.c b/bfd/coff-rs6000.c
+index 8819187ab42..48ce5c0516b 100644
+--- a/bfd/coff-rs6000.c
++++ b/bfd/coff-rs6000.c
+@@ -1890,18 +1890,12 @@ xcoff_write_armap_old (bfd *abfd, unsigned int elength ATTRIBUTE_UNUSED,
+ }
+
+ static char buff20[XCOFFARMAGBIG_ELEMENT_SIZE + 1];
+-#if BFD_HOST_64BIT_LONG
+-#define FMT20 "%-20ld"
+-#elif defined (__MSVCRT__)
+-#define FMT20 "%-20I64d"
+-#else
+-#define FMT20 "%-20lld"
+-#endif
++#define FMT20 "%-20" PRId64
+ #define FMT12 "%-12d"
+ #define FMT12_OCTAL "%-12o"
+ #define FMT4 "%-4d"
+ #define PRINT20(d, v) \
+- sprintf (buff20, FMT20, (bfd_uint64_t)(v)), \
++ sprintf (buff20, FMT20, (uint64_t) (v)), \
+ memcpy ((void *) (d), buff20, 20)
+
+ #define PRINT12(d, v) \
+diff --git a/bfd/coff-x86_64.c b/bfd/coff-x86_64.c
+index e8e16d3ce4b..cf339c93215 100644
+--- a/bfd/coff-x86_64.c
++++ b/bfd/coff-x86_64.c
+@@ -201,7 +201,7 @@ coff_amd64_reloc (bfd *abfd,
+
+ case 4:
+ {
+- bfd_uint64_t x = bfd_get_64 (abfd, addr);
++ uint64_t x = bfd_get_64 (abfd, addr);
+ DOIT (x);
+ bfd_put_64 (abfd, x, addr);
+ }
+diff --git a/bfd/cpu-ia64-opc.c b/bfd/cpu-ia64-opc.c
+index e2b5c2694b6..01e3c3f476a 100644
+--- a/bfd/cpu-ia64-opc.c
++++ b/bfd/cpu-ia64-opc.c
+@@ -99,14 +99,14 @@ ins_immu (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+ static const char*
+ ext_immu (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+ {
+- BFD_HOST_U_64_BIT value = 0;
++ uint64_t value = 0;
+ int i, bits = 0, total = 0;
+
+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i)
+ {
+ bits = self->field[i].bits;
+ value |= ((code >> self->field[i].shift)
+- & ((((BFD_HOST_U_64_BIT) 1) << bits) - 1)) << total;
++ & (((uint64_t) 1 << bits) - 1)) << total;
+ total += bits;
+ }
+ *valuep = value;
+@@ -161,7 +161,7 @@ static const char*
+ ins_imms_scaled (const struct ia64_operand *self, ia64_insn value,
+ ia64_insn *code, int scale)
+ {
+- BFD_HOST_64_BIT svalue = value, sign_bit = 0;
++ int64_t svalue = value, sign_bit = 0;
+ ia64_insn new_insn = 0;
+ int i;
+
+@@ -186,17 +186,17 @@ ext_imms_scaled (const struct ia64_operand *self, ia64_insn code,
+ ia64_insn *valuep, int scale)
+ {
+ int i, bits = 0, total = 0;
+- BFD_HOST_U_64_BIT val = 0, sign;
++ uint64_t val = 0, sign;
+
+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i)
+ {
+ bits = self->field[i].bits;
+ val |= ((code >> self->field[i].shift)
+- & ((((BFD_HOST_U_64_BIT) 1) << bits) - 1)) << total;
++ & (((uint64_t) 1 << bits) - 1)) << total;
+ total += bits;
+ }
+ /* sign extend: */
+- sign = (BFD_HOST_U_64_BIT) 1 << (total - 1);
++ sign = (uint64_t) 1 << (total - 1);
+ val = (val ^ sign) - sign;
+
+ *valuep = val << scale;
+@@ -312,7 +312,7 @@ static const char*
+ ins_cnt (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+ {
+ --value;
+- if (value >= ((BFD_HOST_U_64_BIT) 1) << self->field[0].bits)
++ if (value >= (uint64_t) 1 << self->field[0].bits)
+ return "count out of range";
+
+ *code |= value << self->field[0].shift;
+@@ -323,7 +323,7 @@ static const char*
+ ext_cnt (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+ {
+ *valuep = ((code >> self->field[0].shift)
+- & ((((BFD_HOST_U_64_BIT) 1) << self->field[0].bits) - 1)) + 1;
++ & (((uint64_t) 1 << self->field[0].bits) - 1)) + 1;
+ return 0;
+ }
+
+@@ -421,8 +421,8 @@ ext_strd5b (const struct ia64_operand *self, ia64_insn code,
+ static const char*
+ ins_inc3 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+ {
+- BFD_HOST_64_BIT val = value;
+- BFD_HOST_U_64_BIT sign = 0;
++ int64_t val = value;
++ uint64_t sign = 0;
+
+ if (val < 0)
+ {
+@@ -444,7 +444,7 @@ ins_inc3 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+ static const char*
+ ext_inc3 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+ {
+- BFD_HOST_64_BIT val;
++ int64_t val;
+ int negate;
+
+ val = (code >> self->field[0].shift) & 0x7;
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 45e286754e4..6a728fc38b0 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -63,8 +63,8 @@ struct attribute
+ {
+ char *str;
+ struct dwarf_block *blk;
+- bfd_uint64_t val;
+- bfd_int64_t sval;
++ uint64_t val;
++ int64_t sval;
+ }
+ u;
+ };
+@@ -632,12 +632,12 @@ lookup_info_hash_table (struct info_hash_table *hash_table, const char *key)
+ the located section does not contain at least OFFSET bytes. */
+
+ static bool
+-read_section (bfd * abfd,
++read_section (bfd *abfd,
+ const struct dwarf_debug_section *sec,
+- asymbol ** syms,
+- bfd_uint64_t offset,
+- bfd_byte ** section_buffer,
+- bfd_size_type * section_size)
++ asymbol **syms,
++ uint64_t offset,
++ bfd_byte **section_buffer,
++ bfd_size_type *section_size)
+ {
+ const char *section_name = sec->uncompressed_name;
+ bfd_byte *contents = *section_buffer;
+@@ -848,7 +848,7 @@ read_indirect_string (struct comp_unit *unit,
+ bfd_byte **ptr,
+ bfd_byte *buf_end)
+ {
+- bfd_uint64_t offset;
++ uint64_t offset;
+ struct dwarf2_debug *stash = unit->stash;
+ struct dwarf2_debug_file *file = unit->file;
+ char *str;
+@@ -882,7 +882,7 @@ read_indirect_line_string (struct comp_unit *unit,
+ bfd_byte **ptr,
+ bfd_byte *buf_end)
+ {
+- bfd_uint64_t offset;
++ uint64_t offset;
+ struct dwarf2_debug *stash = unit->stash;
+ struct dwarf2_debug_file *file = unit->file;
+ char *str;
+@@ -919,7 +919,7 @@ read_alt_indirect_string (struct comp_unit *unit,
+ bfd_byte **ptr,
+ bfd_byte *buf_end)
+ {
+- bfd_uint64_t offset;
++ uint64_t offset;
+ struct dwarf2_debug *stash = unit->stash;
+ char *str;
+
+@@ -975,8 +975,7 @@ read_alt_indirect_string (struct comp_unit *unit,
+ or NULL upon failure. */
+
+ static bfd_byte *
+-read_alt_indirect_ref (struct comp_unit * unit,
+- bfd_uint64_t offset)
++read_alt_indirect_ref (struct comp_unit *unit, uint64_t offset)
+ {
+ struct dwarf2_debug *stash = unit->stash;
+
+@@ -1012,7 +1011,7 @@ read_alt_indirect_ref (struct comp_unit * unit,
+ return stash->alt.dwarf_info_buffer + offset;
+ }
+
+-static bfd_uint64_t
++static uint64_t
+ read_address (struct comp_unit *unit, bfd_byte **ptr, bfd_byte *buf_end)
+ {
+ bfd_byte *buf = *ptr;
+@@ -1131,7 +1130,7 @@ del_abbrev (void *p)
+ in a hash table. */
+
+ static struct abbrev_info**
+-read_abbrevs (bfd *abfd, bfd_uint64_t offset, struct dwarf2_debug *stash,
++read_abbrevs (bfd *abfd, uint64_t offset, struct dwarf2_debug *stash,
+ struct dwarf2_debug_file *file)
+ {
+ struct abbrev_info **abbrevs;
+@@ -1356,8 +1355,7 @@ is_addrx_form (enum dwarf_form form)
+ /* Returns the address in .debug_addr section using DW_AT_addr_base.
+ Used to implement DW_FORM_addrx*. */
+ static bfd_vma
+-read_indexed_address (bfd_uint64_t idx,
+- struct comp_unit *unit)
++read_indexed_address (uint64_t idx, struct comp_unit *unit)
+ {
+ struct dwarf2_debug *stash = unit->stash;
+ struct dwarf2_debug_file *file = unit->file;
+@@ -1383,8 +1381,7 @@ read_indexed_address (bfd_uint64_t idx,
+ /* Returns the string using DW_AT_str_offsets_base.
+ Used to implement DW_FORM_strx*. */
+ static const char *
+-read_indexed_string (bfd_uint64_t idx,
+- struct comp_unit *unit)
++read_indexed_string (uint64_t idx, struct comp_unit *unit)
+ {
+ struct dwarf2_debug *stash = unit->stash;
+ struct dwarf2_debug_file *file = unit->file;
+@@ -1717,39 +1714,39 @@ struct line_info_table
+ struct funcinfo
+ {
+ /* Pointer to previous function in list of all functions. */
+- struct funcinfo * prev_func;
++ struct funcinfo *prev_func;
+ /* Pointer to function one scope higher. */
+- struct funcinfo * caller_func;
++ struct funcinfo *caller_func;
+ /* Source location file name where caller_func inlines this func. */
+- char * caller_file;
++ char *caller_file;
+ /* Source location file name. */
+- char * file;
++ char *file;
+ /* Source location line number where caller_func inlines this func. */
+- int caller_line;
++ int caller_line;
+ /* Source location line number. */
+- int line;
+- int tag;
+- bool is_linkage;
+- const char * name;
+- struct arange arange;
++ int line;
++ int tag;
++ bool is_linkage;
++ const char *name;
++ struct arange arange;
+ /* Where the symbol is defined. */
+- asection * sec;
++ asection *sec;
+ /* The offset of the funcinfo from the start of the unit. */
+- bfd_uint64_t unit_offset;
++ uint64_t unit_offset;
+ };
+
+ struct lookup_funcinfo
+ {
+ /* Function information corresponding to this lookup table entry. */
+- struct funcinfo * funcinfo;
++ struct funcinfo *funcinfo;
+
+ /* The lowest address for this specific function. */
+- bfd_vma low_addr;
++ bfd_vma low_addr;
+
+ /* The highest address of this function before the lookup table is sorted.
+ The highest address of all prior functions after the lookup table is
+ sorted, which is used for binary search. */
+- bfd_vma high_addr;
++ bfd_vma high_addr;
+ /* Index of this function, used to ensure qsort is stable. */
+ unsigned int idx;
+ };
+@@ -1759,7 +1756,7 @@ struct varinfo
+ /* Pointer to previous variable in list of all variables. */
+ struct varinfo *prev_var;
+ /* The offset of the varinfo from the start of the unit. */
+- bfd_uint64_t unit_offset;
++ uint64_t unit_offset;
+ /* Source location file name. */
+ char *file;
+ /* Source location line number. */
+@@ -3335,7 +3332,7 @@ find_abstract_instance (struct comp_unit *unit,
+ bfd_byte *info_ptr_end;
+ unsigned int abbrev_number, i;
+ struct abbrev_info *abbrev;
+- bfd_uint64_t die_ref = attr_ptr->u.val;
++ uint64_t die_ref = attr_ptr->u.val;
+ struct attribute attr;
+ const char *name = NULL;
+
+@@ -3549,7 +3546,7 @@ find_abstract_instance (struct comp_unit *unit,
+
+ static bool
+ read_ranges (struct comp_unit *unit, struct arange *arange,
+- struct trie_node **trie_root, bfd_uint64_t offset)
++ struct trie_node **trie_root, uint64_t offset)
+ {
+ bfd_byte *ranges_ptr;
+ bfd_byte *ranges_end;
+@@ -3594,7 +3591,7 @@ read_ranges (struct comp_unit *unit, struct arange *arange,
+
+ static bool
+ read_rnglists (struct comp_unit *unit, struct arange *arange,
+- struct trie_node **trie_root, bfd_uint64_t offset)
++ struct trie_node **trie_root, uint64_t offset)
+ {
+ bfd_byte *rngs_ptr;
+ bfd_byte *rngs_end;
+@@ -3675,7 +3672,7 @@ read_rnglists (struct comp_unit *unit, struct arange *arange,
+
+ static bool
+ read_rangelist (struct comp_unit *unit, struct arange *arange,
+- struct trie_node **trie_root, bfd_uint64_t offset)
++ struct trie_node **trie_root, uint64_t offset)
+ {
+ if (unit->version <= 4)
+ return read_ranges (unit, arange, trie_root, offset);
+@@ -3684,7 +3681,7 @@ read_rangelist (struct comp_unit *unit, struct arange *arange,
+ }
+
+ static struct funcinfo *
+-lookup_func_by_offset (bfd_uint64_t offset, struct funcinfo * table)
++lookup_func_by_offset (uint64_t offset, struct funcinfo * table)
+ {
+ for (; table != NULL; table = table->prev_func)
+ if (table->unit_offset == offset)
+@@ -3693,7 +3690,7 @@ lookup_func_by_offset (bfd_uint64_t offset, struct funcinfo * table)
+ }
+
+ static struct varinfo *
+-lookup_var_by_offset (bfd_uint64_t offset, struct varinfo * table)
++lookup_var_by_offset (uint64_t offset, struct varinfo * table)
+ {
+ while (table)
+ {
+@@ -3775,7 +3772,7 @@ scan_unit_for_symbols (struct comp_unit *unit)
+ struct abbrev_info *abbrev;
+ struct funcinfo *func;
+ struct varinfo *var;
+- bfd_uint64_t current_offset;
++ uint64_t current_offset;
+
+ /* PR 17512: file: 9f405d9d. */
+ if (info_ptr >= info_ptr_end)
+@@ -3909,7 +3906,7 @@ scan_unit_for_symbols (struct comp_unit *unit)
+ bfd_vma low_pc = 0;
+ bfd_vma high_pc = 0;
+ bool high_pc_relative = false;
+- bfd_uint64_t current_offset;
++ uint64_t current_offset;
+
+ /* PR 17512: file: 9f405d9d. */
+ if (info_ptr >= info_ptr_end)
+@@ -4259,7 +4256,7 @@ parse_comp_unit (struct dwarf2_debug *stash,
+ {
+ struct comp_unit* unit;
+ unsigned int version;
+- bfd_uint64_t abbrev_offset = 0;
++ uint64_t abbrev_offset = 0;
+ /* Initialize it just to avoid a GCC false warning. */
+ unsigned int addr_size = -1;
+ struct abbrev_info** abbrevs;
+diff --git a/bfd/elf32-score.c b/bfd/elf32-score.c
+index c868707347c..5bc78d523ea 100644
+--- a/bfd/elf32-score.c
++++ b/bfd/elf32-score.c
+@@ -230,14 +230,14 @@ static bfd_vma
+ score3_bfd_getl48 (const void *p)
+ {
+ const bfd_byte *addr = p;
+- bfd_uint64_t v;
+-
+- v = (bfd_uint64_t) addr[4];
+- v |= (bfd_uint64_t) addr[5] << 8;
+- v |= (bfd_uint64_t) addr[2] << 16;
+- v |= (bfd_uint64_t) addr[3] << 24;
+- v |= (bfd_uint64_t) addr[0] << 32;
+- v |= (bfd_uint64_t) addr[1] << 40;
++ uint64_t v;
++
++ v = (uint64_t) addr[4];
++ v |= (uint64_t) addr[5] << 8;
++ v |= (uint64_t) addr[2] << 16;
++ v |= (uint64_t) addr[3] << 24;
++ v |= (uint64_t) addr[0] << 32;
++ v |= (uint64_t) addr[1] << 40;
+ return v;
+ }
+
+diff --git a/bfd/elf64-ia64-vms.c b/bfd/elf64-ia64-vms.c
+index 59cc6b6fe85..4d8f98550a3 100644
+--- a/bfd/elf64-ia64-vms.c
++++ b/bfd/elf64-ia64-vms.c
+@@ -179,7 +179,7 @@ struct elf64_ia64_vms_obj_tdata
+ struct elf_obj_tdata root;
+
+ /* Ident for shared library. */
+- bfd_uint64_t ident;
++ uint64_t ident;
+
+ /* Used only during link: offset in the .fixups section for this bfd. */
+ bfd_vma fixups_off;
+@@ -2791,7 +2791,7 @@ elf64_ia64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
+ if (!_bfd_elf_add_dynamic_entry (info, DT_IA_64_VMS_IDENT, 0))
+ return false;
+ if (!_bfd_elf_add_dynamic_entry (info, DT_IA_64_VMS_LINKTIME,
+- (((bfd_uint64_t)time_hi) << 32)
++ ((uint64_t) time_hi << 32)
+ + time_lo))
+ return false;
+
+@@ -4720,7 +4720,7 @@ elf64_vms_close_and_cleanup (bfd *abfd)
+ if ((isize & 7) != 0)
+ {
+ int ishort = 8 - (isize & 7);
+- bfd_uint64_t pad = 0;
++ uint64_t pad = 0;
+
+ bfd_seek (abfd, isize, SEEK_SET);
+ bfd_bwrite (&pad, ishort, abfd);
+@@ -4853,7 +4853,7 @@ elf64_vms_link_add_object_symbols (bfd *abfd, struct bfd_link_info *info)
+ bed->s->swap_dyn_in (abfd, extdyn, &dyn);
+ if (dyn.d_tag == DT_IA_64_VMS_IDENT)
+ {
+- bfd_uint64_t tagv = dyn.d_un.d_val;
++ uint64_t tagv = dyn.d_un.d_val;
+ elf_ia64_vms_ident (abfd) = tagv;
+ break;
+ }
+diff --git a/bfd/elflink.c b/bfd/elflink.c
+index 96eb36aa5bf..fc3a335c72d 100644
+--- a/bfd/elflink.c
++++ b/bfd/elflink.c
+@@ -6354,15 +6354,11 @@ compute_bucket_count (struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ size_t best_size = 0;
+ unsigned long int i;
+
+- /* We have a problem here. The following code to optimize the table
+- size requires an integer type with more the 32 bits. If
+- BFD_HOST_U_64_BIT is set we know about such a type. */
+-#ifdef BFD_HOST_U_64_BIT
+ if (info->optimize)
+ {
+ size_t minsize;
+ size_t maxsize;
+- BFD_HOST_U_64_BIT best_chlen = ~((BFD_HOST_U_64_BIT) 0);
++ uint64_t best_chlen = ~((uint64_t) 0);
+ bfd *dynobj = elf_hash_table (info)->dynobj;
+ size_t dynsymcount = elf_hash_table (info)->dynsymcount;
+ const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
+@@ -6399,7 +6395,7 @@ compute_bucket_count (struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ for (i = minsize; i < maxsize; ++i)
+ {
+ /* Walk through the array of hashcodes and count the collisions. */
+- BFD_HOST_U_64_BIT max;
++ uint64_t max;
+ unsigned long int j;
+ unsigned long int fact;
+
+@@ -6464,11 +6460,7 @@ compute_bucket_count (struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ free (counts);
+ }
+ else
+-#endif /* defined (BFD_HOST_U_64_BIT) */
+ {
+- /* This is the fallback solution if no 64bit type is available or if we
+- are not supposed to spend much time on optimizations. We select the
+- bucket count using a fixed set of numbers. */
+ for (i = 0; elf_buckets[i] != 0; i++)
+ {
+ best_size = elf_buckets[i];
+@@ -9354,7 +9346,6 @@ ext32b_r_offset (const void *p)
+ return aval;
+ }
+
+-#ifdef BFD_HOST_64_BIT
+ static bfd_vma
+ ext64l_r_offset (const void *p)
+ {
+@@ -9398,7 +9389,6 @@ ext64b_r_offset (const void *p)
+ | (uint64_t) a->c[7]);
+ return aval;
+ }
+-#endif
+
+ /* When performing a relocatable link, the input relocations are
+ preserved. But, if they reference global symbols, the indices
+@@ -9502,13 +9492,11 @@ elf_link_adjust_relocs (bfd *abfd,
+ }
+ else
+ {
+-#ifdef BFD_HOST_64_BIT
+ if (abfd->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
+ ext_r_off = ext64l_r_offset;
+ else if (abfd->xvec->header_byteorder == BFD_ENDIAN_BIG)
+ ext_r_off = ext64b_r_offset;
+ else
+-#endif
+ abort ();
+ }
+
+diff --git a/bfd/elfxx-ia64.c b/bfd/elfxx-ia64.c
+index c126adf6890..a108324ca39 100644
+--- a/bfd/elfxx-ia64.c
++++ b/bfd/elfxx-ia64.c
+@@ -555,11 +555,7 @@ ia64_elf_install_value (bfd_byte *hit_addr, bfd_vma v, unsigned int r_type)
+ enum ia64_opnd opnd;
+ const char *err;
+ size_t size = 8;
+-#ifdef BFD_HOST_U_64_BIT
+- BFD_HOST_U_64_BIT val = (BFD_HOST_U_64_BIT) v;
+-#else
+- bfd_vma val = v;
+-#endif
++ uint64_t val = v;
+
+ opnd = IA64_OPND_NIL;
+ switch (r_type)
+diff --git a/bfd/hppabsd-core.c b/bfd/hppabsd-core.c
+index acfa5f69a95..d87af955838 100644
+--- a/bfd/hppabsd-core.c
++++ b/bfd/hppabsd-core.c
+@@ -213,9 +213,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_hppabsd_vec =
+ {
+diff --git a/bfd/hpux-core.c b/bfd/hpux-core.c
+index 4f03b84909a..654532c6bb9 100644
+--- a/bfd/hpux-core.c
++++ b/bfd/hpux-core.c
+@@ -362,9 +362,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_hpux_vec =
+ {
+diff --git a/bfd/irix-core.c b/bfd/irix-core.c
+index 694fe2e2e07..b12aef9ce8b 100644
+--- a/bfd/irix-core.c
++++ b/bfd/irix-core.c
+@@ -275,9 +275,9 @@ swap_abort(void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_irix_vec =
+ {
+diff --git a/bfd/libbfd.c b/bfd/libbfd.c
+index 2781671ddba..d33f3416206 100644
+--- a/bfd/libbfd.c
++++ b/bfd/libbfd.c
+@@ -617,7 +617,7 @@ DESCRIPTION
+ #define COERCE16(x) (((bfd_vma) (x) ^ 0x8000) - 0x8000)
+ #define COERCE32(x) (((bfd_vma) (x) ^ 0x80000000) - 0x80000000)
+ #define COERCE64(x) \
+- (((bfd_uint64_t) (x) ^ ((bfd_uint64_t) 1 << 63)) - ((bfd_uint64_t) 1 << 63))
++ (((uint64_t) (x) ^ ((uint64_t) 1 << 63)) - ((uint64_t) 1 << 63))
+
+ bfd_vma
+ bfd_getb16 (const void *p)
+@@ -757,12 +757,11 @@ bfd_getl_signed_32 (const void *p)
+ return COERCE32 (v);
+ }
+
+-bfd_uint64_t
+-bfd_getb64 (const void *p ATTRIBUTE_UNUSED)
++uint64_t
++bfd_getb64 (const void *p)
+ {
+-#ifdef BFD_HOST_64_BIT
+ const bfd_byte *addr = (const bfd_byte *) p;
+- bfd_uint64_t v;
++ uint64_t v;
+
+ v = addr[0]; v <<= 8;
+ v |= addr[1]; v <<= 8;
+@@ -774,18 +773,13 @@ bfd_getb64 (const void *p ATTRIBUTE_UNUSED)
+ v |= addr[7];
+
+ return v;
+-#else
+- BFD_FAIL();
+- return 0;
+-#endif
+ }
+
+-bfd_uint64_t
+-bfd_getl64 (const void *p ATTRIBUTE_UNUSED)
++uint64_t
++bfd_getl64 (const void *p)
+ {
+-#ifdef BFD_HOST_64_BIT
+ const bfd_byte *addr = (const bfd_byte *) p;
+- bfd_uint64_t v;
++ uint64_t v;
+
+ v = addr[7]; v <<= 8;
+ v |= addr[6]; v <<= 8;
+@@ -797,19 +791,13 @@ bfd_getl64 (const void *p ATTRIBUTE_UNUSED)
+ v |= addr[0];
+
+ return v;
+-#else
+- BFD_FAIL();
+- return 0;
+-#endif
+-
+ }
+
+-bfd_int64_t
+-bfd_getb_signed_64 (const void *p ATTRIBUTE_UNUSED)
++int64_t
++bfd_getb_signed_64 (const void *p)
+ {
+-#ifdef BFD_HOST_64_BIT
+ const bfd_byte *addr = (const bfd_byte *) p;
+- bfd_uint64_t v;
++ uint64_t v;
+
+ v = addr[0]; v <<= 8;
+ v |= addr[1]; v <<= 8;
+@@ -821,18 +809,13 @@ bfd_getb_signed_64 (const void *p ATTRIBUTE_UNUSED)
+ v |= addr[7];
+
+ return COERCE64 (v);
+-#else
+- BFD_FAIL();
+- return 0;
+-#endif
+ }
+
+-bfd_int64_t
+-bfd_getl_signed_64 (const void *p ATTRIBUTE_UNUSED)
++int64_t
++bfd_getl_signed_64 (const void *p)
+ {
+-#ifdef BFD_HOST_64_BIT
+ const bfd_byte *addr = (const bfd_byte *) p;
+- bfd_uint64_t v;
++ uint64_t v;
+
+ v = addr[7]; v <<= 8;
+ v |= addr[6]; v <<= 8;
+@@ -844,10 +827,6 @@ bfd_getl_signed_64 (const void *p ATTRIBUTE_UNUSED)
+ v |= addr[0];
+
+ return COERCE64 (v);
+-#else
+- BFD_FAIL();
+- return 0;
+-#endif
+ }
+
+ void
+@@ -871,9 +850,8 @@ bfd_putl32 (bfd_vma data, void *p)
+ }
+
+ void
+-bfd_putb64 (bfd_uint64_t data ATTRIBUTE_UNUSED, void *p ATTRIBUTE_UNUSED)
++bfd_putb64 (uint64_t data, void *p)
+ {
+-#ifdef BFD_HOST_64_BIT
+ bfd_byte *addr = (bfd_byte *) p;
+ addr[0] = (data >> (7*8)) & 0xff;
+ addr[1] = (data >> (6*8)) & 0xff;
+@@ -883,15 +861,11 @@ bfd_putb64 (bfd_uint64_t data ATTRIBUTE_UNUSED, void *p ATTRIBUTE_UNUSED)
+ addr[5] = (data >> (2*8)) & 0xff;
+ addr[6] = (data >> (1*8)) & 0xff;
+ addr[7] = (data >> (0*8)) & 0xff;
+-#else
+- BFD_FAIL();
+-#endif
+ }
+
+ void
+-bfd_putl64 (bfd_uint64_t data ATTRIBUTE_UNUSED, void *p ATTRIBUTE_UNUSED)
++bfd_putl64 (uint64_t data, void *p)
+ {
+-#ifdef BFD_HOST_64_BIT
+ bfd_byte *addr = (bfd_byte *) p;
+ addr[7] = (data >> (7*8)) & 0xff;
+ addr[6] = (data >> (6*8)) & 0xff;
+@@ -901,13 +875,10 @@ bfd_putl64 (bfd_uint64_t data ATTRIBUTE_UNUSED, void *p ATTRIBUTE_UNUSED)
+ addr[2] = (data >> (2*8)) & 0xff;
+ addr[1] = (data >> (1*8)) & 0xff;
+ addr[0] = (data >> (0*8)) & 0xff;
+-#else
+- BFD_FAIL();
+-#endif
+ }
+
+ void
+-bfd_put_bits (bfd_uint64_t data, void *p, int bits, bool big_p)
++bfd_put_bits (uint64_t data, void *p, int bits, bool big_p)
+ {
+ bfd_byte *addr = (bfd_byte *) p;
+ int i;
+@@ -926,11 +897,11 @@ bfd_put_bits (bfd_uint64_t data, void *p, int bits, bool big_p)
+ }
+ }
+
+-bfd_uint64_t
++uint64_t
+ bfd_get_bits (const void *p, int bits, bool big_p)
+ {
+ const bfd_byte *addr = (const bfd_byte *) p;
+- bfd_uint64_t data;
++ uint64_t data;
+ int i;
+ int bytes;
+
+diff --git a/bfd/mach-o.c b/bfd/mach-o.c
+index e32b7873cef..9f3f1f13e4e 100644
+--- a/bfd/mach-o.c
++++ b/bfd/mach-o.c
+@@ -4773,7 +4773,7 @@ bfd_mach_o_read_source_version (bfd *abfd, bfd_mach_o_load_command *command)
+ {
+ bfd_mach_o_source_version_command *cmd = &command->command.source_version;
+ struct mach_o_source_version_command_external raw;
+- bfd_uint64_t ver;
++ uint64_t ver;
+
+ if (command->len < sizeof (raw) + 8)
+ return false;
+diff --git a/bfd/mach-o.h b/bfd/mach-o.h
+index 5a068d8d970..f7418ad8d40 100644
+--- a/bfd/mach-o.h
++++ b/bfd/mach-o.h
+@@ -545,8 +545,8 @@ bfd_mach_o_encryption_info_command;
+
+ typedef struct bfd_mach_o_main_command
+ {
+- bfd_uint64_t entryoff;
+- bfd_uint64_t stacksize;
++ uint64_t entryoff;
++ uint64_t stacksize;
+ }
+ bfd_mach_o_main_command;
+
+@@ -563,8 +563,8 @@ bfd_mach_o_source_version_command;
+ typedef struct bfd_mach_o_note_command
+ {
+ char data_owner[16];
+- bfd_uint64_t offset;
+- bfd_uint64_t size;
++ uint64_t offset;
++ uint64_t size;
+ }
+ bfd_mach_o_note_command;
+
+diff --git a/bfd/netbsd-core.c b/bfd/netbsd-core.c
+index cb215937da6..ffc8e50842c 100644
+--- a/bfd/netbsd-core.c
++++ b/bfd/netbsd-core.c
+@@ -257,9 +257,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_netbsd_vec =
+ {
+diff --git a/bfd/osf-core.c b/bfd/osf-core.c
+index 09a04a07624..04434b2045c 100644
+--- a/bfd/osf-core.c
++++ b/bfd/osf-core.c
+@@ -169,9 +169,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_osf_vec =
+ {
+diff --git a/bfd/ptrace-core.c b/bfd/ptrace-core.c
+index 3d077d21200..c4afffbfb95 100644
+--- a/bfd/ptrace-core.c
++++ b/bfd/ptrace-core.c
+@@ -160,9 +160,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_ptrace_vec =
+ {
+diff --git a/bfd/sco5-core.c b/bfd/sco5-core.c
+index d1f80c9079f..7807ac86a65 100644
+--- a/bfd/sco5-core.c
++++ b/bfd/sco5-core.c
+@@ -340,9 +340,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_sco5_vec =
+ {
+diff --git a/bfd/targets.c b/bfd/targets.c
+index 05dd8236d91..f44b5c67724 100644
+--- a/bfd/targets.c
++++ b/bfd/targets.c
+@@ -226,9 +226,9 @@ DESCRIPTION
+ . {* Entries for byte swapping for data. These are different from the
+ . other entry points, since they don't take a BFD as the first argument.
+ . Certain other handlers could do the same. *}
+-. bfd_uint64_t (*bfd_getx64) (const void *);
+-. bfd_int64_t (*bfd_getx_signed_64) (const void *);
+-. void (*bfd_putx64) (bfd_uint64_t, void *);
++. uint64_t (*bfd_getx64) (const void *);
++. int64_t (*bfd_getx_signed_64) (const void *);
++. void (*bfd_putx64) (uint64_t, void *);
+ . bfd_vma (*bfd_getx32) (const void *);
+ . bfd_signed_vma (*bfd_getx_signed_32) (const void *);
+ . void (*bfd_putx32) (bfd_vma, void *);
+@@ -237,9 +237,9 @@ DESCRIPTION
+ . void (*bfd_putx16) (bfd_vma, void *);
+ .
+ . {* Byte swapping for the headers. *}
+-. bfd_uint64_t (*bfd_h_getx64) (const void *);
+-. bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
+-. void (*bfd_h_putx64) (bfd_uint64_t, void *);
++. uint64_t (*bfd_h_getx64) (const void *);
++. int64_t (*bfd_h_getx_signed_64) (const void *);
++. void (*bfd_h_putx64) (uint64_t, void *);
+ . bfd_vma (*bfd_h_getx32) (const void *);
+ . bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
+ . void (*bfd_h_putx32) (bfd_vma, void *);
+diff --git a/bfd/trad-core.c b/bfd/trad-core.c
+index 92a279b6a72..8e9ee0d6667 100644
+--- a/bfd/trad-core.c
++++ b/bfd/trad-core.c
+@@ -249,9 +249,9 @@ swap_abort (void)
+ #define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
+ #define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
+ #define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
+-#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
+-#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
+-#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
++#define NO_GET64 ((uint64_t (*) (const void *)) swap_abort)
++#define NO_PUT64 ((void (*) (uint64_t, void *)) swap_abort)
++#define NO_GETS64 ((int64_t (*) (const void *)) swap_abort)
+
+ const bfd_target core_trad_vec =
+ {
+diff --git a/bfd/vms-alpha.c b/bfd/vms-alpha.c
+index 1129c98f0e2..fd0762811df 100644
+--- a/bfd/vms-alpha.c
++++ b/bfd/vms-alpha.c
+@@ -522,7 +522,7 @@ _bfd_vms_slurp_eisd (bfd *abfd, unsigned int offset)
+ struct vms_eisd *eisd;
+ unsigned int rec_size;
+ unsigned int size;
+- bfd_uint64_t vaddr;
++ uint64_t vaddr;
+ unsigned int flags;
+ unsigned int vbn;
+ char *name = NULL;
+diff --git a/binutils/nm.c b/binutils/nm.c
+index 60e4d850885..539c5688425 100644
+--- a/binutils/nm.c
++++ b/binutils/nm.c
+@@ -1557,29 +1557,15 @@ get_print_format (void)
+ padding = "016";
+ }
+
+- const char * length = "l";
+- if (print_width == 64)
+- {
+-#if BFD_HOST_64BIT_LONG
+- ;
+-#elif BFD_HOST_64BIT_LONG_LONG
+-#ifndef __MSVCRT__
+- length = "ll";
+-#else
+- length = "I64";
+-#endif
+-#endif
+- }
+-
+ const char * radix = NULL;
+ switch (print_radix)
+ {
+- case 8: radix = "o"; break;
+- case 10: radix = "d"; break;
+- case 16: radix = "x"; break;
++ case 8: radix = PRIo64; break;
++ case 10: radix = PRId64; break;
++ case 16: radix = PRIx64; break;
+ }
+
+- return concat ("%", padding, length, radix, NULL);
++ return concat ("%", padding, radix, NULL);
+ }
+
+ static void
+@@ -1874,33 +1860,8 @@ print_value (bfd *abfd ATTRIBUTE_UNUSED, bfd_vma val)
+ switch (print_width)
+ {
+ case 32:
+- printf (print_format_string, (unsigned long) val);
+- break;
+-
+ case 64:
+-#if BFD_HOST_64BIT_LONG || BFD_HOST_64BIT_LONG_LONG
+- printf (print_format_string, val);
+-#else
+- /* We have a 64 bit value to print, but the host is only 32 bit. */
+- if (print_radix == 16)
+- bfd_fprintf_vma (abfd, stdout, val);
+- else
+- {
+- char buf[30];
+- char *s;
+-
+- s = buf + sizeof buf;
+- *--s = '\0';
+- while (val > 0)
+- {
+- *--s = (val % print_radix) + '0';
+- val /= print_radix;
+- }
+- while ((buf + sizeof buf - 1) - s < 16)
+- *--s = '0';
+- printf ("%s", s);
+- }
+-#endif
++ printf (print_format_string, (uint64_t) val);
+ break;
+
+ default:
+diff --git a/binutils/od-macho.c b/binutils/od-macho.c
+index 56d448ac3bd..e91c87d2acf 100644
+--- a/binutils/od-macho.c
++++ b/binutils/od-macho.c
+@@ -283,15 +283,6 @@ bfd_mach_o_print_flags (const bfd_mach_o_xlat_name *table,
+ printf ("-");
+ }
+
+-/* Print a bfd_uint64_t, using a platform independent style. */
+-
+-static void
+-printf_uint64 (bfd_uint64_t v)
+-{
+- printf ("0x%08lx%08lx",
+- (unsigned long)((v >> 16) >> 16), (unsigned long)(v & 0xffffffffUL));
+-}
+-
+ static const char *
+ bfd_mach_o_get_name_or_null (const bfd_mach_o_xlat_name *table,
+ unsigned long val)
+@@ -1729,26 +1720,20 @@ dump_load_command (bfd *abfd, bfd_mach_o_load_command *cmd,
+ }
+ case BFD_MACH_O_LC_MAIN:
+ {
+- bfd_mach_o_main_command *entry = &cmd->command.main;
+- printf (" entry offset: ");
+- printf_uint64 (entry->entryoff);
+- printf ("\n"
+- " stack size: ");
+- printf_uint64 (entry->stacksize);
+- printf ("\n");
+- break;
++ bfd_mach_o_main_command *entry = &cmd->command.main;
++ printf (" entry offset: %#016" PRIx64 "\n"
++ " stack size: %#016" PRIx64 "\n",
++ entry->entryoff, entry->stacksize);
++ break;
+ }
+ case BFD_MACH_O_LC_NOTE:
+ {
+- bfd_mach_o_note_command *note = &cmd->command.note;
+- printf (" data owner: %.16s\n", note->data_owner);
+- printf (" offset: ");
+- printf_uint64 (note->offset);
+- printf ("\n"
+- " size: ");
+- printf_uint64 (note->size);
+- printf ("\n");
+- break;
++ bfd_mach_o_note_command *note = &cmd->command.note;
++ printf (" data owner: %.16s\n"
++ " offset: %#016" PRIx64 "\n"
++ " size: %#016" PRIx64 "\n",
++ note->data_owner, note->offset, note->size);
++ break;
+ }
+ case BFD_MACH_O_LC_BUILD_VERSION:
+ dump_build_version (abfd, cmd);
+@@ -2013,14 +1998,11 @@ dump_obj_compact_unwind (bfd *abfd,
+ {
+ e = (struct mach_o_compact_unwind_64 *) p;
+
+- putchar (' ');
+- printf_uint64 (bfd_get_64 (abfd, e->start));
+- printf (" %08lx", (unsigned long)bfd_get_32 (abfd, e->length));
+- putchar (' ');
+- printf_uint64 (bfd_get_64 (abfd, e->personality));
+- putchar (' ');
+- printf_uint64 (bfd_get_64 (abfd, e->lsda));
+- putchar ('\n');
++ printf (" %#016" PRIx64 " %#08x %#016" PRIx64 " %#016" PRIx64 "\n",
++ (uint64_t) bfd_get_64 (abfd, e->start),
++ (unsigned int) bfd_get_32 (abfd, e->length),
++ (uint64_t) bfd_get_64 (abfd, e->personality),
++ (uint64_t) bfd_get_64 (abfd, e->lsda));
+
+ printf (" encoding: ");
+ dump_unwind_encoding (mdata, bfd_get_32 (abfd, e->encoding));
+diff --git a/binutils/prdbg.c b/binutils/prdbg.c
+index d6cbab8578b..c1e41628d26 100644
+--- a/binutils/prdbg.c
++++ b/binutils/prdbg.c
+@@ -485,41 +485,12 @@ pop_type (struct pr_handle *info)
+ static void
+ print_vma (bfd_vma vma, char *buf, bool unsignedp, bool hexp)
+ {
+- if (sizeof (vma) <= sizeof (unsigned long))
+- {
+- if (hexp)
+- sprintf (buf, "0x%lx", (unsigned long) vma);
+- else if (unsignedp)
+- sprintf (buf, "%lu", (unsigned long) vma);
+- else
+- sprintf (buf, "%ld", (long) vma);
+- }
+-#if BFD_HOST_64BIT_LONG_LONG
+- else if (sizeof (vma) <= sizeof (unsigned long long))
+- {
+-#ifndef __MSVCRT__
+- if (hexp)
+- sprintf (buf, "0x%llx", (unsigned long long) vma);
+- else if (unsignedp)
+- sprintf (buf, "%llu", (unsigned long long) vma);
+- else
+- sprintf (buf, "%lld", (long long) vma);
+-#else
+- if (hexp)
+- sprintf (buf, "0x%I64x", (unsigned long long) vma);
+- else if (unsignedp)
+- sprintf (buf, "%I64u", (unsigned long long) vma);
+- else
+- sprintf (buf, "%I64d", (long long) vma);
+-#endif
+- }
+-#endif
++ if (hexp)
++ sprintf (buf, "%#" PRIx64, (uint64_t) vma);
++ else if (unsignedp)
++ sprintf (buf, "%" PRIu64, (uint64_t) vma);
+ else
+- {
+- buf[0] = '0';
+- buf[1] = 'x';
+- sprintf_vma (buf + 2, vma);
+- }
++ sprintf (buf, "%" PRId64, (int64_t) vma);
+ }
+
+ /* Start a new compilation unit. */
+diff --git a/binutils/readelf.c b/binutils/readelf.c
+index c35bfc12366..4c0a2a34767 100644
+--- a/binutils/readelf.c
++++ b/binutils/readelf.c
+@@ -10729,7 +10729,7 @@ dynamic_section_parisc_val (Elf_Internal_Dyn * entry)
+ /* Display a VMS time in a human readable format. */
+
+ static void
+-print_vms_time (bfd_int64_t vmstime)
++print_vms_time (int64_t vmstime)
+ {
+ struct tm *tm = NULL;
+ time_t unxtime;
+@@ -20764,7 +20764,7 @@ print_ia64_vms_note (Elf_Internal_Note * pnote)
+ /* FIXME: Generate an error if descsz > 8 ? */
+
+ printf ("0x%016" BFD_VMA_FMT "x\n",
+- (bfd_vma) byte_get ((unsigned char *)pnote->descdata, 8));
++ (bfd_vma) byte_get ((unsigned char *) pnote->descdata, 8));
+ break;
+
+ case NT_VMS_LINKTIME:
+@@ -20773,8 +20773,7 @@ print_ia64_vms_note (Elf_Internal_Note * pnote)
+ goto desc_size_fail;
+ /* FIXME: Generate an error if descsz > 8 ? */
+
+- print_vms_time
+- ((bfd_int64_t) byte_get ((unsigned char *)pnote->descdata, 8));
++ print_vms_time (byte_get ((unsigned char *) pnote->descdata, 8));
+ printf ("\n");
+ break;
+
+@@ -20784,8 +20783,7 @@ print_ia64_vms_note (Elf_Internal_Note * pnote)
+ goto desc_size_fail;
+ /* FIXME: Generate an error if descsz > 8 ? */
+
+- print_vms_time
+- ((bfd_int64_t) byte_get ((unsigned char *)pnote->descdata, 8));
++ print_vms_time (byte_get ((unsigned char *) pnote->descdata, 8));
+ printf ("\n");
+ break;
+
+@@ -20794,16 +20792,15 @@ print_ia64_vms_note (Elf_Internal_Note * pnote)
+ goto desc_size_fail;
+
+ printf (_(" Major id: %u, minor id: %u\n"),
+- (unsigned) byte_get ((unsigned char *)pnote->descdata, 4),
+- (unsigned) byte_get ((unsigned char *)pnote->descdata + 4, 4));
++ (unsigned) byte_get ((unsigned char *) pnote->descdata, 4),
++ (unsigned) byte_get ((unsigned char *) pnote->descdata + 4, 4));
+ printf (_(" Last modified : "));
+- print_vms_time
+- ((bfd_int64_t) byte_get ((unsigned char *)pnote->descdata + 8, 8));
++ print_vms_time (byte_get ((unsigned char *) pnote->descdata + 8, 8));
+ printf (_("\n Link flags : "));
+ printf ("0x%016" BFD_VMA_FMT "x\n",
+- (bfd_vma) byte_get ((unsigned char *)pnote->descdata + 16, 8));
++ (bfd_vma) byte_get ((unsigned char *) pnote->descdata + 16, 8));
+ printf (_(" Header flags: 0x%08x\n"),
+- (unsigned) byte_get ((unsigned char *)pnote->descdata + 24, 4));
++ (unsigned) byte_get ((unsigned char *) pnote->descdata + 24, 4));
+ printf (_(" Image id : %.*s\n"), maxlen - 32, pnote->descdata + 32);
+ break;
+ #endif
+diff --git a/gas/config/tc-arm.c b/gas/config/tc-arm.c
+index 1721097cfca..2e6d175482e 100644
+--- a/gas/config/tc-arm.c
++++ b/gas/config/tc-arm.c
+@@ -3565,7 +3565,7 @@ add_to_lit_pool (unsigned int nbytes)
+ imm1 = inst.operands[1].imm;
+ imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
+ : inst.relocs[0].exp.X_unsigned ? 0
+- : ((bfd_int64_t) inst.operands[1].imm) >> 32);
++ : (int64_t) inst.operands[1].imm >> 32);
+ if (target_big_endian)
+ {
+ imm1 = imm2;
+@@ -8819,15 +8819,14 @@ neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
+ return FAIL;
+ }
+
+-#if defined BFD_HOST_64_BIT
+ /* Returns TRUE if double precision value V may be cast
+ to single precision without loss of accuracy. */
+
+ static bool
+-is_double_a_single (bfd_uint64_t v)
++is_double_a_single (uint64_t v)
+ {
+ int exp = (v >> 52) & 0x7FF;
+- bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
++ uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
+
+ return ((exp == 0 || exp == 0x7FF
+ || (exp >= 1023 - 126 && exp <= 1023 + 127))
+@@ -8838,11 +8837,11 @@ is_double_a_single (bfd_uint64_t v)
+ (ignoring the least significant bits in exponent and mantissa). */
+
+ static int
+-double_to_single (bfd_uint64_t v)
++double_to_single (uint64_t v)
+ {
+ unsigned int sign = (v >> 63) & 1;
+ int exp = (v >> 52) & 0x7FF;
+- bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
++ uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
+
+ if (exp == 0x7FF)
+ exp = 0xFF;
+@@ -8865,7 +8864,6 @@ double_to_single (bfd_uint64_t v)
+ mantissa >>= 29;
+ return (sign << 31) | (exp << 23) | mantissa;
+ }
+-#endif /* BFD_HOST_64_BIT */
+
+ enum lit_type
+ {
+@@ -8914,11 +8912,7 @@ move_or_literal_pool (int i, enum lit_type t, bool mode_3)
+ if (inst.relocs[0].exp.X_op == O_constant
+ || inst.relocs[0].exp.X_op == O_big)
+ {
+-#if defined BFD_HOST_64_BIT
+- bfd_uint64_t v;
+-#else
+- valueT v;
+-#endif
++ uint64_t v;
+ if (inst.relocs[0].exp.X_op == O_big)
+ {
+ LITTLENUM_TYPE w[X_PRECISION];
+@@ -8933,7 +8927,6 @@ move_or_literal_pool (int i, enum lit_type t, bool mode_3)
+ else
+ l = generic_bignum;
+
+-#if defined BFD_HOST_64_BIT
+ v = l[3] & LITTLENUM_MASK;
+ v <<= LITTLENUM_NUMBER_OF_BITS;
+ v |= l[2] & LITTLENUM_MASK;
+@@ -8941,11 +8934,6 @@ move_or_literal_pool (int i, enum lit_type t, bool mode_3)
+ v |= l[1] & LITTLENUM_MASK;
+ v <<= LITTLENUM_NUMBER_OF_BITS;
+ v |= l[0] & LITTLENUM_MASK;
+-#else
+- v = l[1] & LITTLENUM_MASK;
+- v <<= LITTLENUM_NUMBER_OF_BITS;
+- v |= l[0] & LITTLENUM_MASK;
+-#endif
+ }
+ else
+ v = inst.relocs[0].exp.X_add_number;
+@@ -9041,7 +9029,7 @@ move_or_literal_pool (int i, enum lit_type t, bool mode_3)
+ ? inst.operands[1].reg
+ : inst.relocs[0].exp.X_unsigned
+ ? 0
+- : ((bfd_int64_t)((int) immlo)) >> 32;
++ : (int64_t) (int) immlo >> 32;
+ int cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
+ &op, 64, NT_invtype);
+
+@@ -9090,7 +9078,6 @@ move_or_literal_pool (int i, enum lit_type t, bool mode_3)
+ discrepancy between the output produced by an assembler built for
+ a 32-bit-only host and the output produced from a 64-bit host, but
+ this cannot be helped. */
+-#if defined BFD_HOST_64_BIT
+ else if (!inst.operands[1].issingle
+ && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
+ {
+@@ -9103,7 +9090,6 @@ move_or_literal_pool (int i, enum lit_type t, bool mode_3)
+ return true;
+ }
+ }
+-#endif
+ }
+ }
+
+diff --git a/gas/config/tc-csky.c b/gas/config/tc-csky.c
+index 2371eeb747e..5b824d89af0 100644
+--- a/gas/config/tc-csky.c
++++ b/gas/config/tc-csky.c
+@@ -215,7 +215,7 @@ enum
+ unsigned int mach_flag = 0;
+ unsigned int arch_flag = 0;
+ unsigned int other_flag = 0;
+-BFD_HOST_U_64_BIT isa_flag = 0;
++uint64_t isa_flag = 0;
+ unsigned int dsp_flag = 0;
+
+ typedef struct stack_size_entry
+@@ -245,7 +245,7 @@ struct csky_macro_info
+ const char *name;
+ /* How many operands : if operands == 5, all of 1,2,3,4 are ok. */
+ long oprnd_num;
+- BFD_HOST_U_64_BIT isa_flag;
++ uint64_t isa_flag;
+ /* Do the work. */
+ void (*handle_func)(void);
+ };
+@@ -591,14 +591,14 @@ struct csky_cpu_feature
+ {
+ const char unique;
+ unsigned int arch_flag;
+- bfd_uint64_t isa_flag;
++ uint64_t isa_flag;
+ };
+
+ struct csky_cpu_version
+ {
+ int r;
+ int p;
+- bfd_uint64_t isa_flag;
++ uint64_t isa_flag;
+ };
+
+ #define CSKY_FEATURE_MAX 10
+@@ -608,7 +608,7 @@ struct csky_cpu_info
+ {
+ const char *name;
+ unsigned int arch_flag;
+- bfd_uint64_t isa_flag;
++ uint64_t isa_flag;
+ struct csky_cpu_feature features[CSKY_FEATURE_MAX];
+ struct csky_cpu_version ver[CSKY_CPU_REVERISON_MAX];
+ };
+diff --git a/gas/config/tc-sparc.c b/gas/config/tc-sparc.c
+index 222223f3549..4e443b1d28d 100644
+--- a/gas/config/tc-sparc.c
++++ b/gas/config/tc-sparc.c
+@@ -75,10 +75,10 @@ static enum { MM_TSO, MM_PSO, MM_RMO } sparc_memory_model = MM_RMO;
+ #ifndef TE_SOLARIS
+ /* Bitmask of instruction types seen so far, used to populate the
+ GNU attributes section with hwcap information. */
+-static bfd_uint64_t hwcap_seen;
++static uint64_t hwcap_seen;
+ #endif
+
+-static bfd_uint64_t hwcap_allowed;
++static uint64_t hwcap_allowed;
+
+ static int architecture_requested;
+ static int warn_on_bump;
+@@ -498,15 +498,15 @@ md_parse_option (int c, const char *arg)
+ || opcode_arch > max_architecture)
+ max_architecture = opcode_arch;
+
+- /* The allowed hardware capabilities are the implied by the
+- opcodes arch plus any extra capabilities defined in the GAS
+- arch. */
+- hwcap_allowed
+- = (hwcap_allowed
+- | (((bfd_uint64_t) sparc_opcode_archs[opcode_arch].hwcaps2) << 32)
+- | (((bfd_uint64_t) sa->hwcap2_allowed) << 32)
+- | sparc_opcode_archs[opcode_arch].hwcaps
+- | sa->hwcap_allowed);
++ /* The allowed hardware capabilities are the implied by the
++ opcodes arch plus any extra capabilities defined in the GAS
++ arch. */
++ hwcap_allowed
++ = (hwcap_allowed
++ | ((uint64_t) sparc_opcode_archs[opcode_arch].hwcaps2 << 32)
++ | ((uint64_t) sa->hwcap2_allowed << 32)
++ | sparc_opcode_archs[opcode_arch].hwcaps
++ | sa->hwcap_allowed);
+ architecture_requested = 1;
+ }
+ break;
+@@ -1607,7 +1607,7 @@ md_assemble (char *str)
+ }
+
+ static const char *
+-get_hwcap_name (bfd_uint64_t mask)
++get_hwcap_name (uint64_t mask)
+ {
+ if (mask & HWCAP_MUL32)
+ return "mul32";
+@@ -3171,8 +3171,7 @@ sparc_ip (char *str, const struct sparc_opcode **pinsn)
+ msg_str = sasi->name;
+ }
+
+- bfd_uint64_t hwcaps
+- = (((bfd_uint64_t) insn->hwcaps2) << 32) | insn->hwcaps;
++ uint64_t hwcaps = ((uint64_t) insn->hwcaps2 << 32) | insn->hwcaps;
+
+ #ifndef TE_SOLARIS
+ if (hwcaps)
+@@ -3211,10 +3210,10 @@ sparc_ip (char *str, const struct sparc_opcode **pinsn)
+ }
+ current_architecture = needed_architecture;
+ hwcap_allowed
+- = (hwcap_allowed
+- | hwcaps
+- | (((bfd_uint64_t) sparc_opcode_archs[current_architecture].hwcaps2) << 32)
+- | sparc_opcode_archs[current_architecture].hwcaps);
++ = (hwcap_allowed
++ | hwcaps
++ | ((uint64_t) sparc_opcode_archs[current_architecture].hwcaps2 << 32)
++ | sparc_opcode_archs[current_architecture].hwcaps);
+ }
+ /* Conflict. */
+ /* ??? This seems to be a bit fragile. What if the next entry in
+diff --git a/gas/config/tc-tilegx.c b/gas/config/tc-tilegx.c
+index b627b7080e5..4fcc38c9034 100644
+--- a/gas/config/tc-tilegx.c
++++ b/gas/config/tc-tilegx.c
+@@ -789,16 +789,16 @@ emit_tilegx_instruction (tilegx_bundle_bits bits,
+ static void
+ check_illegal_reg_writes (void)
+ {
+- BFD_HOST_U_64_BIT all_regs_written = 0;
++ uint64_t all_regs_written = 0;
+ int j;
+
+ for (j = 0; j < current_bundle_index; j++)
+ {
+ const struct tilegx_instruction *instr = &current_bundle[j];
+ int k;
+- BFD_HOST_U_64_BIT regs =
+- ((BFD_HOST_U_64_BIT)1) << instr->opcode->implicitly_written_register;
+- BFD_HOST_U_64_BIT conflict;
++ uint64_t regs =
++ (uint64_t) 1 << instr->opcode->implicitly_written_register;
++ uint64_t conflict;
+
+ for (k = 0; k < instr->opcode->num_operands; k++)
+ {
+@@ -808,12 +808,12 @@ check_illegal_reg_writes (void)
+ if (operand->is_dest_reg)
+ {
+ int regno = instr->operand_values[k].X_add_number;
+- BFD_HOST_U_64_BIT mask = ((BFD_HOST_U_64_BIT)1) << regno;
++ uint64_t mask = (uint64_t) 1 << regno;
+
+- if ((mask & ( (((BFD_HOST_U_64_BIT)1) << TREG_IDN1)
+- | (((BFD_HOST_U_64_BIT)1) << TREG_UDN1)
+- | (((BFD_HOST_U_64_BIT)1) << TREG_UDN2)
+- | (((BFD_HOST_U_64_BIT)1) << TREG_UDN3))) != 0
++ if ((mask & ( ((uint64_t) 1 << TREG_IDN1)
++ | ((uint64_t) 1 << TREG_UDN1)
++ | ((uint64_t) 1 << TREG_UDN2)
++ | ((uint64_t) 1 << TREG_UDN3))) != 0
+ && !allow_suspicious_bundles)
+ {
+ as_bad (_("Writes to register '%s' are not allowed."),
+@@ -825,7 +825,7 @@ check_illegal_reg_writes (void)
+ }
+
+ /* Writing to the zero register doesn't count. */
+- regs &= ~(((BFD_HOST_U_64_BIT)1) << TREG_ZERO);
++ regs &= ~((uint64_t) 1 << TREG_ZERO);
+
+ conflict = all_regs_written & regs;
+ if (conflict != 0 && !allow_suspicious_bundles)
+diff --git a/gas/config/tc-tilepro.c b/gas/config/tc-tilepro.c
+index af0be422f98..ca092d77a4b 100644
+--- a/gas/config/tc-tilepro.c
++++ b/gas/config/tc-tilepro.c
+@@ -677,16 +677,16 @@ emit_tilepro_instruction (tilepro_bundle_bits bits,
+ static void
+ check_illegal_reg_writes (void)
+ {
+- BFD_HOST_U_64_BIT all_regs_written = 0;
++ uint64_t all_regs_written = 0;
+ int j;
+
+ for (j = 0; j < current_bundle_index; j++)
+ {
+ const struct tilepro_instruction *instr = &current_bundle[j];
+ int k;
+- BFD_HOST_U_64_BIT regs =
+- ((BFD_HOST_U_64_BIT)1) << instr->opcode->implicitly_written_register;
+- BFD_HOST_U_64_BIT conflict;
++ uint64_t regs =
++ (uint64_t) 1 << instr->opcode->implicitly_written_register;
++ uint64_t conflict;
+
+ for (k = 0; k < instr->opcode->num_operands; k++)
+ {
+@@ -696,12 +696,12 @@ check_illegal_reg_writes (void)
+ if (operand->is_dest_reg)
+ {
+ int regno = instr->operand_values[k].X_add_number;
+- BFD_HOST_U_64_BIT mask = ((BFD_HOST_U_64_BIT)1) << regno;
++ uint64_t mask = (uint64_t) 1 << regno;
+
+- if ((mask & ( (((BFD_HOST_U_64_BIT)1) << TREG_IDN1)
+- | (((BFD_HOST_U_64_BIT)1) << TREG_UDN1)
+- | (((BFD_HOST_U_64_BIT)1) << TREG_UDN2)
+- | (((BFD_HOST_U_64_BIT)1) << TREG_UDN3))) != 0
++ if ((mask & ( ((uint64_t) 1 << TREG_IDN1)
++ | ((uint64_t) 1 << TREG_UDN1)
++ | ((uint64_t) 1 << TREG_UDN2)
++ | ((uint64_t) 1 << TREG_UDN3))) != 0
+ && !allow_suspicious_bundles)
+ {
+ as_bad (_("Writes to register '%s' are not allowed."),
+@@ -713,7 +713,7 @@ check_illegal_reg_writes (void)
+ }
+
+ /* Writing to the zero register doesn't count. */
+- regs &= ~(((BFD_HOST_U_64_BIT)1) << TREG_ZERO);
++ regs &= ~((uint64_t) 1 << TREG_ZERO);
+
+ conflict = all_regs_written & regs;
+ if (conflict != 0 && !allow_suspicious_bundles)
+diff --git a/gas/config/tc-z80.c b/gas/config/tc-z80.c
+index 81fbfe3b0ae..714e704e24a 100644
+--- a/gas/config/tc-z80.c
++++ b/gas/config/tc-z80.c
+@@ -3910,11 +3910,11 @@ z80_tc_label_is_local (const char *name)
+ #define EXP_MIN -0x10000
+ #define EXP_MAX 0x10000
+ static int
+-str_to_broken_float (bool *signP, bfd_uint64_t *mantissaP, int *expP)
++str_to_broken_float (bool *signP, uint64_t *mantissaP, int *expP)
+ {
+ char *p;
+ bool sign;
+- bfd_uint64_t mantissa = 0;
++ uint64_t mantissa = 0;
+ int exponent = 0;
+ int i;
+
+@@ -4029,7 +4029,7 @@ str_to_broken_float (bool *signP, bfd_uint64_t *mantissaP, int *expP)
+ static const char *
+ str_to_zeda32(char *litP, int *sizeP)
+ {
+- bfd_uint64_t mantissa;
++ uint64_t mantissa;
+ bool sign;
+ int exponent;
+ unsigned i;
+@@ -4088,7 +4088,7 @@ str_to_zeda32(char *litP, int *sizeP)
+ static const char *
+ str_to_float48(char *litP, int *sizeP)
+ {
+- bfd_uint64_t mantissa;
++ uint64_t mantissa;
+ bool sign;
+ int exponent;
+ unsigned i;
+diff --git a/gas/config/te-vms.c b/gas/config/te-vms.c
+index 015c95867f0..6661a3b6a72 100644
+--- a/gas/config/te-vms.c
++++ b/gas/config/te-vms.c
+@@ -339,7 +339,7 @@ vms_file_stats_name (const char *dirname,
+ return 0;
+ }
+
+-bfd_uint64_t
++uint64_t
+ vms_dwarf2_file_time_name (const char *filename, const char *dirname)
+ {
+ long long cdt;
+diff --git a/gas/config/te-vms.h b/gas/config/te-vms.h
+index ffe7f5e8f37..08f218502de 100644
+--- a/gas/config/te-vms.h
++++ b/gas/config/te-vms.h
+@@ -20,7 +20,7 @@
+ #define TE_VMS
+ #include "obj-format.h"
+
+-extern bfd_uint64_t vms_dwarf2_file_time_name (const char *, const char *);
++extern uint64_t vms_dwarf2_file_time_name (const char *, const char *);
+ extern long vms_dwarf2_file_size_name (const char *, const char *);
+ extern char *vms_dwarf2_file_name (const char *, const char *);
+
+diff --git a/gdb/findcmd.c b/gdb/findcmd.c
+index ff13f22e970..ed2cea7b74d 100644
+--- a/gdb/findcmd.c
++++ b/gdb/findcmd.c
+@@ -30,7 +30,7 @@
+ /* Copied from bfd_put_bits. */
+
+ static void
+-put_bits (bfd_uint64_t data, gdb::byte_vector &buf, int bits, bfd_boolean big_p)
++put_bits (uint64_t data, gdb::byte_vector &buf, int bits, bfd_boolean big_p)
+ {
+ int i;
+ int bytes;
+diff --git a/gdb/tilegx-tdep.c b/gdb/tilegx-tdep.c
+index 7930db72779..9668aa80b53 100644
+--- a/gdb/tilegx-tdep.c
++++ b/gdb/tilegx-tdep.c
+@@ -375,7 +375,7 @@ tilegx_analyze_prologue (struct gdbarch* gdbarch,
+ CORE_ADDR instbuf_start;
+ unsigned int instbuf_size;
+ int status;
+- bfd_uint64_t bundle;
++ uint64_t bundle;
+ struct tilegx_decoded_instruction
+ decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
+ int num_insns;
+diff --git a/gprof/gmon_io.c b/gprof/gmon_io.c
+index c613809d396..2b4dd26375b 100644
+--- a/gprof/gmon_io.c
++++ b/gprof/gmon_io.c
+@@ -48,10 +48,8 @@ enum gmon_ptr_signedness {
+ static enum gmon_ptr_size gmon_get_ptr_size (void);
+ static enum gmon_ptr_signedness gmon_get_ptr_signedness (void);
+
+-#ifdef BFD_HOST_U_64_BIT
+-static int gmon_io_read_64 (FILE *, BFD_HOST_U_64_BIT *);
+-static int gmon_io_write_64 (FILE *, BFD_HOST_U_64_BIT);
+-#endif
++static int gmon_io_read_64 (FILE *, uint64_t *);
++static int gmon_io_write_64 (FILE *, uint64_t);
+ static int gmon_read_raw_arc
+ (FILE *, bfd_vma *, bfd_vma *, unsigned long *);
+ static int gmon_write_raw_arc
+@@ -109,9 +107,8 @@ gmon_io_read_32 (FILE *ifp, unsigned int *valp)
+ return 0;
+ }
+
+-#ifdef BFD_HOST_U_64_BIT
+ static int
+-gmon_io_read_64 (FILE *ifp, BFD_HOST_U_64_BIT *valp)
++gmon_io_read_64 (FILE *ifp, uint64_t *valp)
+ {
+ char buf[8];
+
+@@ -120,15 +117,12 @@ gmon_io_read_64 (FILE *ifp, BFD_HOST_U_64_BIT *valp)
+ *valp = bfd_get_64 (core_bfd, buf);
+ return 0;
+ }
+-#endif
+
+ int
+ gmon_io_read_vma (FILE *ifp, bfd_vma *valp)
+ {
+ unsigned int val32;
+-#ifdef BFD_HOST_U_64_BIT
+- BFD_HOST_U_64_BIT val64;
+-#endif
++ uint64_t val64;
+
+ switch (gmon_get_ptr_size ())
+ {
+@@ -136,23 +130,19 @@ gmon_io_read_vma (FILE *ifp, bfd_vma *valp)
+ if (gmon_io_read_32 (ifp, &val32))
+ return 1;
+ if (gmon_get_ptr_signedness () == ptr_signed)
+- *valp = (int) val32;
++ *valp = (int) val32;
+ else
+- *valp = val32;
++ *valp = val32;
+ break;
+
+-#ifdef BFD_HOST_U_64_BIT
+ case ptr_64bit:
+ if (gmon_io_read_64 (ifp, &val64))
+ return 1;
+-#ifdef BFD_HOST_64_BIT
+ if (gmon_get_ptr_signedness () == ptr_signed)
+- *valp = (BFD_HOST_64_BIT) val64;
++ *valp = (int64_t) val64;
+ else
+-#endif
+- *valp = val64;
++ *valp = val64;
+ break;
+-#endif
+ }
+ return 0;
+ }
+@@ -176,9 +166,8 @@ gmon_io_write_32 (FILE *ofp, unsigned int val)
+ return 0;
+ }
+
+-#ifdef BFD_HOST_U_64_BIT
+ static int
+-gmon_io_write_64 (FILE *ofp, BFD_HOST_U_64_BIT val)
++gmon_io_write_64 (FILE *ofp, uint64_t val)
+ {
+ char buf[8];
+
+@@ -187,7 +176,6 @@ gmon_io_write_64 (FILE *ofp, BFD_HOST_U_64_BIT val)
+ return 1;
+ return 0;
+ }
+-#endif
+
+ int
+ gmon_io_write_vma (FILE *ofp, bfd_vma val)
+@@ -200,12 +188,10 @@ gmon_io_write_vma (FILE *ofp, bfd_vma val)
+ return 1;
+ break;
+
+-#ifdef BFD_HOST_U_64_BIT
+ case ptr_64bit:
+- if (gmon_io_write_64 (ofp, (BFD_HOST_U_64_BIT) val))
++ if (gmon_io_write_64 (ofp, (uint64_t) val))
+ return 1;
+ break;
+-#endif
+ }
+ return 0;
+ }
+@@ -232,9 +218,7 @@ gmon_io_write (FILE *ofp, char *buf, size_t n)
+ static int
+ gmon_read_raw_arc (FILE *ifp, bfd_vma *fpc, bfd_vma *spc, unsigned long *cnt)
+ {
+-#ifdef BFD_HOST_U_64_BIT
+- BFD_HOST_U_64_BIT cnt64;
+-#endif
++ uint64_t cnt64;
+ unsigned int cnt32;
+
+ if (gmon_io_read_vma (ifp, fpc)
+@@ -249,13 +233,11 @@ gmon_read_raw_arc (FILE *ifp, bfd_vma *fpc, bfd_vma *spc, unsigned long *cnt)
+ *cnt = cnt32;
+ break;
+
+-#ifdef BFD_HOST_U_64_BIT
+ case ptr_64bit:
+ if (gmon_io_read_64 (ifp, &cnt64))
+ return 1;
+ *cnt = cnt64;
+ break;
+-#endif
+
+ default:
+ return 1;
+@@ -278,12 +260,10 @@ gmon_write_raw_arc (FILE *ofp, bfd_vma fpc, bfd_vma spc, unsigned long cnt)
+ return 1;
+ break;
+
+-#ifdef BFD_HOST_U_64_BIT
+ case ptr_64bit:
+- if (gmon_io_write_64 (ofp, (BFD_HOST_U_64_BIT) cnt))
++ if (gmon_io_write_64 (ofp, (uint64_t) cnt))
+ return 1;
+ break;
+-#endif
+ }
+ return 0;
+ }
+diff --git a/include/elf/nfp.h b/include/elf/nfp.h
+index 5a06051196c..c89cefff27b 100644
+--- a/include/elf/nfp.h
++++ b/include/elf/nfp.h
+@@ -102,7 +102,7 @@ extern "C"
+ #define SHF_NFP_INIT 0x80000000
+ #define SHF_NFP_INIT2 0x40000000
+ #define SHF_NFP_SCS(shf) (((shf) >> 32) & 0xFF)
+-#define SHF_NFP_SET_SCS(v) (((BFD_HOST_U_64_BIT)((v) & 0xFF)) << 32)
++#define SHF_NFP_SET_SCS(v) ((uint64_t) ((v) & 0xFF) << 32)
+
+ /* NFP Section Info
+ For PROGBITS and NOBITS sections:
+diff --git a/include/opcode/csky.h b/include/opcode/csky.h
+index ed00bfd7cd6..faecba11611 100644
+--- a/include/opcode/csky.h
++++ b/include/opcode/csky.h
+@@ -22,46 +22,46 @@
+ #include "dis-asm.h"
+
+ /* The following bitmasks control instruction set architecture. */
+-#define CSKYV1_ISA_E1 ((bfd_uint64_t)1 << 0)
+-#define CSKYV2_ISA_E1 ((bfd_uint64_t)1 << 1)
+-#define CSKYV2_ISA_1E2 ((bfd_uint64_t)1 << 2)
+-#define CSKYV2_ISA_2E3 ((bfd_uint64_t)1 << 3)
+-#define CSKYV2_ISA_3E7 ((bfd_uint64_t)1 << 4)
+-#define CSKYV2_ISA_7E10 ((bfd_uint64_t)1 << 5)
+-#define CSKYV2_ISA_3E3R1 ((bfd_uint64_t)1 << 6)
+-#define CSKYV2_ISA_3E3R2 ((bfd_uint64_t)1 << 7)
+-#define CSKYV2_ISA_10E60 ((bfd_uint64_t)1 << 8)
+-#define CSKYV2_ISA_3E3R3 ((bfd_uint64_t)1 << 9)
+-
+-#define CSKY_ISA_TRUST ((bfd_uint64_t)1 << 11)
+-#define CSKY_ISA_CACHE ((bfd_uint64_t)1 << 12)
+-#define CSKY_ISA_NVIC ((bfd_uint64_t)1 << 13)
+-#define CSKY_ISA_CP ((bfd_uint64_t)1 << 14)
+-#define CSKY_ISA_MP ((bfd_uint64_t)1 << 15)
+-#define CSKY_ISA_MP_1E2 ((bfd_uint64_t)1 << 16)
+-#define CSKY_ISA_JAVA ((bfd_uint64_t)1 << 17)
+-#define CSKY_ISA_MAC ((bfd_uint64_t)1 << 18)
+-#define CSKY_ISA_MAC_DSP ((bfd_uint64_t)1 << 19)
++#define CSKYV1_ISA_E1 ((uint64_t) 1 << 0)
++#define CSKYV2_ISA_E1 ((uint64_t) 1 << 1)
++#define CSKYV2_ISA_1E2 ((uint64_t) 1 << 2)
++#define CSKYV2_ISA_2E3 ((uint64_t) 1 << 3)
++#define CSKYV2_ISA_3E7 ((uint64_t) 1 << 4)
++#define CSKYV2_ISA_7E10 ((uint64_t) 1 << 5)
++#define CSKYV2_ISA_3E3R1 ((uint64_t) 1 << 6)
++#define CSKYV2_ISA_3E3R2 ((uint64_t) 1 << 7)
++#define CSKYV2_ISA_10E60 ((uint64_t) 1 << 8)
++#define CSKYV2_ISA_3E3R3 ((uint64_t) 1 << 9)
++
++#define CSKY_ISA_TRUST ((uint64_t) 1 << 11)
++#define CSKY_ISA_CACHE ((uint64_t) 1 << 12)
++#define CSKY_ISA_NVIC ((uint64_t) 1 << 13)
++#define CSKY_ISA_CP ((uint64_t) 1 << 14)
++#define CSKY_ISA_MP ((uint64_t) 1 << 15)
++#define CSKY_ISA_MP_1E2 ((uint64_t) 1 << 16)
++#define CSKY_ISA_JAVA ((uint64_t) 1 << 17)
++#define CSKY_ISA_MAC ((uint64_t) 1 << 18)
++#define CSKY_ISA_MAC_DSP ((uint64_t) 1 << 19)
+
+ /* Base ISA for csky v1 and v2. */
+-#define CSKY_ISA_DSP ((bfd_uint64_t)1 << 20)
+-#define CSKY_ISA_DSP_1E2 ((bfd_uint64_t)1 << 21)
+-#define CSKY_ISA_DSP_ENHANCE ((bfd_uint64_t)1 << 22)
+-#define CSKY_ISA_DSPE60 ((bfd_uint64_t)1 << 23)
++#define CSKY_ISA_DSP ((uint64_t) 1 << 20)
++#define CSKY_ISA_DSP_1E2 ((uint64_t) 1 << 21)
++#define CSKY_ISA_DSP_ENHANCE ((uint64_t) 1 << 22)
++#define CSKY_ISA_DSPE60 ((uint64_t) 1 << 23)
+
+ /* Base float instruction (803f & 810f). */
+-#define CSKY_ISA_FLOAT_E1 ((bfd_uint64_t)1 << 25)
++#define CSKY_ISA_FLOAT_E1 ((uint64_t) 1 << 25)
+ /* M_FLOAT support (810f). */
+-#define CSKY_ISA_FLOAT_1E2 ((bfd_uint64_t)1 << 26)
++#define CSKY_ISA_FLOAT_1E2 ((uint64_t) 1 << 26)
+ /* 803 support (803f). */
+-#define CSKY_ISA_FLOAT_1E3 ((bfd_uint64_t)1 << 27)
++#define CSKY_ISA_FLOAT_1E3 ((uint64_t) 1 << 27)
+ /* 807 support (803f & 807f). */
+-#define CSKY_ISA_FLOAT_3E4 ((bfd_uint64_t)1 << 28)
++#define CSKY_ISA_FLOAT_3E4 ((uint64_t) 1 << 28)
+ /* 860 support. */
+-#define CSKY_ISA_FLOAT_7E60 ((bfd_uint64_t)1 << 36)
++#define CSKY_ISA_FLOAT_7E60 ((uint64_t) 1 << 36)
+ /* Vector DSP support. */
+-#define CSKY_ISA_VDSP ((bfd_uint64_t)1 << 29)
+-#define CSKY_ISA_VDSP_2 ((bfd_uint64_t)1 << 30)
++#define CSKY_ISA_VDSP ((uint64_t) 1 << 29)
++#define CSKY_ISA_VDSP_2 ((uint64_t) 1 << 30)
+
+ /* The following bitmasks control cpu architecture for CSKY. */
+ #define CSKY_ABI_V1 (1 << 28)
+diff --git a/include/opcode/ia64.h b/include/opcode/ia64.h
+index fbdd8f14e65..42a6812c3f8 100644
+--- a/include/opcode/ia64.h
++++ b/include/opcode/ia64.h
+@@ -29,7 +29,7 @@
+ extern "C" {
+ #endif
+
+-typedef BFD_HOST_U_64_BIT ia64_insn;
++typedef uint64_t ia64_insn;
+
+ enum ia64_insn_type
+ {
+diff --git a/opcodes/csky-dis.c b/opcodes/csky-dis.c
+index b7c833623e5..99103ff57b5 100644
+--- a/opcodes/csky-dis.c
++++ b/opcodes/csky-dis.c
+@@ -49,7 +49,7 @@ struct csky_dis_info
+ disassemble_info *info;
+ /* Opcode information. */
+ struct csky_opcode_info const *opinfo;
+- BFD_HOST_U_64_BIT isa;
++ uint64_t isa;
+ /* The value of operand to show. */
+ int value;
+ /* Whether to look up/print a symbol name. */
+diff --git a/opcodes/csky-opc.h b/opcodes/csky-opc.h
+index b65efe19d9f..d2db90ede95 100644
+--- a/opcodes/csky-opc.h
++++ b/opcodes/csky-opc.h
+@@ -271,8 +271,8 @@ struct csky_opcode
+ /* Encodings for 32-bit opcodes. */
+ struct csky_opcode_info op32[OP_TABLE_NUM];
+ /* Instruction set flag. */
+- BFD_HOST_U_64_BIT isa_flag16;
+- BFD_HOST_U_64_BIT isa_flag32;
++ uint64_t isa_flag16;
++ uint64_t isa_flag32;
+ /* Whether this insn needs relocation, 0: no, !=0: yes. */
+ signed int reloc16;
+ signed int reloc32;
+diff --git a/opcodes/ia64-dis.c b/opcodes/ia64-dis.c
+index 5eb37277a5d..e76f40393c6 100644
+--- a/opcodes/ia64-dis.c
++++ b/opcodes/ia64-dis.c
+@@ -73,7 +73,7 @@ print_insn_ia64 (bfd_vma memaddr, struct disassemble_info *info)
+ const struct ia64_operand *odesc;
+ const struct ia64_opcode *idesc;
+ const char *err, *str, *tname;
+- BFD_HOST_U_64_BIT value;
++ uint64_t value;
+ bfd_byte bundle[16];
+ enum ia64_unit unit;
+ char regname[16];
+--
+2.31.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-3.patch b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-3.patch
new file mode 100644
index 0000000000..6a838ea3ea
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-3.patch
@@ -0,0 +1,156 @@
+From 31d6c13defeba7716ebc9d5c8f81f2f35fe39980 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Tue, 14 Jun 2022 12:46:42 +0930
+Subject: [PATCH] PR29230, segv in lookup_symbol_in_variable_table
+
+The PR29230 testcase uses indexed strings without specifying
+DW_AT_str_offsets_base. In this case we left u.str with garbage (from
+u.val) which then led to a segfault when attempting to access the
+string. Fix that by clearing u.str. The patch also adds missing
+sanity checks in the recently committed read_indexed_address and
+read_indexed_string functions.
+
+ PR 29230
+ * dwarf2.c (read_indexed_address): Return uint64_t. Sanity check idx.
+ (read_indexed_string): Use uint64_t for str_offset. Sanity check idx.
+ (read_attribute_value): Clear u.str for indexed string forms when
+ DW_AT_str_offsets_base is not yet read or missing.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=31d6c13defeba7716ebc9d5c8f81f2f35fe39980]
+
+CVE: CVE-2023-1579
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+
+---
+ bfd/dwarf2.c | 51 ++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 42 insertions(+), 9 deletions(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 51018e1ab45..aaa2d84887f 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -1353,13 +1353,13 @@ is_addrx_form (enum dwarf_form form)
+
+ /* Returns the address in .debug_addr section using DW_AT_addr_base.
+ Used to implement DW_FORM_addrx*. */
+-static bfd_vma
++static uint64_t
+ read_indexed_address (uint64_t idx, struct comp_unit *unit)
+ {
+ struct dwarf2_debug *stash = unit->stash;
+ struct dwarf2_debug_file *file = unit->file;
+- size_t addr_base = unit->dwarf_addr_offset;
+ bfd_byte *info_ptr;
++ size_t offset;
+
+ if (stash == NULL)
+ return 0;
+@@ -1369,12 +1369,23 @@ read_indexed_address (uint64_t idx, struct comp_unit *unit)
+ &file->dwarf_addr_buffer, &file->dwarf_addr_size))
+ return 0;
+
+- info_ptr = file->dwarf_addr_buffer + addr_base + idx * unit->offset_size;
++ if (_bfd_mul_overflow (idx, unit->offset_size, &offset))
++ return 0;
++
++ offset += unit->dwarf_addr_offset;
++ if (offset < unit->dwarf_addr_offset
++ || offset > file->dwarf_addr_size
++ || file->dwarf_addr_size - offset < unit->offset_size)
++ return 0;
++
++ info_ptr = file->dwarf_addr_buffer + offset;
+
+ if (unit->offset_size == 4)
+ return bfd_get_32 (unit->abfd, info_ptr);
+- else
++ else if (unit->offset_size == 8)
+ return bfd_get_64 (unit->abfd, info_ptr);
++ else
++ return 0;
+ }
+
+ /* Returns the string using DW_AT_str_offsets_base.
+@@ -1385,7 +1396,8 @@ read_indexed_string (uint64_t idx, struct comp_unit *unit)
+ struct dwarf2_debug *stash = unit->stash;
+ struct dwarf2_debug_file *file = unit->file;
+ bfd_byte *info_ptr;
+- unsigned long str_offset;
++ uint64_t str_offset;
++ size_t offset;
+
+ if (stash == NULL)
+ return NULL;
+@@ -1401,15 +1413,26 @@ read_indexed_string (uint64_t idx, struct comp_unit *unit)
+ &file->dwarf_str_offsets_size))
+ return NULL;
+
+- info_ptr = (file->dwarf_str_offsets_buffer
+- + unit->dwarf_str_offset
+- + idx * unit->offset_size);
++ if (_bfd_mul_overflow (idx, unit->offset_size, &offset))
++ return NULL;
++
++ offset += unit->dwarf_str_offset;
++ if (offset < unit->dwarf_str_offset
++ || offset > file->dwarf_str_offsets_size
++ || file->dwarf_str_offsets_size - offset < unit->offset_size)
++ return NULL;
++
++ info_ptr = file->dwarf_str_offsets_buffer + offset;
+
+ if (unit->offset_size == 4)
+ str_offset = bfd_get_32 (unit->abfd, info_ptr);
+- else
++ else if (unit->offset_size == 8)
+ str_offset = bfd_get_64 (unit->abfd, info_ptr);
++ else
++ return NULL;
+
++ if (str_offset >= file->dwarf_str_size)
++ return NULL;
+ return (const char *) file->dwarf_str_buffer + str_offset;
+ }
+
+@@ -1534,27 +1557,37 @@ read_attribute_value (struct attribute * attr,
+ is not yet read. */
+ if (unit->dwarf_str_offset != 0)
+ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ else
++ attr->u.str = NULL;
+ break;
+ case DW_FORM_strx2:
+ attr->u.val = read_2_bytes (abfd, &info_ptr, info_ptr_end);
+ if (unit->dwarf_str_offset != 0)
+ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ else
++ attr->u.str = NULL;
+ break;
+ case DW_FORM_strx3:
+ attr->u.val = read_3_bytes (abfd, &info_ptr, info_ptr_end);
+ if (unit->dwarf_str_offset != 0)
+ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ else
++ attr->u.str = NULL;
+ break;
+ case DW_FORM_strx4:
+ attr->u.val = read_4_bytes (abfd, &info_ptr, info_ptr_end);
+ if (unit->dwarf_str_offset != 0)
+ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ else
++ attr->u.str = NULL;
+ break;
+ case DW_FORM_strx:
+ attr->u.val = _bfd_safe_read_leb128 (abfd, &info_ptr,
+ false, info_ptr_end);
+ if (unit->dwarf_str_offset != 0)
+ attr->u.str = (char *) read_indexed_string (attr->u.val, unit);
++ else
++ attr->u.str = NULL;
+ break;
+ case DW_FORM_exprloc:
+ case DW_FORM_block:
+--
+2.31.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-4.patch b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-4.patch
new file mode 100644
index 0000000000..c5a869ca9d
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0021-CVE-2023-1579-4.patch
@@ -0,0 +1,37 @@
+From 3e307d538c351aa9327cbad672c884059ecc20dd Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Wed, 11 Jan 2023 12:13:46 +0000
+Subject: [PATCH] Fix a potential illegal memory access in the BFD library when
+ parsing a corrupt DWARF file.
+
+ PR 29988
+ * dwarf2.c (read_indexed_address): Fix check for an out of range
+ offset.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=3e307d538c351aa9327cbad672c884059ecc20dd]
+
+CVE: CVE-2023-1579
+
+Signed-off-by: Yash Shinde <Yash.Shinde@windriver.com>
+
+---
+ bfd/ChangeLog | 6 ++++++
+ bfd/dwarf2.c | 2 +-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 6eb6e04e6e5..4ec0053a111 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -1412,7 +1412,7 @@ read_indexed_address (uint64_t idx, struct comp_unit *unit)
+ offset += unit->dwarf_addr_offset;
+ if (offset < unit->dwarf_addr_offset
+ || offset > file->dwarf_addr_size
+- || file->dwarf_addr_size - offset < unit->offset_size)
++ || file->dwarf_addr_size - offset < unit->addr_size)
+ return 0;
+
+ info_ptr = file->dwarf_addr_buffer + offset;
+--
+2.31.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-1.patch b/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-1.patch
new file mode 100644
index 0000000000..990243f5c9
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-1.patch
@@ -0,0 +1,56 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 17 Mar 2022 09:35:39 +0000 (+1030)
+Subject: ubsan: Null dereference in parse_module
+X-Git-Tag: gdb-12.1-release~59
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=c9178f285acf19e066be8367185d52837161b0a2
+
+ubsan: Null dereference in parse_module
+
+ * vms-alpha.c (parse_module): Sanity check that DST__K_RTNBEG
+ has set module->func_table for DST__K_RTNEND. Check return
+ of bfd_zalloc.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=c9178f285acf19e066be8367185d52837161b0a2]
+
+CVE: CVE-2023-25584
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+---
+
+diff --git a/bfd/vms-alpha.c b/bfd/vms-alpha.c
+index 4a92574c850..1129c98f0e2 100644
+--- a/bfd/vms-alpha.c
++++ b/bfd/vms-alpha.c
+@@ -4352,9 +4352,13 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+
+ /* Initialize tables with zero element. */
+ curr_srec = (struct srecinfo *) bfd_zalloc (abfd, sizeof (struct srecinfo));
++ if (!curr_srec)
++ return false;
+ module->srec_table = curr_srec;
+
+ curr_line = (struct lineinfo *) bfd_zalloc (abfd, sizeof (struct lineinfo));
++ if (!curr_line)
++ return false;
+ module->line_table = curr_line;
+
+ while (length == -1 || ptr < maxptr)
+@@ -4389,6 +4393,8 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ case DST__K_RTNBEG:
+ funcinfo = (struct funcinfo *)
+ bfd_zalloc (abfd, sizeof (struct funcinfo));
++ if (!funcinfo)
++ return false;
+ funcinfo->name
+ = _bfd_vms_save_counted_string (abfd, ptr + DST_S_B_RTNBEG_NAME,
+ maxptr - (ptr + DST_S_B_RTNBEG_NAME));
+@@ -4401,6 +4407,8 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ break;
+
+ case DST__K_RTNEND:
++ if (!module->func_table)
++ return false;
+ module->func_table->high = module->func_table->low
+ + bfd_getl32 (ptr + DST_S_L_RTNEND_SIZE) - 1;
+
diff --git a/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-2.patch b/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-2.patch
new file mode 100644
index 0000000000..f4c5ed2aff
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-2.patch
@@ -0,0 +1,38 @@
+From da928f639002002dfc649ed9f50492d5d6cb4cee Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Mon, 5 Dec 2022 11:11:44 +0000
+Subject: [PATCH] Fix an illegal memory access when parsing a corrupt VMS Alpha
+ file.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix an illegal memory access when parsing a corrupt VMS Alpha file.
+
+ PR 29848
+ * vms-alpha.c (parse_module): Fix potential out of bounds memory
+ access.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=942fa4fb32738ecbb447546d54f1e5f0312d2ed4]
+
+CVE: CVE-2023-25584
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+---
+ bfd/vms-alpha.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/bfd/vms-alpha.c b/bfd/vms-alpha.c
+index c548722c..53b3f1bf 100644
+--- a/bfd/vms-alpha.c
++++ b/bfd/vms-alpha.c
+@@ -4361,7 +4361,7 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ return false;
+ module->line_table = curr_line;
+
+- while (length == -1 || ptr < maxptr)
++ while (length == -1 || (ptr + 3) < maxptr)
+ {
+ /* The first byte is not counted in the recorded length. */
+ int rec_length = bfd_getl16 (ptr) + 1;
diff --git a/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-3.patch b/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-3.patch
new file mode 100644
index 0000000000..47cc3f310b
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0022-CVE-2023-25584-3.patch
@@ -0,0 +1,536 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 12 Dec 2022 07:58:49 +0000 (+1030)
+Subject: Lack of bounds checking in vms-alpha.c parse_module
+X-Git-Tag: gdb-13-branchpoint~87
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=77c225bdeb410cf60da804879ad41622f5f1aa44
+
+Lack of bounds checking in vms-alpha.c parse_module
+
+ PR 29873
+ PR 29874
+ PR 29875
+ PR 29876
+ PR 29877
+ PR 29878
+ PR 29879
+ PR 29880
+ PR 29881
+ PR 29882
+ PR 29883
+ PR 29884
+ PR 29885
+ PR 29886
+ PR 29887
+ PR 29888
+ PR 29889
+ PR 29890
+ PR 29891
+ * vms-alpha.c (parse_module): Make length param bfd_size_type.
+ Delete length == -1 checks. Sanity check record_length.
+ Sanity check DST__K_MODBEG, DST__K_RTNBEG, DST__K_RTNEND lengths.
+ Sanity check DST__K_SOURCE and DST__K_LINE_NUM elements
+ before accessing.
+ (build_module_list): Pass dst_section size to parse_module.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=77c225bdeb410cf60da804879ad41622f5f1aa44]
+
+CVE: CVE-2023-25584
+CVE: CVE-2022-47673
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+Signed-off-by: Chaitanya Vadrevu <chaitanya.vadrevu@ni.com>
+
+---
+
+diff --git a/bfd/vms-alpha.c b/bfd/vms-alpha.c
+index c0eb5bc5a2a..3b63259cc81 100644
+--- a/bfd/vms-alpha.c
++++ b/bfd/vms-alpha.c
+@@ -4340,7 +4340,7 @@ new_module (bfd *abfd)
+
+ static bool
+ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+- int length)
++ bfd_size_type length)
+ {
+ unsigned char *maxptr = ptr + length;
+ unsigned char *src_ptr, *pcl_ptr;
+@@ -4361,7 +4361,7 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ return false;
+ module->line_table = curr_line;
+
+- while (length == -1 || (ptr + 3) < maxptr)
++ while (ptr + 3 < maxptr)
+ {
+ /* The first byte is not counted in the recorded length. */
+ int rec_length = bfd_getl16 (ptr) + 1;
+@@ -4369,15 +4369,19 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+
+ vms_debug2 ((2, "DST record: leng %d, type %d\n", rec_length, rec_type));
+
+- if (length == -1 && rec_type == DST__K_MODEND)
++ if (rec_length > maxptr - ptr)
++ break;
++ if (rec_type == DST__K_MODEND)
+ break;
+
+ switch (rec_type)
+ {
+ case DST__K_MODBEG:
++ if (rec_length <= DST_S_B_MODBEG_NAME)
++ break;
+ module->name
+ = _bfd_vms_save_counted_string (abfd, ptr + DST_S_B_MODBEG_NAME,
+- maxptr - (ptr + DST_S_B_MODBEG_NAME));
++ rec_length - DST_S_B_MODBEG_NAME);
+
+ curr_pc = 0;
+ prev_pc = 0;
+@@ -4391,13 +4395,15 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ break;
+
+ case DST__K_RTNBEG:
++ if (rec_length <= DST_S_B_RTNBEG_NAME)
++ break;
+ funcinfo = (struct funcinfo *)
+ bfd_zalloc (abfd, sizeof (struct funcinfo));
+ if (!funcinfo)
+ return false;
+ funcinfo->name
+ = _bfd_vms_save_counted_string (abfd, ptr + DST_S_B_RTNBEG_NAME,
+- maxptr - (ptr + DST_S_B_RTNBEG_NAME));
++ rec_length - DST_S_B_RTNBEG_NAME);
+ funcinfo->low = bfd_getl32 (ptr + DST_S_L_RTNBEG_ADDRESS);
+ funcinfo->next = module->func_table;
+ module->func_table = funcinfo;
+@@ -4407,6 +4413,8 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ break;
+
+ case DST__K_RTNEND:
++ if (rec_length < DST_S_L_RTNEND_SIZE + 4)
++ break;
+ if (!module->func_table)
+ return false;
+ module->func_table->high = module->func_table->low
+@@ -4439,10 +4447,63 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+
+ vms_debug2 ((3, "source info\n"));
+
+- while (src_ptr < ptr + rec_length)
++ while (src_ptr - ptr < rec_length)
+ {
+ int cmd = src_ptr[0], cmd_length, data;
+
++ switch (cmd)
++ {
++ case DST__K_SRC_DECLFILE:
++ if (src_ptr - ptr + DST_S_B_SRC_DF_LENGTH >= rec_length)
++ cmd_length = 0x10000;
++ else
++ cmd_length = src_ptr[DST_S_B_SRC_DF_LENGTH] + 2;
++ break;
++
++ case DST__K_SRC_DEFLINES_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SRC_DEFLINES_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_INCRLNUM_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SRC_SETFILE:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_SETLNUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SRC_SETLNUM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_SETREC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SRC_SETREC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_FORMFEED:
++ cmd_length = 1;
++ break;
++
++ default:
++ cmd_length = 2;
++ break;
++ }
++
++ if (src_ptr - ptr + cmd_length > rec_length)
++ break;
++
+ switch (cmd)
+ {
+ case DST__K_SRC_DECLFILE:
+@@ -4467,7 +4528,6 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+
+ module->file_table [fileid].name = filename;
+ module->file_table [fileid].srec = 1;
+- cmd_length = src_ptr[DST_S_B_SRC_DF_LENGTH] + 2;
+ vms_debug2 ((4, "DST_S_C_SRC_DECLFILE: %d, %s\n",
+ fileid, module->file_table [fileid].name));
+ }
+@@ -4484,7 +4544,6 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ srec->sfile = curr_srec->sfile;
+ curr_srec->next = srec;
+ curr_srec = srec;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST_S_C_SRC_DEFLINES_B: %d\n", data));
+ break;
+
+@@ -4499,14 +4558,12 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ srec->sfile = curr_srec->sfile;
+ curr_srec->next = srec;
+ curr_srec = srec;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_DEFLINES_W: %d\n", data));
+ break;
+
+ case DST__K_SRC_INCRLNUM_B:
+ data = src_ptr[DST_S_B_SRC_UNSBYTE];
+ curr_srec->line += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST_S_C_SRC_INCRLNUM_B: %d\n", data));
+ break;
+
+@@ -4514,21 +4571,18 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->sfile = data;
+ curr_srec->srec = module->file_table[data].srec;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETFILE: %d\n", data));
+ break;
+
+ case DST__K_SRC_SETLNUM_L:
+ data = bfd_getl32 (src_ptr + DST_S_L_SRC_UNSLONG);
+ curr_srec->line = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST_S_C_SRC_SETLNUM_L: %d\n", data));
+ break;
+
+ case DST__K_SRC_SETLNUM_W:
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->line = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETLNUM_W: %d\n", data));
+ break;
+
+@@ -4536,7 +4590,6 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ data = bfd_getl32 (src_ptr + DST_S_L_SRC_UNSLONG);
+ curr_srec->srec = data;
+ module->file_table[curr_srec->sfile].srec = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST_S_C_SRC_SETREC_L: %d\n", data));
+ break;
+
+@@ -4544,19 +4597,16 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->srec = data;
+ module->file_table[curr_srec->sfile].srec = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETREC_W: %d\n", data));
+ break;
+
+ case DST__K_SRC_FORMFEED:
+- cmd_length = 1;
+ vms_debug2 ((4, "DST_S_C_SRC_FORMFEED\n"));
+ break;
+
+ default:
+ _bfd_error_handler (_("unknown source command %d"),
+ cmd);
+- cmd_length = 2;
+ break;
+ }
+
+@@ -4569,18 +4619,114 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+
+ vms_debug2 ((3, "line info\n"));
+
+- while (pcl_ptr < ptr + rec_length)
++ while (pcl_ptr - ptr < rec_length)
+ {
+ /* The command byte is signed so we must sign-extend it. */
+ int cmd = ((signed char *)pcl_ptr)[0], cmd_length, data;
+
++ switch (cmd)
++ {
++ case DST__K_DELTA_PC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_DELTA_PC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_INCR_LINUM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_INCR_LINUM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_INCR_LINUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_LINUM_INCR:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_LINUM_INCR_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_RESET_LINUM_INCR:
++ cmd_length = 1;
++ break;
++
++ case DST__K_BEG_STMT_MODE:
++ cmd_length = 1;
++ break;
++
++ case DST__K_END_STMT_MODE:
++ cmd_length = 1;
++ break;
++
++ case DST__K_SET_LINUM_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_LINUM:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SET_LINUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_PC:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_PC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SET_PC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_STMTNUM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_TERM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_TERM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_TERM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_ABS_PC:
++ cmd_length = 5;
++ break;
++
++ default:
++ if (cmd <= 0)
++ cmd_length = 1;
++ else
++ cmd_length = 2;
++ break;
++ }
++
++ if (pcl_ptr - ptr + cmd_length > rec_length)
++ break;
++
+ switch (cmd)
+ {
+ case DST__K_DELTA_PC_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_pc += data;
+ curr_linenum += 1;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_DELTA_PC_W: %d\n", data));
+ break;
+
+@@ -4588,131 +4734,111 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc += data;
+ curr_linenum += 1;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_DELTA_PC_L: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_linenum += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_INCR_LINUM: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_linenum += data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_INCR_LINUM_W: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_linenum += data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_INCR_LINUM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM_INCR:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_LINUM_INCR");
+- cmd_length = 2;
+ break;
+
+ case DST__K_SET_LINUM_INCR_W:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_LINUM_INCR_W");
+- cmd_length = 3;
+ break;
+
+ case DST__K_RESET_LINUM_INCR:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_RESET_LINUM_INCR");
+- cmd_length = 1;
+ break;
+
+ case DST__K_BEG_STMT_MODE:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_BEG_STMT_MODE");
+- cmd_length = 1;
+ break;
+
+ case DST__K_END_STMT_MODE:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_END_STMT_MODE");
+- cmd_length = 1;
+ break;
+
+ case DST__K_SET_LINUM_B:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_linenum = data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_SET_LINUM_B: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_linenum = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_SET_LINE_NUM: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_linenum = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_SET_LINUM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_PC:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC");
+- cmd_length = 2;
+ break;
+
+ case DST__K_SET_PC_W:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC_W");
+- cmd_length = 3;
+ break;
+
+ case DST__K_SET_PC_L:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC_L");
+- cmd_length = 5;
+ break;
+
+ case DST__K_SET_STMTNUM:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_STMTNUM");
+- cmd_length = 2;
+ break;
+
+ case DST__K_TERM:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_pc += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_TERM: %d\n", data));
+ break;
+
+ case DST__K_TERM_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_pc += data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_TERM_W: %d\n", data));
+ break;
+
+ case DST__K_TERM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc += data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_TERM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_ABS_PC:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_SET_ABS_PC: 0x%x\n", data));
+ break;
+
+@@ -4721,15 +4847,11 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ {
+ curr_pc -= cmd;
+ curr_linenum += 1;
+- cmd_length = 1;
+ vms_debug2 ((4, "bump pc to 0x%lx and line to %d\n",
+ (unsigned long)curr_pc, curr_linenum));
+ }
+ else
+- {
+- _bfd_error_handler (_("unknown line command %d"), cmd);
+- cmd_length = 2;
+- }
++ _bfd_error_handler (_("unknown line command %d"), cmd);
+ break;
+ }
+
+@@ -4859,7 +4981,8 @@ build_module_list (bfd *abfd)
+ return NULL;
+
+ module = new_module (abfd);
+- if (!parse_module (abfd, module, PRIV (dst_section)->contents, -1))
++ if (!parse_module (abfd, module, PRIV (dst_section)->contents,
++ PRIV (dst_section)->size))
+ return NULL;
+ list = module;
+ }
diff --git a/meta/recipes-devtools/binutils/binutils/0023-CVE-2023-25585.patch b/meta/recipes-devtools/binutils/binutils/0023-CVE-2023-25585.patch
new file mode 100644
index 0000000000..e31a027b9f
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0023-CVE-2023-25585.patch
@@ -0,0 +1,54 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 12 Dec 2022 08:31:08 +0000 (+1030)
+Subject: PR29892, Field file_table of struct module is uninitialized
+X-Git-Tag: gdb-13-branchpoint~86
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=65cf035b8dc1df5d8020e0b1449514a3c42933e7
+
+PR29892, Field file_table of struct module is uninitialized
+
+ PR 29892
+ * vms-alpha.c (new_module): Use bfd_zmalloc to alloc file_table.
+ (parse_module): Rewrite file_table reallocation code and clear.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=65cf035b8dc1df5d8020e0b1449514a3c42933e7]
+
+CVE: CVE-2023-25585
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+---
+
+diff --git a/bfd/vms-alpha.c b/bfd/vms-alpha.c
+index 3b63259cc81..6ee7060b0b2 100644
+--- a/bfd/vms-alpha.c
++++ b/bfd/vms-alpha.c
+@@ -4337,7 +4337,7 @@ new_module (bfd *abfd)
+ = (struct module *) bfd_zalloc (abfd, sizeof (struct module));
+ module->file_table_count = 16; /* Arbitrary. */
+ module->file_table
+- = bfd_malloc (module->file_table_count * sizeof (struct fileinfo));
++ = bfd_zmalloc (module->file_table_count * sizeof (struct fileinfo));
+ return module;
+ }
+
+@@ -4520,15 +4520,18 @@ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+ src_ptr + DST_S_B_SRC_DF_FILENAME,
+ ptr + rec_length - (src_ptr + DST_S_B_SRC_DF_FILENAME));
+
+- while (fileid >= module->file_table_count)
++ if (fileid >= module->file_table_count)
+ {
+- module->file_table_count *= 2;
++ unsigned int old_count = module->file_table_count;
++ module->file_table_count += fileid;
+ module->file_table
+ = bfd_realloc_or_free (module->file_table,
+ module->file_table_count
+ * sizeof (struct fileinfo));
+ if (module->file_table == NULL)
+ return false;
++ memset (module->file_table + old_count, 0,
++ fileid * sizeof (struct fileinfo));
+ }
+
+ module->file_table [fileid].name = filename;
diff --git a/meta/recipes-devtools/binutils/binutils/0025-CVE-2023-25588.patch b/meta/recipes-devtools/binutils/binutils/0025-CVE-2023-25588.patch
new file mode 100644
index 0000000000..9b5825037f
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0025-CVE-2023-25588.patch
@@ -0,0 +1,149 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 14 Oct 2022 00:00:21 +0000 (+1030)
+Subject: PR29677, Field `the_bfd` of `asymbol` is uninitialised
+X-Git-Tag: gdb-13-branchpoint~871
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d12f8998d2d086f0a6606589e5aedb7147e6f2f1
+
+PR29677, Field `the_bfd` of `asymbol` is uninitialised
+
+Besides not initialising the_bfd of synthetic symbols, counting
+symbols when sizing didn't match symbols created if there were any
+dynsyms named "". We don't want synthetic symbols without names
+anyway, so get rid of them. Also, simplify and correct sanity checks.
+
+ PR 29677
+ * mach-o.c (bfd_mach_o_get_synthetic_symtab): Rewrite.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d12f8998d2d086f0a6606589e5aedb7147e6f2f1]
+
+CVE: CVE-2023-25588
+CVE: CVE-2022-47696
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+Signed-off-by: Chaitanya Vadrevu <chaitanya.vadrevu@ni.com>
+
+---
+
+diff --git a/bfd/mach-o.c b/bfd/mach-o.c
+index acb35e7f0c6..5279343768c 100644
+--- a/bfd/mach-o.c
++++ b/bfd/mach-o.c
+@@ -938,11 +938,9 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ bfd_mach_o_symtab_command *symtab = mdata->symtab;
+ asymbol *s;
+ char * s_start;
+- char * s_end;
+ unsigned long count, i, j, n;
+ size_t size;
+ char *names;
+- char *nul_name;
+ const char stub [] = "$stub";
+
+ *ret = NULL;
+@@ -955,27 +953,27 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ /* We need to allocate a bfd symbol for every indirect symbol and to
+ allocate the memory for its name. */
+ count = dysymtab->nindirectsyms;
+- size = count * sizeof (asymbol) + 1;
+-
++ size = 0;
+ for (j = 0; j < count; j++)
+ {
+- const char * strng;
+ unsigned int isym = dysymtab->indirect_syms[j];
++ const char *str;
+
+ /* Some indirect symbols are anonymous. */
+- if (isym < symtab->nsyms && (strng = symtab->symbols[isym].symbol.name))
+- /* PR 17512: file: f5b8eeba. */
+- size += strnlen (strng, symtab->strsize - (strng - symtab->strtab)) + sizeof (stub);
++ if (isym < symtab->nsyms
++ && (str = symtab->symbols[isym].symbol.name) != NULL)
++ {
++ /* PR 17512: file: f5b8eeba. */
++ size += strnlen (str, symtab->strsize - (str - symtab->strtab));
++ size += sizeof (stub);
++ }
+ }
+
+- s_start = bfd_malloc (size);
++ s_start = bfd_malloc (size + count * sizeof (asymbol));
+ s = *ret = (asymbol *) s_start;
+ if (s == NULL)
+ return -1;
+ names = (char *) (s + count);
+- nul_name = names;
+- *names++ = 0;
+- s_end = s_start + size;
+
+ n = 0;
+ for (i = 0; i < mdata->nsects; i++)
+@@ -997,47 +995,39 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ entry_size = bfd_mach_o_section_get_entry_size (abfd, sec);
+
+ /* PR 17512: file: 08e15eec. */
+- if (first >= count || last >= count || first > last)
++ if (first >= count || last > count || first > last)
+ goto fail;
+
+ for (j = first; j < last; j++)
+ {
+ unsigned int isym = dysymtab->indirect_syms[j];
+-
+- /* PR 17512: file: 04d64d9b. */
+- if (((char *) s) + sizeof (* s) > s_end)
+- goto fail;
+-
+- s->flags = BSF_GLOBAL | BSF_SYNTHETIC;
+- s->section = sec->bfdsection;
+- s->value = addr - sec->addr;
+- s->udata.p = NULL;
++ const char *str;
++ size_t len;
+
+ if (isym < symtab->nsyms
+- && symtab->symbols[isym].symbol.name)
++ && (str = symtab->symbols[isym].symbol.name) != NULL)
+ {
+- const char *sym = symtab->symbols[isym].symbol.name;
+- size_t len;
+-
+- s->name = names;
+- len = strlen (sym);
+- /* PR 17512: file: 47dfd4d2. */
+- if (names + len >= s_end)
++ /* PR 17512: file: 04d64d9b. */
++ if (n >= count)
+ goto fail;
+- memcpy (names, sym, len);
+- names += len;
+- /* PR 17512: file: 18f340a4. */
+- if (names + sizeof (stub) >= s_end)
++ len = strnlen (str, symtab->strsize - (str - symtab->strtab));
++ /* PR 17512: file: 47dfd4d2, 18f340a4. */
++ if (size < len + sizeof (stub))
+ goto fail;
+- memcpy (names, stub, sizeof (stub));
+- names += sizeof (stub);
++ memcpy (names, str, len);
++ memcpy (names + len, stub, sizeof (stub));
++ s->name = names;
++ names += len + sizeof (stub);
++ size -= len + sizeof (stub);
++ s->the_bfd = symtab->symbols[isym].symbol.the_bfd;
++ s->flags = BSF_GLOBAL | BSF_SYNTHETIC;
++ s->section = sec->bfdsection;
++ s->value = addr - sec->addr;
++ s->udata.p = NULL;
++ s++;
++ n++;
+ }
+- else
+- s->name = nul_name;
+-
+ addr += entry_size;
+- s++;
+- n++;
+ }
+ break;
+ default:
diff --git a/meta/recipes-devtools/binutils/binutils/0026-CVE-2023-1972.patch b/meta/recipes-devtools/binutils/binutils/0026-CVE-2023-1972.patch
new file mode 100644
index 0000000000..f86adad217
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0026-CVE-2023-1972.patch
@@ -0,0 +1,41 @@
+From: Nick Clifton <nickc@redhat.com>
+Date: Thu, 30 Mar 2023 09:10:09 +0000 (+0100)
+Subject: Fix an illegal memory access when accessing a zero-length verdef table.
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=c22d38baefc5a7a1e1f5cdc9dbb556b1f0ec5c57
+
+Fix an illegal memory access when accessing a zero-length verdef table.
+
+ PR 30285
+ * elf.c (_bfd_elf_slurp_version_tables): Fail if no version definitions are allocated.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=c22d38baefc5a7a1e1f5cdc9dbb556b1f0ec5c57]
+
+CVE: CVE-2023-1972
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+---
+
+diff --git a/bfd/elf.c b/bfd/elf.c
+index 027d0143735..185028cbd97 100644
+--- a/bfd/elf.c
++++ b/bfd/elf.c
+@@ -9030,6 +9030,9 @@ _bfd_elf_slurp_version_tables (bfd *abfd, bool default_imported_symver)
+ bfd_set_error (bfd_error_file_too_big);
+ goto error_return_verdef;
+ }
++
++ if (amt == 0)
++ goto error_return_verdef;
+ elf_tdata (abfd)->verdef = (Elf_Internal_Verdef *) bfd_zalloc (abfd, amt);
+ if (elf_tdata (abfd)->verdef == NULL)
+ goto error_return_verdef;
+@@ -9133,6 +9136,8 @@ _bfd_elf_slurp_version_tables (bfd *abfd, bool default_imported_symver)
+ bfd_set_error (bfd_error_file_too_big);
+ goto error_return;
+ }
++ if (amt == 0)
++ goto error_return;
+ elf_tdata (abfd)->verdef = (Elf_Internal_Verdef *) bfd_zalloc (abfd, amt);
+ if (elf_tdata (abfd)->verdef == NULL)
+ goto error_return;
diff --git a/meta/recipes-devtools/binutils/binutils/0027-CVE-2022-47008.patch b/meta/recipes-devtools/binutils/binutils/0027-CVE-2022-47008.patch
new file mode 100644
index 0000000000..a3fff65409
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0027-CVE-2022-47008.patch
@@ -0,0 +1,67 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 16 Jun 2022 23:43:38 +0000 (+0930)
+Subject: PR29255, memory leak in make_tempdir
+X-Git-Tag: binutils-2_39~236
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d6e1d48c83b165c129cb0aa78905f7ca80a1f682
+
+PR29255, memory leak in make_tempdir
+
+ PR 29255
+ * bucomm.c (make_tempdir, make_tempname): Free template on all
+ failure paths.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d6e1d48c83b165c129cb0aa78905f7ca80a1f682]
+
+CVE: CVE-2022-47008
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+---
+
+diff --git a/binutils/bucomm.c b/binutils/bucomm.c
+index fdc2209df9c..4395cb9f7f5 100644
+--- a/binutils/bucomm.c
++++ b/binutils/bucomm.c
+@@ -537,8 +537,9 @@ make_tempname (const char *filename, int *ofd)
+ #else
+ tmpname = mktemp (tmpname);
+ if (tmpname == NULL)
+- return NULL;
+- fd = open (tmpname, O_RDWR | O_CREAT | O_EXCL, 0600);
++ fd = -1;
++ else
++ fd = open (tmpname, O_RDWR | O_CREAT | O_EXCL, 0600);
+ #endif
+ if (fd == -1)
+ {
+@@ -556,22 +557,23 @@ char *
+ make_tempdir (const char *filename)
+ {
+ char *tmpname = template_in_dir (filename);
++ char *ret;
+
+ #ifdef HAVE_MKDTEMP
+- return mkdtemp (tmpname);
++ ret = mkdtemp (tmpname);
+ #else
+- tmpname = mktemp (tmpname);
+- if (tmpname == NULL)
+- return NULL;
++ ret = mktemp (tmpname);
+ #if defined (_WIN32) && !defined (__CYGWIN32__)
+ if (mkdir (tmpname) != 0)
+- return NULL;
++ ret = NULL;
+ #else
+ if (mkdir (tmpname, 0700) != 0)
+- return NULL;
++ ret = NULL;
+ #endif
+- return tmpname;
+ #endif
++ if (ret == NULL)
++ free (tmpname);
++ return ret;
+ }
+
+ /* Parse a string into a VMA, with a fatal error if it can't be
diff --git a/meta/recipes-devtools/binutils/binutils/0028-CVE-2022-47011.patch b/meta/recipes-devtools/binutils/binutils/0028-CVE-2022-47011.patch
new file mode 100644
index 0000000000..73ae46e218
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0028-CVE-2022-47011.patch
@@ -0,0 +1,35 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 20 Jun 2022 01:09:13 +0000 (+0930)
+Subject: PR29261, memory leak in parse_stab_struct_fields
+X-Git-Tag: binutils-2_39~225
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=8a24927bc8dbf6beac2000593b21235c3796dc35
+
+PR29261, memory leak in parse_stab_struct_fields
+
+ PR 29261
+ * stabs.c (parse_stab_struct_fields): Free "fields" on failure path.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=8a24927bc8dbf6beac2000593b21235c3796dc35]
+
+CVE: CVE-2022-47011
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+---
+
+diff --git a/binutils/stabs.c b/binutils/stabs.c
+index 796ff85b86a..bf3f578cbcc 100644
+--- a/binutils/stabs.c
++++ b/binutils/stabs.c
+@@ -2367,7 +2367,10 @@ parse_stab_struct_fields (void *dhandle,
+
+ if (! parse_stab_one_struct_field (dhandle, info, pp, p, fields + c,
+ staticsp, p_end))
+- return false;
++ {
++ free (fields);
++ return false;
++ }
+
+ ++c;
+ }
diff --git a/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-1.patch b/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-1.patch
new file mode 100644
index 0000000000..4642251f9b
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-1.patch
@@ -0,0 +1,31 @@
+From: Jan Beulich <jbeulich@suse.com>
+Date: Tue, 29 Mar 2022 06:19:14 +0000 (+0200)
+Subject: bfd/Dwarf2: gas doesn't mangle names
+X-Git-Tag: binutils-2_39~1287
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=ddfc2f56d5782af79c696d7fef7c73bba11e8b09
+
+bfd/Dwarf2: gas doesn't mangle names
+
+Include the language identifier emitted by gas in the set of ones where
+no mangled names are expected. Even if there could be "hand-mangled"
+names, gas doesn't emit DW_AT_linkage_name in the first place.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=ddfc2f56d5782af79c696d7fef7c73bba11e8b09]
+
+CVE: CVE-2022-48065
+
+Signed-off-by: Sanjana Venkatesh <Sanjana.Venkatesh@windriver.com>
+
+---
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 8cd0ce9d425..9aa4e955a5e 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -1441,6 +1441,7 @@ non_mangled (int lang)
+ case DW_LANG_PLI:
+ case DW_LANG_UPC:
+ case DW_LANG_C11:
++ case DW_LANG_Mips_Assembler:
+ return true;
+ }
+ }
diff --git a/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-2.patch b/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-2.patch
new file mode 100644
index 0000000000..8aa21f2716
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-2.patch
@@ -0,0 +1,115 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 21 Sep 2022 05:15:44 +0000 (+0930)
+Subject: dwarf2.c: mangle_style
+X-Git-Tag: gdb-13-branchpoint~1165
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=4609af80c29db6015ce01b67c48f237c210da9b4
+
+dwarf2.c: mangle_style
+
+non_mangled incorrectly returned "true" for Ada. Correct that, and
+add a few more non-mangled entries. Return a value suitable for
+passing to cplus_demangle to control demangling.
+
+ * dwarf2.c: Include demangle.h.
+ (mangle_style): Rename from non_mangled. Return DMGL_* value
+ to suit lang. Adjust all callers.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=4609af80c29db6015ce01b67c48f237c210da9b4]
+
+CVE: CVE-2022-48065
+
+Signed-off-by: Sanjana Venkatesh <Sanjana.Venkatesh@windriver.com>
+
+---
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index e7c12c3e9de..138cdbb00bb 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -32,6 +32,7 @@
+ #include "sysdep.h"
+ #include "bfd.h"
+ #include "libiberty.h"
++#include "demangle.h"
+ #include "libbfd.h"
+ #include "elf-bfd.h"
+ #include "dwarf2.h"
+@@ -1711,31 +1712,52 @@ read_attribute (struct attribute * attr,
+ return info_ptr;
+ }
+
+-/* Return whether DW_AT_name will return the same as DW_AT_linkage_name
+- for a function. */
++/* Return mangling style given LANG. */
+
+-static bool
+-non_mangled (int lang)
++static int
++mangle_style (int lang)
+ {
+ switch (lang)
+ {
++ case DW_LANG_Ada83:
++ case DW_LANG_Ada95:
++ return DMGL_GNAT;
++
++ case DW_LANG_C_plus_plus:
++ case DW_LANG_C_plus_plus_03:
++ case DW_LANG_C_plus_plus_11:
++ case DW_LANG_C_plus_plus_14:
++ return DMGL_GNU_V3;
++
++ case DW_LANG_Java:
++ return DMGL_JAVA;
++
++ case DW_LANG_D:
++ return DMGL_DLANG;
++
++ case DW_LANG_Rust:
++ case DW_LANG_Rust_old:
++ return DMGL_RUST;
++
+ default:
+- return false;
++ return DMGL_AUTO;
+
+ case DW_LANG_C89:
+ case DW_LANG_C:
+- case DW_LANG_Ada83:
+ case DW_LANG_Cobol74:
+ case DW_LANG_Cobol85:
+ case DW_LANG_Fortran77:
+ case DW_LANG_Pascal83:
+- case DW_LANG_C99:
+- case DW_LANG_Ada95:
+ case DW_LANG_PLI:
++ case DW_LANG_C99:
+ case DW_LANG_UPC:
+ case DW_LANG_C11:
+ case DW_LANG_Mips_Assembler:
+- return true;
++ case DW_LANG_Upc:
++ case DW_LANG_HP_Basic91:
++ case DW_LANG_HP_IMacro:
++ case DW_LANG_HP_Assembler:
++ return 0;
+ }
+ }
+
+@@ -3599,7 +3621,7 @@ find_abstract_instance (struct comp_unit *unit,
+ if (name == NULL && is_str_form (&attr))
+ {
+ name = attr.u.str;
+- if (non_mangled (unit->lang))
++ if (mangle_style (unit->lang) == 0)
+ *is_linkage = true;
+ }
+ break;
+@@ -4095,7 +4117,7 @@ scan_unit_for_symbols (struct comp_unit *unit)
+ if (func->name == NULL && is_str_form (&attr))
+ {
+ func->name = attr.u.str;
+- if (non_mangled (unit->lang))
++ if (mangle_style (unit->lang) == 0)
+ func->is_linkage = true;
+ }
+ break;
diff --git a/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-3.patch b/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-3.patch
new file mode 100644
index 0000000000..35a658a22c
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0029-CVE-2022-48065-3.patch
@@ -0,0 +1,122 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 21 Dec 2022 11:10:12 +0000 (+1030)
+Subject: PR29925, Memory leak in find_abstract_instance
+X-Git-Tag: binutils-2_40~192
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d28fbc7197ba0e021a43f873eff90b05dcdcff6a
+
+PR29925, Memory leak in find_abstract_instance
+
+The testcase in the PR had a variable with both DW_AT_decl_file and
+DW_AT_specification, where the DW_AT_specification also specified
+DW_AT_decl_file. This leads to a memory leak as the file name is
+malloced and duplicates are not expected.
+
+I've also changed find_abstract_instance to not use a temp for "name",
+because that can result in a change in behaviour from the usual
+"last of duplicate attributes wins" rule.
+
+ PR 29925
+ * dwarf2.c (find_abstract_instance): Delete "name" variable.
+ Free *filename_ptr before assigning new file name.
+ (scan_unit_for_symbols): Similarly free func->file and
+ var->file before assigning.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d28fbc7197ba0e021a43f873eff90b05dcdcff6a]
+
+CVE: CVE-2022-48065
+
+Signed-off-by: Sanjana Venkatesh <Sanjana.Venkatesh@windriver.com>
+
+---
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 0cd8152ee6e..b608afbc0cf 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -3441,7 +3441,6 @@ find_abstract_instance (struct comp_unit *unit,
+ struct abbrev_info *abbrev;
+ uint64_t die_ref = attr_ptr->u.val;
+ struct attribute attr;
+- const char *name = NULL;
+
+ if (recur_count == 100)
+ {
+@@ -3602,9 +3601,9 @@ find_abstract_instance (struct comp_unit *unit,
+ case DW_AT_name:
+ /* Prefer DW_AT_MIPS_linkage_name or DW_AT_linkage_name
+ over DW_AT_name. */
+- if (name == NULL && is_str_form (&attr))
++ if (*pname == NULL && is_str_form (&attr))
+ {
+- name = attr.u.str;
++ *pname = attr.u.str;
+ if (mangle_style (unit->lang) == 0)
+ *is_linkage = true;
+ }
+@@ -3612,7 +3611,7 @@ find_abstract_instance (struct comp_unit *unit,
+ case DW_AT_specification:
+ if (is_int_form (&attr)
+ && !find_abstract_instance (unit, &attr, recur_count + 1,
+- &name, is_linkage,
++ pname, is_linkage,
+ filename_ptr, linenumber_ptr))
+ return false;
+ break;
+@@ -3622,7 +3621,7 @@ find_abstract_instance (struct comp_unit *unit,
+ non-string forms into these attributes. */
+ if (is_str_form (&attr))
+ {
+- name = attr.u.str;
++ *pname = attr.u.str;
+ *is_linkage = true;
+ }
+ break;
+@@ -3630,8 +3629,11 @@ find_abstract_instance (struct comp_unit *unit,
+ if (!comp_unit_maybe_decode_line_info (unit))
+ return false;
+ if (is_int_form (&attr))
+- *filename_ptr = concat_filename (unit->line_table,
+- attr.u.val);
++ {
++ free (*filename_ptr);
++ *filename_ptr = concat_filename (unit->line_table,
++ attr.u.val);
++ }
+ break;
+ case DW_AT_decl_line:
+ if (is_int_form (&attr))
+@@ -3643,7 +3645,6 @@ find_abstract_instance (struct comp_unit *unit,
+ }
+ }
+ }
+- *pname = name;
+ return true;
+ }
+
+@@ -4139,8 +4140,11 @@ scan_unit_for_symbols (struct comp_unit *unit)
+
+ case DW_AT_decl_file:
+ if (is_int_form (&attr))
+- func->file = concat_filename (unit->line_table,
+- attr.u.val);
++ {
++ free (func->file);
++ func->file = concat_filename (unit->line_table,
++ attr.u.val);
++ }
+ break;
+
+ case DW_AT_decl_line:
+@@ -4182,8 +4186,11 @@ scan_unit_for_symbols (struct comp_unit *unit)
+
+ case DW_AT_decl_file:
+ if (is_int_form (&attr))
+- var->file = concat_filename (unit->line_table,
+- attr.u.val);
++ {
++ free (var->file);
++ var->file = concat_filename (unit->line_table,
++ attr.u.val);
++ }
+ break;
+
+ case DW_AT_decl_line:
diff --git a/meta/recipes-devtools/binutils/binutils/0030-CVE-2022-44840.patch b/meta/recipes-devtools/binutils/binutils/0030-CVE-2022-44840.patch
new file mode 100644
index 0000000000..2f4c38044b
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0030-CVE-2022-44840.patch
@@ -0,0 +1,151 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Sun, 30 Oct 2022 08:38:51 +0000 (+1030)
+Subject: Pool section entries for DWP version 1
+X-Git-Tag: gdb-13-branchpoint~664
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=28750e3b967da2207d51cbce9fc8be262817ee59
+
+Pool section entries for DWP version 1
+
+Ref: https://gcc.gnu.org/wiki/DebugFissionDWP?action=recall&rev=3
+
+Fuzzers have found a weakness in the code stashing pool section
+entries. With random nonsensical values in the index entries (rather
+than each index pointing to its own set distinct from other sets),
+it's possible to overflow the space allocated, losing the NULL
+terminator. Without a terminator, find_section_in_set can run off the
+end of the shndx_pool buffer. Fix this by scanning the pool directly.
+
+binutils/
+ * dwarf.c (add_shndx_to_cu_tu_entry): Delete range check.
+ (end_cu_tu_entry): Likewise.
+ (process_cu_tu_index): Fill shndx_pool by directly scanning
+ pool, rather than indirectly from index entries.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=blobdiff_plain;f=binutils/dwarf.c;h=7730293326ac1049451eb4a037ac86d827030700;hp=c6340a28906114e9df29d7401472c7dc0a98c2b1;hb=28750e3b967da2207d51cbce9fc8be262817ee59;hpb=60095ba3b8f8ba26a6389dded732fa446422c98f]
+
+CVE: CVE-2022-44840
+
+Signed-off-by: yash shinde <yash.shinde@windriver.com>
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index c6340a28906..7730293326a 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -10652,22 +10652,12 @@ prealloc_cu_tu_list (unsigned int nshndx)
+ static void
+ add_shndx_to_cu_tu_entry (unsigned int shndx)
+ {
+- if (shndx_pool_used >= shndx_pool_size)
+- {
+- error (_("Internal error: out of space in the shndx pool.\n"));
+- return;
+- }
+ shndx_pool [shndx_pool_used++] = shndx;
+ }
+
+ static void
+ end_cu_tu_entry (void)
+ {
+- if (shndx_pool_used >= shndx_pool_size)
+- {
+- error (_("Internal error: out of space in the shndx pool.\n"));
+- return;
+- }
+ shndx_pool [shndx_pool_used++] = 0;
+ }
+
+@@ -10773,53 +10763,55 @@ process_cu_tu_index (struct dwarf_section *section, int do_display)
+
+ if (version == 1)
+ {
++ unsigned char *shndx_list;
++ unsigned int shndx;
++
+ if (!do_display)
+- prealloc_cu_tu_list ((limit - ppool) / 4);
+- for (i = 0; i < nslots; i++)
+ {
+- unsigned char *shndx_list;
+- unsigned int shndx;
+-
+- SAFE_BYTE_GET (signature, phash, 8, limit);
+- if (signature != 0)
++ prealloc_cu_tu_list ((limit - ppool) / 4);
++ for (shndx_list = ppool + 4; shndx_list <= limit - 4; shndx_list += 4)
+ {
+- SAFE_BYTE_GET (j, pindex, 4, limit);
+- shndx_list = ppool + j * 4;
+- /* PR 17531: file: 705e010d. */
+- if (shndx_list < ppool)
+- {
+- warn (_("Section index pool located before start of section\n"));
+- return 0;
+- }
++ shndx = byte_get (shndx_list, 4);
++ add_shndx_to_cu_tu_entry (shndx);
++ }
++ end_cu_tu_entry ();
++ }
++ else
++ for (i = 0; i < nslots; i++)
++ {
++ SAFE_BYTE_GET (signature, phash, 8, limit);
++ if (signature != 0)
++ {
++ SAFE_BYTE_GET (j, pindex, 4, limit);
++ shndx_list = ppool + j * 4;
++ /* PR 17531: file: 705e010d. */
++ if (shndx_list < ppool)
++ {
++ warn (_("Section index pool located before start of section\n"));
++ return 0;
++ }
+
+- if (do_display)
+ printf (_(" [%3d] Signature: 0x%s Sections: "),
+ i, dwarf_vmatoa ("x", signature));
+- for (;;)
+- {
+- if (shndx_list >= limit)
+- {
+- warn (_("Section %s too small for shndx pool\n"),
+- section->name);
+- return 0;
+- }
+- SAFE_BYTE_GET (shndx, shndx_list, 4, limit);
+- if (shndx == 0)
+- break;
+- if (do_display)
++ for (;;)
++ {
++ if (shndx_list >= limit)
++ {
++ warn (_("Section %s too small for shndx pool\n"),
++ section->name);
++ return 0;
++ }
++ SAFE_BYTE_GET (shndx, shndx_list, 4, limit);
++ if (shndx == 0)
++ break;
+ printf (" %d", shndx);
+- else
+- add_shndx_to_cu_tu_entry (shndx);
+- shndx_list += 4;
+- }
+- if (do_display)
++ shndx_list += 4;
++ }
+ printf ("\n");
+- else
+- end_cu_tu_entry ();
+- }
+- phash += 8;
+- pindex += 4;
+- }
++ }
++ phash += 8;
++ pindex += 4;
++ }
+ }
+ else if (version == 2)
+ {
diff --git a/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-1.patch b/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-1.patch
new file mode 100644
index 0000000000..3db4385e13
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-1.patch
@@ -0,0 +1,147 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Tue, 24 May 2022 00:02:14 +0000 (+0930)
+Subject: PR29169, invalid read displaying fuzzed .gdb_index
+X-Git-Tag: binutils-2_39~530
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=244e19c79111eed017ee38ab1d44fb2a6cd1b636
+
+PR29169, invalid read displaying fuzzed .gdb_index
+
+ PR 29169
+ * dwarf.c (display_gdb_index): Combine sanity checks. Calculate
+ element counts, not word counts.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=244e19c79111eed017ee38ab1d44fb2a6cd1b636]
+
+CVE: CVE-2022-45703
+
+Signed-off-by: yash shinde <yash.shinde@windriver.com>
+
+---
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index 7de6f28161f..c855972a12f 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -10406,7 +10406,7 @@ display_gdb_index (struct dwarf_section *section,
+ uint32_t cu_list_offset, tu_list_offset;
+ uint32_t address_table_offset, symbol_table_offset, constant_pool_offset;
+ unsigned int cu_list_elements, tu_list_elements;
+- unsigned int address_table_size, symbol_table_slots;
++ unsigned int address_table_elements, symbol_table_slots;
+ unsigned char *cu_list, *tu_list;
+ unsigned char *address_table, *symbol_table, *constant_pool;
+ unsigned int i;
+@@ -10454,48 +10454,19 @@ display_gdb_index (struct dwarf_section *section,
+ || tu_list_offset > section->size
+ || address_table_offset > section->size
+ || symbol_table_offset > section->size
+- || constant_pool_offset > section->size)
++ || constant_pool_offset > section->size
++ || tu_list_offset < cu_list_offset
++ || address_table_offset < tu_list_offset
++ || symbol_table_offset < address_table_offset
++ || constant_pool_offset < symbol_table_offset)
+ {
+ warn (_("Corrupt header in the %s section.\n"), section->name);
+ return 0;
+ }
+
+- /* PR 17531: file: 418d0a8a. */
+- if (tu_list_offset < cu_list_offset)
+- {
+- warn (_("TU offset (%x) is less than CU offset (%x)\n"),
+- tu_list_offset, cu_list_offset);
+- return 0;
+- }
+-
+- cu_list_elements = (tu_list_offset - cu_list_offset) / 8;
+-
+- if (address_table_offset < tu_list_offset)
+- {
+- warn (_("Address table offset (%x) is less than TU offset (%x)\n"),
+- address_table_offset, tu_list_offset);
+- return 0;
+- }
+-
+- tu_list_elements = (address_table_offset - tu_list_offset) / 8;
+-
+- /* PR 17531: file: 18a47d3d. */
+- if (symbol_table_offset < address_table_offset)
+- {
+- warn (_("Symbol table offset (%x) is less then Address table offset (%x)\n"),
+- symbol_table_offset, address_table_offset);
+- return 0;
+- }
+-
+- address_table_size = symbol_table_offset - address_table_offset;
+-
+- if (constant_pool_offset < symbol_table_offset)
+- {
+- warn (_("Constant pool offset (%x) is less than symbol table offset (%x)\n"),
+- constant_pool_offset, symbol_table_offset);
+- return 0;
+- }
+-
++ cu_list_elements = (tu_list_offset - cu_list_offset) / 16;
++ tu_list_elements = (address_table_offset - tu_list_offset) / 24;
++ address_table_elements = (symbol_table_offset - address_table_offset) / 20;
+ symbol_table_slots = (constant_pool_offset - symbol_table_offset) / 8;
+
+ cu_list = start + cu_list_offset;
+@@ -10504,31 +10475,25 @@ display_gdb_index (struct dwarf_section *section,
+ symbol_table = start + symbol_table_offset;
+ constant_pool = start + constant_pool_offset;
+
+- if (address_table_offset + address_table_size > section->size)
+- {
+- warn (_("Address table extends beyond end of section.\n"));
+- return 0;
+- }
+-
+ printf (_("\nCU table:\n"));
+- for (i = 0; i < cu_list_elements; i += 2)
++ for (i = 0; i < cu_list_elements; i++)
+ {
+- uint64_t cu_offset = byte_get_little_endian (cu_list + i * 8, 8);
+- uint64_t cu_length = byte_get_little_endian (cu_list + i * 8 + 8, 8);
++ uint64_t cu_offset = byte_get_little_endian (cu_list + i * 16, 8);
++ uint64_t cu_length = byte_get_little_endian (cu_list + i * 16 + 8, 8);
+
+- printf (_("[%3u] 0x%lx - 0x%lx\n"), i / 2,
++ printf (_("[%3u] 0x%lx - 0x%lx\n"), i,
+ (unsigned long) cu_offset,
+ (unsigned long) (cu_offset + cu_length - 1));
+ }
+
+ printf (_("\nTU table:\n"));
+- for (i = 0; i < tu_list_elements; i += 3)
++ for (i = 0; i < tu_list_elements; i++)
+ {
+- uint64_t tu_offset = byte_get_little_endian (tu_list + i * 8, 8);
+- uint64_t type_offset = byte_get_little_endian (tu_list + i * 8 + 8, 8);
+- uint64_t signature = byte_get_little_endian (tu_list + i * 8 + 16, 8);
++ uint64_t tu_offset = byte_get_little_endian (tu_list + i * 24, 8);
++ uint64_t type_offset = byte_get_little_endian (tu_list + i * 24 + 8, 8);
++ uint64_t signature = byte_get_little_endian (tu_list + i * 24 + 16, 8);
+
+- printf (_("[%3u] 0x%lx 0x%lx "), i / 3,
++ printf (_("[%3u] 0x%lx 0x%lx "), i,
+ (unsigned long) tu_offset,
+ (unsigned long) type_offset);
+ print_dwarf_vma (signature, 8);
+@@ -10536,12 +10501,11 @@ display_gdb_index (struct dwarf_section *section,
+ }
+
+ printf (_("\nAddress table:\n"));
+- for (i = 0; i < address_table_size && i <= address_table_size - (2 * 8 + 4);
+- i += 2 * 8 + 4)
++ for (i = 0; i < address_table_elements; i++)
+ {
+- uint64_t low = byte_get_little_endian (address_table + i, 8);
+- uint64_t high = byte_get_little_endian (address_table + i + 8, 8);
+- uint32_t cu_index = byte_get_little_endian (address_table + i + 16, 4);
++ uint64_t low = byte_get_little_endian (address_table + i * 20, 8);
++ uint64_t high = byte_get_little_endian (address_table + i * 20 + 8, 8);
++ uint32_t cu_index = byte_get_little_endian (address_table + i + 20 + 16, 4);
+
+ print_dwarf_vma (low, 8);
+ print_dwarf_vma (high, 8);
diff --git a/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-2.patch b/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-2.patch
new file mode 100644
index 0000000000..1fac9739dd
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-45703-2.patch
@@ -0,0 +1,31 @@
+From 69bfd1759db41c8d369f9dcc98a135c5a5d97299 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 18 Nov 2022 11:29:13 +1030
+Subject: [PATCH] PR29799 heap buffer overflow in display_gdb_index
+ dwarf.c:10548
+
+ PR 29799
+ * dwarf.c (display_gdb_index): Typo fix.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=blobdiff_plain;f=binutils/dwarf.c;h=4bba8dfb81a6df49f5e61b3fae99dd545cc5c7dd;hp=7730293326ac1049451eb4a037ac86d827030700;hb=69bfd1759db41c8d369f9dcc98a135c5a5d97299;hpb=7828dfa93b210b6bbc6596e6e096cc150a9f8aa4]
+
+CVE: CVE-2022-45703
+
+Signed-off-by: yash shinde <yash.shinde@windriver.com>
+
+---
+ binutils/dwarf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/binutils/dwarf.c b/binutils/dwarf.c
+index 7730293326a..4bba8dfb81a 100644
+--- a/binutils/dwarf.c
++++ b/binutils/dwarf.c
+@@ -10562,7 +10562,7 @@ display_gdb_index (struct dwarf_section
+ {
+ uint64_t low = byte_get_little_endian (address_table + i * 20, 8);
+ uint64_t high = byte_get_little_endian (address_table + i * 20 + 8, 8);
+- uint32_t cu_index = byte_get_little_endian (address_table + i + 20 + 16, 4);
++ uint32_t cu_index = byte_get_little_endian (address_table + i * 20 + 16, 4);
+
+ print_dwarf_vma (low, 8);
+ print_dwarf_vma (high, 8);
diff --git a/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-47695.patch b/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-47695.patch
new file mode 100644
index 0000000000..f2e9cea027
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0031-CVE-2022-47695.patch
@@ -0,0 +1,58 @@
+From 2f7426b9bb2d2450b32cad3d79fab9abe3ec42bb Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Sun, 4 Dec 2022 22:15:40 +1030
+Subject: [PATCH] PR29846, segmentation fault in objdump.c compare_symbols
+
+Fixes a fuzzed object file problem where plt relocs were manipulated
+in such a way that two synthetic symbols were generated at the same
+plt location. Won't occur in real object files.
+
+ PR 29846
+ PR 20337
+ * objdump.c (compare_symbols): Test symbol flags to exclude
+ section and synthetic symbols before attempting to check flavour.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=3d3af4ba39e892b1c544d667ca241846bc3df386]
+
+CVE: CVE-2022-47695
+
+Signed-off-by: Chaitanya Vadrevu <chaitanya.vadrevu@ni.com>
+---
+ binutils/objdump.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+diff --git a/binutils/objdump.c b/binutils/objdump.c
+index 08a0fe521d8..21f75f4db40 100644
+--- a/binutils/objdump.c
++++ b/binutils/objdump.c
+@@ -1165,20 +1165,17 @@ compare_symbols (const void *ap, const void *bp)
+ return 1;
+ }
+
+- if (bfd_get_flavour (bfd_asymbol_bfd (a)) == bfd_target_elf_flavour
++ /* Sort larger size ELF symbols before smaller. See PR20337. */
++ bfd_vma asz = 0;
++ if ((a->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0
++ && bfd_get_flavour (bfd_asymbol_bfd (a)) == bfd_target_elf_flavour)
++ asz = ((elf_symbol_type *) a)->internal_elf_sym.st_size;
++ bfd_vma bsz = 0;
++ if ((b->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0
+ && bfd_get_flavour (bfd_asymbol_bfd (b)) == bfd_target_elf_flavour)
+- {
+- bfd_vma asz, bsz;
+-
+- asz = 0;
+- if ((a->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
+- asz = ((elf_symbol_type *) a)->internal_elf_sym.st_size;
+- bsz = 0;
+- if ((b->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
+- bsz = ((elf_symbol_type *) b)->internal_elf_sym.st_size;
+- if (asz != bsz)
+- return asz > bsz ? -1 : 1;
+- }
++ bsz = ((elf_symbol_type *) b)->internal_elf_sym.st_size;
++ if (asz != bsz)
++ return asz > bsz ? -1 : 1;
+
+ /* Symbols that start with '.' might be section names, so sort them
+ after symbols that don't start with '.'. */
diff --git a/meta/recipes-devtools/binutils/binutils/0032-CVE-2022-47010.patch b/meta/recipes-devtools/binutils/binutils/0032-CVE-2022-47010.patch
new file mode 100644
index 0000000000..9648033e67
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0032-CVE-2022-47010.patch
@@ -0,0 +1,38 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 20 Jun 2022 01:09:31 +0000 (+0930)
+Subject: PR29262, memory leak in pr_function_type
+X-Git-Tag: binutils-2_39~224
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0d02e70b197c786f26175b9a73f94e01d14abdab
+
+PR29262, memory leak in pr_function_type
+
+ PR 29262
+ * prdbg.c (pr_function_type): Free "s" on failure path.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0d02e70b197c786f26175b9a73f94e01d14abdab]
+
+CVE: CVE-2022-47010
+
+Signed-off-by: Sanjana Venkatesh <Sanjana.Venkatesh@windriver.com>
+
+---
+
+diff --git a/binutils/prdbg.c b/binutils/prdbg.c
+index c1e41628d26..bb42a5b6c2d 100644
+--- a/binutils/prdbg.c
++++ b/binutils/prdbg.c
+@@ -742,12 +742,9 @@ pr_function_type (void *p, int argcount, bool varargs)
+
+ strcat (s, ")");
+
+- if (! substitute_type (info, s))
+- return false;
+-
++ bool ret = substitute_type (info, s);
+ free (s);
+-
+- return true;
++ return ret;
+ }
+
+ /* Turn the top type on the stack into a reference to that type. */
diff --git a/meta/recipes-devtools/binutils/binutils/0033-CVE-2022-47007.patch b/meta/recipes-devtools/binutils/binutils/0033-CVE-2022-47007.patch
new file mode 100644
index 0000000000..cc6dfe684b
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0033-CVE-2022-47007.patch
@@ -0,0 +1,34 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 16 Jun 2022 23:30:41 +0000 (+0930)
+Subject: PR29254, memory leak in stab_demangle_v3_arg
+X-Git-Tag: binutils-2_39~237
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0ebc886149c22aceaf8ed74267821a59ca9d03eb
+
+PR29254, memory leak in stab_demangle_v3_arg
+
+ PR 29254
+ * stabs.c (stab_demangle_v3_arg): Free dt on failure path.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0ebc886149c22aceaf8ed74267821a59ca9d03eb]
+
+CVE: CVE-2022-47007
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+---
+
+diff --git a/binutils/stabs.c b/binutils/stabs.c
+index 2b5241637c1..796ff85b86a 100644
+--- a/binutils/stabs.c
++++ b/binutils/stabs.c
+@@ -5467,7 +5467,10 @@ stab_demangle_v3_arg (void *dhandle, struct stab_handle *info,
+ dc->u.s_binary.right,
+ &varargs);
+ if (pargs == NULL)
+- return NULL;
++ {
++ free (dt);
++ return NULL;
++ }
+
+ return debug_make_function_type (dhandle, dt, pargs, varargs);
+ }
diff --git a/meta/recipes-devtools/binutils/binutils/0034-CVE-2022-48064.patch b/meta/recipes-devtools/binutils/binutils/0034-CVE-2022-48064.patch
new file mode 100644
index 0000000000..b0840366c7
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0034-CVE-2022-48064.patch
@@ -0,0 +1,57 @@
+From: Alan Modra <amodra@gmail.com>
+Date: Tue, 20 Dec 2022 13:17:03 +0000 (+1030)
+Subject: PR29922, SHT_NOBITS section avoids section size sanity check
+X-Git-Tag: binutils-2_40~202
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=8f2c64de86bc3d7556121fe296dd679000283931
+
+PR29922, SHT_NOBITS section avoids section size sanity check
+
+ PR 29922
+ * dwarf2.c (find_debug_info): Ignore sections without
+ SEC_HAS_CONTENTS.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=8f2c64de86bc3d7556121fe296dd679000283931]
+
+CVE: CVE-2022-48064
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+---
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 95f45708e9d..0cd8152ee6e 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -4831,16 +4831,19 @@ find_debug_info (bfd *abfd, const struct dwarf_debug_section *debug_sections,
+ {
+ look = debug_sections[debug_info].uncompressed_name;
+ msec = bfd_get_section_by_name (abfd, look);
+- if (msec != NULL)
++ /* Testing SEC_HAS_CONTENTS is an anti-fuzzer measure. Of
++ course debug sections always have contents. */
++ if (msec != NULL && (msec->flags & SEC_HAS_CONTENTS) != 0)
+ return msec;
+
+ look = debug_sections[debug_info].compressed_name;
+ msec = bfd_get_section_by_name (abfd, look);
+- if (msec != NULL)
++ if (msec != NULL && (msec->flags & SEC_HAS_CONTENTS) != 0)
+ return msec;
+
+ for (msec = abfd->sections; msec != NULL; msec = msec->next)
+- if (startswith (msec->name, GNU_LINKONCE_INFO))
++ if ((msec->flags & SEC_HAS_CONTENTS) != 0
++ && startswith (msec->name, GNU_LINKONCE_INFO))
+ return msec;
+
+ return NULL;
+@@ -4848,6 +4851,9 @@ find_debug_info (bfd *abfd, const struct dwarf_debug_section *debug_sections,
+
+ for (msec = after_sec->next; msec != NULL; msec = msec->next)
+ {
++ if ((msec->flags & SEC_HAS_CONTENTS) == 0)
++ continue;
++
+ look = debug_sections[debug_info].uncompressed_name;
+ if (strcmp (msec->name, look) == 0)
+ return msec;
diff --git a/meta/recipes-devtools/binutils/binutils/0035-CVE-2023-39129.patch b/meta/recipes-devtools/binutils/binutils/0035-CVE-2023-39129.patch
new file mode 100644
index 0000000000..63fb44d59a
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0035-CVE-2023-39129.patch
@@ -0,0 +1,50 @@
+From: Keith Seitz <keiths@...>
+Date: Wed, 2 Aug 2023 15:35:11 +0000 (-0700)
+Subject: Verify COFF symbol stringtab offset
+X-Git-Tag: gdb-14-branchpoint~473
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=58abdf887821a5da09ba184c6e400a3bc5cccd5a
+
+Verify COFF symbol stringtab offset
+
+This patch addresses an issue with malformed/fuzzed debug information that
+was recently reported in gdb/30639. That bug specifically deals with
+an ASAN issue, but the reproducer provided by the reporter causes
+another failure outside of ASAN:
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=58abdf887821a5da09ba184c6e400a3bc5cccd5a]
+
+CVE: CVE-2023-39129
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+diff --git a/gdb/coffread.c b/gdb/coffread.c
+--- a/gdb/coffread.c
++++ b/gdb/coffread.c
+@@ -159,6 +160,7 @@ static file_ptr linetab_offset;
+ static file_ptr linetab_size;
+
+ static char *stringtab = NULL;
++static long stringtab_length = 0;
+
+ extern void stabsread_clear_cache (void);
+
+@@ -1303,6 +1298,7 @@ init_stringtab (bfd *abfd, file_ptr offset, gdb::unique_xmalloc_ptr<char> *stora
+ /* This is in target format (probably not very useful, and not
+ currently used), not host format. */
+ memcpy (stringtab, lengthbuf, sizeof lengthbuf);
++ stringtab_length = length;
+ if (length == sizeof length) /* Empty table -- just the count. */
+ return 0;
+
+@@ -1322,8 +1318,9 @@ getsymname (struct internal_syment *symbol_entry)
+
+ if (symbol_entry->_n._n_n._n_zeroes == 0)
+ {
+- /* FIXME: Probably should be detecting corrupt symbol files by
+- seeing whether offset points to within the stringtab. */
++ if (symbol_entry->_n._n_n._n_offset > stringtab_length)
++ error (_("COFF Error: string table offset (%ld) outside string table (length %ld)"),
++ symbol_entry->_n._n_n._n_offset, stringtab_length);
+ result = stringtab + symbol_entry->_n._n_n._n_offset;
+ }
+ else
diff --git a/meta/recipes-devtools/binutils/binutils/0036-CVE-2023-39130.patch b/meta/recipes-devtools/binutils/binutils/0036-CVE-2023-39130.patch
new file mode 100644
index 0000000000..bfd5b18d7d
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0036-CVE-2023-39130.patch
@@ -0,0 +1,326 @@
+From 2db20b97f1dc3e5dce3d6ed74a8a62f0dede8c80 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 9 Aug 2023 09:58:36 +0930
+Subject: [PATCH] gdb: warn unused result for bfd IO functions
+
+This fixes the compilation warnings introduced by my bfdio.c patch.
+
+The removed bfd_seeks in coff_symfile_read date back to 1994, commit
+7f4c859520, prior to which the file used stdio rather than bfd to read
+symbols. Since it now uses bfd to read the file there should be no
+need to synchronise to bfd's idea of the file position. I also fixed
+a potential uninitialised memory access.
+
+Approved-By: Andrew Burgess <aburgess@redhat.com>
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=2db20b97f1dc3e5dce3d6ed74a8a62f0dede8c80]
+CVE: CVE-2023-39130
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+---
+ gdb/coff-pe-read.c | 114 +++++++++++++++++++++++++++++----------------
+ gdb/coffread.c | 27 ++---------
+ gdb/dbxread.c | 7 +--
+ gdb/xcoffread.c | 5 +-
+ 4 files changed, 85 insertions(+), 68 deletions(-)
+
+diff --git a/gdb/coff-pe-read.c b/gdb/coff-pe-read.c
+--- a/gdb/coff-pe-read.c
++++ b/gdb/coff-pe-read.c
+@@ -291,23 +291,31 @@ read_pe_truncate_name (char *dll_name)
+
+ /* Low-level support functions, direct from the ld module pe-dll.c. */
+ static unsigned int
+-pe_get16 (bfd *abfd, int where)
++pe_get16 (bfd *abfd, int where, bool *fail)
+ {
+ unsigned char b[2];
+
+- bfd_seek (abfd, (file_ptr) where, SEEK_SET);
+- bfd_bread (b, (bfd_size_type) 2, abfd);
++ if (bfd_seek (abfd, where, SEEK_SET) != 0
++ || bfd_bread (b, 2, abfd) != 2)
++ {
++ *fail = true;
++ return 0;
++ }
+ return b[0] + (b[1] << 8);
+ }
+
+ static unsigned int
+-pe_get32 (bfd *abfd, int where)
++pe_get32 (bfd *abfd, int where, bool *fail)
+ {
+ unsigned char b[4];
+
+- bfd_seek (abfd, (file_ptr) where, SEEK_SET);
+- bfd_bread (b, (bfd_size_type) 4, abfd);
+- return b[0] + (b[1] << 8) + (b[2] << 16) + (b[3] << 24);
++ if (bfd_seek (abfd, where, SEEK_SET) != 0
++ || bfd_bread (b, 4, abfd) != 4)
++ {
++ *fail = true;
++ return 0;
++ }
++ return b[0] + (b[1] << 8) + (b[2] << 16) + ((unsigned) b[3] << 24);
+ }
+
+ static unsigned int
+@@ -323,7 +331,7 @@ pe_as32 (void *ptr)
+ {
+ unsigned char *b = (unsigned char *) ptr;
+
+- return b[0] + (b[1] << 8) + (b[2] << 16) + (b[3] << 24);
++ return b[0] + (b[1] << 8) + (b[2] << 16) + ((unsigned) b[3] << 24);
+ }
+
+ /* Read the (non-debug) export symbol table from a portable
+@@ -376,37 +384,50 @@ read_pe_exported_syms (minimal_symbol_re
+ || strcmp (target, "pei-i386") == 0
+ || strcmp (target, "pe-arm-wince-little") == 0
+ || strcmp (target, "pei-arm-wince-little") == 0);
++
++ /* Possibly print a debug message about DLL not having a valid format. */
++ auto maybe_print_debug_msg = [&] () -> void {
++ if (debug_coff_pe_read)
++ fprintf_unfiltered (gdb_stdlog, _("%s doesn't appear to be a DLL\n"),
++ bfd_get_filename (dll));
++ };
++
+ if (!is_pe32 && !is_pe64)
+- {
+- /* This is not a recognized PE format file. Abort now, because
+- the code is untested on anything else. *FIXME* test on
+- further architectures and loosen or remove this test. */
+- return;
+- }
++ return maybe_print_debug_msg ();
+
+ /* Get pe_header, optional header and numbers of export entries. */
+- pe_header_offset = pe_get32 (dll, 0x3c);
++ bool fail = false;
++ pe_header_offset = pe_get32 (dll, 0x3c, &fail);
++ if (fail)
++ return maybe_print_debug_msg ();
+ opthdr_ofs = pe_header_offset + 4 + 20;
+ if (is_pe64)
+- num_entries = pe_get32 (dll, opthdr_ofs + 108);
++ num_entries = pe_get32 (dll, opthdr_ofs + 108, &fail);
+ else
+- num_entries = pe_get32 (dll, opthdr_ofs + 92);
++ num_entries = pe_get32 (dll, opthdr_ofs + 92, &fail);
++ if (fail)
++ return maybe_print_debug_msg ();
+
+ if (num_entries < 1) /* No exports. */
+ return;
+ if (is_pe64)
+ {
+- export_opthdrrva = pe_get32 (dll, opthdr_ofs + 112);
+- export_opthdrsize = pe_get32 (dll, opthdr_ofs + 116);
++ export_opthdrrva = pe_get32 (dll, opthdr_ofs + 112, &fail);
++ export_opthdrsize = pe_get32 (dll, opthdr_ofs + 116, &fail);
+ }
+ else
+ {
+- export_opthdrrva = pe_get32 (dll, opthdr_ofs + 96);
+- export_opthdrsize = pe_get32 (dll, opthdr_ofs + 100);
++ export_opthdrrva = pe_get32 (dll, opthdr_ofs + 96, &fail);
++ export_opthdrsize = pe_get32 (dll, opthdr_ofs + 100, &fail);
+ }
+- nsections = pe_get16 (dll, pe_header_offset + 4 + 2);
++ if (fail)
++ return maybe_print_debug_msg ();
++
++ nsections = pe_get16 (dll, pe_header_offset + 4 + 2, &fail);
+ secptr = (pe_header_offset + 4 + 20 +
+- pe_get16 (dll, pe_header_offset + 4 + 16));
++ pe_get16 (dll, pe_header_offset + 4 + 16, &fail));
++ if (fail)
++ return maybe_print_debug_msg ();
+ expptr = 0;
+ export_size = 0;
+
+@@ -415,12 +436,13 @@ read_pe_exported_syms (minimal_symbol_re
+ {
+ char sname[8];
+ unsigned long secptr1 = secptr + 40 * i;
+- unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+- unsigned long vsize = pe_get32 (dll, secptr1 + 16);
+- unsigned long fptr = pe_get32 (dll, secptr1 + 20);
+-
+- bfd_seek (dll, (file_ptr) secptr1, SEEK_SET);
+- bfd_bread (sname, (bfd_size_type) sizeof (sname), dll);
++ unsigned long vaddr = pe_get32 (dll, secptr1 + 12, &fail);
++ unsigned long vsize = pe_get32 (dll, secptr1 + 16, &fail);
++ unsigned long fptr = pe_get32 (dll, secptr1 + 20, &fail);
++
++ if (fail
++ || bfd_seek (dll, secptr1, SEEK_SET) != 0
++ || bfd_bread (sname, sizeof (sname), dll) != sizeof (sname))
+
+ if ((strcmp (sname, ".edata") == 0)
+ || (vaddr <= export_opthdrrva && export_opthdrrva < vaddr + vsize))
+@@ -461,16 +483,18 @@ read_pe_exported_syms (minimal_symbol_re
+ for (i = 0; i < nsections; i++)
+ {
+ unsigned long secptr1 = secptr + 40 * i;
+- unsigned long vsize = pe_get32 (dll, secptr1 + 8);
+- unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+- unsigned long characteristics = pe_get32 (dll, secptr1 + 36);
++ unsigned long vsize = pe_get32 (dll, secptr1 + 8, &fail);
++ unsigned long vaddr = pe_get32 (dll, secptr1 + 12, &fail);
++ unsigned long characteristics = pe_get32 (dll, secptr1 + 36, &fail);
+ char sec_name[SCNNMLEN + 1];
+ int sectix;
+ unsigned int bfd_section_index;
+ asection *section;
+
+- bfd_seek (dll, (file_ptr) secptr1 + 0, SEEK_SET);
+- bfd_bread (sec_name, (bfd_size_type) SCNNMLEN, dll);
++ if (fail
++ || bfd_seek (dll, secptr1 + 0, SEEK_SET) != 0
++ || bfd_bread (sec_name, SCNNMLEN, dll) != SCNNMLEN)
++ return maybe_print_debug_msg ();
+ sec_name[SCNNMLEN] = '\0';
+
+ sectix = read_pe_section_index (sec_name);
+@@ -509,8 +533,9 @@ read_pe_exported_syms (minimal_symbol_re
+ gdb::def_vector<unsigned char> expdata_storage (export_size);
+ expdata = expdata_storage.data ();
+
+- bfd_seek (dll, (file_ptr) expptr, SEEK_SET);
+- bfd_bread (expdata, (bfd_size_type) export_size, dll);
++ if (bfd_seek (dll, expptr, SEEK_SET) != 0
++ || bfd_bread (expdata, export_size, dll) != export_size)
++ return maybe_print_debug_msg ();
+ erva = expdata - export_rva;
+
+ nexp = pe_as32 (expdata + 24);
+@@ -658,20 +683,27 @@ pe_text_section_offset (struct bfd *abfd
+ }
+
+ /* Get pe_header, optional header and numbers of sections. */
+- pe_header_offset = pe_get32 (abfd, 0x3c);
+- nsections = pe_get16 (abfd, pe_header_offset + 4 + 2);
++ bool fail = false;
++ pe_header_offset = pe_get32 (abfd, 0x3c, &fail);
++ if (fail)
++ return DEFAULT_COFF_PE_TEXT_SECTION_OFFSET;
++ nsections = pe_get16 (abfd, pe_header_offset + 4 + 2, &fail);
+ secptr = (pe_header_offset + 4 + 20 +
+- pe_get16 (abfd, pe_header_offset + 4 + 16));
++ pe_get16 (abfd, pe_header_offset + 4 + 16, &fail));
++ if (fail)
++ return DEFAULT_COFF_PE_TEXT_SECTION_OFFSET;
+
+ /* Get the rva and size of the export section. */
+ for (i = 0; i < nsections; i++)
+ {
+ char sname[SCNNMLEN + 1];
+ unsigned long secptr1 = secptr + 40 * i;
+- unsigned long vaddr = pe_get32 (abfd, secptr1 + 12);
++ unsigned long vaddr = pe_get32 (abfd, secptr1 + 12, &fail);
+
+- bfd_seek (abfd, (file_ptr) secptr1, SEEK_SET);
+- bfd_bread (sname, (bfd_size_type) SCNNMLEN, abfd);
++ if (fail
++ || bfd_seek (abfd, secptr1, SEEK_SET) != 0
++ || bfd_bread (sname, SCNNMLEN, abfd) != SCNNMLEN)
++ return DEFAULT_COFF_PE_TEXT_SECTION_OFFSET;
+ sname[SCNNMLEN] = '\0';
+ if (strcmp (sname, ".text") == 0)
+ return vaddr;
+diff --git a/gdb/coffread.c b/gdb/coffread.c
+--- a/gdb/coffread.c
++++ b/gdb/coffread.c
+@@ -690,8 +690,6 @@ coff_symfile_read (struct objfile *objfi
+
+ /* FIXME: dubious. Why can't we use something normal like
+ bfd_get_section_contents? */
+- bfd_seek (abfd, abfd->where, 0);
+-
+ stabstrsize = bfd_section_size (info->stabstrsect);
+
+ coffstab_build_psymtabs (objfile,
+@@ -780,22 +778,6 @@ coff_symtab_read (minimal_symbol_reader
+
+ scoped_free_pendings free_pending;
+
+- /* Work around a stdio bug in SunOS4.1.1 (this makes me nervous....
+- it's hard to know I've really worked around it. The fix should
+- be harmless, anyway). The symptom of the bug is that the first
+- fread (in read_one_sym), will (in my example) actually get data
+- from file offset 268, when the fseek was to 264 (and ftell shows
+- 264). This causes all hell to break loose. I was unable to
+- reproduce this on a short test program which operated on the same
+- file, performing (I think) the same sequence of operations.
+-
+- It stopped happening when I put in this (former) rewind().
+-
+- FIXME: Find out if this has been reported to Sun, whether it has
+- been fixed in a later release, etc. */
+-
+- bfd_seek (objfile->obfd, 0, 0);
+-
+ /* Position to read the symbol table. */
+ val = bfd_seek (objfile->obfd, symtab_offset, 0);
+ if (val < 0)
+@@ -1285,12 +1267,13 @@ init_stringtab (bfd *abfd, file_ptr offs
+ if (bfd_seek (abfd, offset, 0) < 0)
+ return -1;
+
+- val = bfd_bread ((char *) lengthbuf, sizeof lengthbuf, abfd);
+- length = bfd_h_get_32 (symfile_bfd, lengthbuf);
+-
++ val = bfd_bread (lengthbuf, sizeof lengthbuf, abfd);
+ /* If no string table is needed, then the file may end immediately
+ after the symbols. Just return with `stringtab' set to null. */
+- if (val != sizeof lengthbuf || length < sizeof lengthbuf)
++ if (val != sizeof lengthbuf)
++ return 0;
++ length = bfd_h_get_32 (symfile_bfd, lengthbuf);
++ if (length < sizeof lengthbuf)
+ return 0;
+
+ storage->reset ((char *) xmalloc (length));
+diff --git a/gdb/dbxread.c b/gdb/dbxread.c
+--- a/gdb/dbxread.c
++++ b/gdb/dbxread.c
+@@ -812,7 +812,8 @@ stabs_seek (int sym_offset)
+ symbuf_left -= sym_offset;
+ }
+ else
+- bfd_seek (symfile_bfd, sym_offset, SEEK_CUR);
++ if (bfd_seek (symfile_bfd, sym_offset, SEEK_CUR) != 0)
++ perror_with_name (bfd_get_filename (symfile_bfd));
+ }
+
+ #define INTERNALIZE_SYMBOL(intern, extern, abfd) \
+@@ -2095,8 +2096,8 @@ dbx_expand_psymtab (legacy_psymtab *pst,
+ symbol_size = SYMBOL_SIZE (pst);
+
+ /* Read in this file's symbols. */
+- bfd_seek (objfile->obfd, SYMBOL_OFFSET (pst), SEEK_SET);
+- read_ofile_symtab (objfile, pst);
++ if (bfd_seek (objfile->obfd, SYMBOL_OFFSET (pst), SEEK_SET) == 0)
++ read_ofile_symtab (objfile, pst);
+ }
+
+ pst->readin = true;
+diff --git a/gdb/xcoffread.c b/gdb/xcoffread.c
+--- a/gdb/xcoffread.c
++++ b/gdb/xcoffread.c
+@@ -865,8 +865,9 @@ enter_line_range (struct subfile *subfil
+
+ while (curoffset <= limit_offset)
+ {
+- bfd_seek (abfd, curoffset, SEEK_SET);
+- bfd_bread (ext_lnno, linesz, abfd);
++ if (bfd_seek (abfd, curoffset, SEEK_SET) != 0
++ || bfd_bread (ext_lnno, linesz, abfd) != linesz)
++ return;
+ bfd_coff_swap_lineno_in (abfd, ext_lnno, &int_lnno);
+
+ /* Find the address this line represents. */
+--
+2.39.3
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch
new file mode 100644
index 0000000000..ea2e030503
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch
@@ -0,0 +1,48 @@
+From 75393a2d54bcc40053e5262a3de9d70c5ebfbbfd Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Wed, 21 Dec 2022 11:51:23 +0000
+Subject: [PATCH] Fix an attempt to allocate an unreasonably large amount of
+ memory when parsing a corrupt ELF file.
+
+ PR 29924
+ * objdump.c (load_specific_debug_section): Check for excessively
+ large sections.
+
+Upstream-Status: Backport
+CVE: CVE-2022-48063
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ binutils/ChangeLog | 6 ++++++
+ binutils/objdump.c | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+Index: git/binutils/objdump.c
+===================================================================
+--- git.orig/binutils/objdump.c
++++ git/binutils/objdump.c
+@@ -3768,7 +3768,9 @@ load_specific_debug_section (enum dwarf_
+ section->size = bfd_section_size (sec);
+ /* PR 24360: On 32-bit hosts sizeof (size_t) < sizeof (bfd_size_type). */
+ alloced = amt = section->size + 1;
+- if (alloced != amt || alloced == 0)
++ if (alloced != amt
++ || alloced == 0
++ || (bfd_get_size (abfd) != 0 && alloced >= bfd_get_size (abfd)))
+ {
+ section->start = NULL;
+ free_debug_section (debug);
+Index: git/binutils/ChangeLog
+===================================================================
+--- git.orig/binutils/ChangeLog
++++ git/binutils/ChangeLog
+@@ -1,3 +1,9 @@
++2022-12-21 Nick Clifton <nickc@redhat.com>
++
++ PR 29924
++ * objdump.c (load_specific_debug_section): Check for excessively
++ large sections.
++
+ 2022-03-23 Nick Clifton <nickc@redhat.com>
+
+ Import patch from mainline:
diff --git a/meta/recipes-devtools/bootchart2/bootchart2/0001-bootchart2-support-usrmerge.patch b/meta/recipes-devtools/bootchart2/bootchart2/0001-bootchart2-support-usrmerge.patch
deleted file mode 100644
index 88597cf3a9..0000000000
--- a/meta/recipes-devtools/bootchart2/bootchart2/0001-bootchart2-support-usrmerge.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From b6d1a1ff2de363b1b76c8c70f77ae56a4e4d4b56 Mon Sep 17 00:00:00 2001
-From: Changqing Li <changqing.li@windriver.com>
-Date: Thu, 5 Sep 2019 18:37:31 +0800
-Subject: [PATCH] bootchart2: support usrmerge
-
-Upstream-Status: Inappropriate [oe-specific]
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
----
- Makefile | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/Makefile b/Makefile
-index 1cc2974..f988904 100644
---- a/Makefile
-+++ b/Makefile
-@@ -36,7 +36,7 @@ endif
- PY_SITEDIR ?= $(PY_LIBDIR)/site-packages
- LIBC_A_PATH = /usr$(LIBDIR)
- # Always lib, even on systems that otherwise use lib64
--SYSTEMD_UNIT_DIR = $(EARLY_PREFIX)/lib/systemd/system
-+SYSTEMD_UNIT_DIR ?= $(EARLY_PREFIX)/lib/systemd/system
- COLLECTOR = \
- collector/collector.o \
- collector/output.o \
-@@ -99,7 +99,7 @@ install-chroot:
- install -d $(DESTDIR)$(PKGLIBDIR)/tmpfs
-
- install-collector: all install-chroot
-- install -m 755 -D bootchartd $(DESTDIR)$(EARLY_PREFIX)/sbin/$(PROGRAM_PREFIX)bootchartd$(PROGRAM_SUFFIX)
-+ install -m 755 -D bootchartd $(DESTDIR)${BASE_SBINDIR}/$(PROGRAM_PREFIX)bootchartd$(PROGRAM_SUFFIX)
- install -m 644 -D bootchartd.conf $(DESTDIR)/etc/$(PROGRAM_PREFIX)bootchartd$(PROGRAM_SUFFIX).conf
- install -m 755 -D bootchart-collector $(DESTDIR)$(PKGLIBDIR)/$(PROGRAM_PREFIX)bootchart$(PROGRAM_SUFFIX)-collector
-
---
-2.7.4
-
diff --git a/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb b/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb
index b1628075a7..38a1c9d147 100644
--- a/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb
+++ b/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb
@@ -93,7 +93,6 @@ UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+\.\d+(\.\d+)*)"
SRC_URI = "git://github.com/xrmx/bootchart.git;branch=master;protocol=https \
file://bootchartd_stop.sh \
file://0001-collector-Allocate-space-on-heap-for-chunks.patch \
- file://0001-bootchart2-support-usrmerge.patch \
file://0001-bootchartd.in-make-sure-only-one-bootchartd-process.patch \
"
@@ -119,12 +118,11 @@ UPDATERCPN = "bootchartd-stop-initscript"
INITSCRIPT_NAME = "bootchartd_stop.sh"
INITSCRIPT_PARAMS = "start 99 2 3 4 5 ."
-EXTRA_OEMAKE = 'BASE_SBINDIR="${base_sbindir}"'
-
do_compile:prepend () {
export PY_LIBDIR="${libdir}/${PYTHON_DIR}"
export BINDIR="${bindir}"
- export LIBDIR="${base_libdir}"
+ export LIBDIR="/${baselib}"
+ export EARLY_PREFIX="${root_prefix}"
}
do_install () {
@@ -132,9 +130,8 @@ do_install () {
export PY_LIBDIR="${libdir}/${PYTHON_DIR}"
export BINDIR="${bindir}"
export DESTDIR="${D}"
- export LIBDIR="${base_libdir}"
- export PKGLIBDIR="${base_libdir}/bootchart"
- export SYSTEMD_UNIT_DIR="${systemd_system_unitdir}"
+ export LIBDIR="/${baselib}"
+ export EARLY_PREFIX="${root_prefix}"
oe_runmake install NO_PYTHON_COMPILE=1
install -d ${D}${sysconfdir}/init.d
diff --git a/meta/recipes-devtools/ccache/ccache/0001-build-Fix-FTBFS-with-not-yet-released-GCC-13.patch b/meta/recipes-devtools/ccache/ccache/0001-build-Fix-FTBFS-with-not-yet-released-GCC-13.patch
new file mode 100644
index 0000000000..d62e1ef26b
--- /dev/null
+++ b/meta/recipes-devtools/ccache/ccache/0001-build-Fix-FTBFS-with-not-yet-released-GCC-13.patch
@@ -0,0 +1,92 @@
+From 1523eaeff4669e421b3f60618b43c878e4860fe6 Mon Sep 17 00:00:00 2001
+From: Joel Rosdahl <joel@rosdahl.net>
+Date: Tue, 5 Jul 2022 21:42:58 +0200
+Subject: [PATCH] build: Fix FTBFS with not yet released GCC 13
+
+Reference: https://gcc.gnu.org/gcc-13/porting_to.html#header-dep-changes
+
+Fixes #1105.
+
+Upstream-Status: Backport [v4.7 https://github.com/ccache/ccache/commit/19ef6e267d38d4d8b3e11c915213472d5662d593]
+Signed-off-by: Martin Jansa <martin.jansa@gmail.com>
+---
+ src/Stat.hpp | 1 +
+ src/core/CacheEntryHeader.hpp | 2 ++
+ src/core/Sloppiness.hpp | 1 +
+ src/core/Statistics.hpp | 3 ++-
+ src/util/TextTable.hpp | 3 ++-
+ 5 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/src/Stat.hpp b/src/Stat.hpp
+index 2f56214a..074cdeeb 100644
+--- a/src/Stat.hpp
++++ b/src/Stat.hpp
+@@ -23,6 +23,7 @@
+ #include <sys/stat.h>
+ #include <sys/types.h>
+
++#include <cstdint>
+ #include <ctime>
+ #include <string>
+
+diff --git a/src/core/CacheEntryHeader.hpp b/src/core/CacheEntryHeader.hpp
+index 4c3e04c7..dcc32e1c 100644
+--- a/src/core/CacheEntryHeader.hpp
++++ b/src/core/CacheEntryHeader.hpp
+@@ -21,6 +21,8 @@
+ #include <compression/types.hpp>
+ #include <core/types.hpp>
+
++#include <cstdint>
++
+ // Cache entry format
+ // ==================
+ //
+diff --git a/src/core/Sloppiness.hpp b/src/core/Sloppiness.hpp
+index 917526bf..1ab31d71 100644
+--- a/src/core/Sloppiness.hpp
++++ b/src/core/Sloppiness.hpp
+@@ -18,6 +18,7 @@
+
+ #pragma once
+
++#include <cstdint>
+ #include <string>
+
+ namespace core {
+diff --git a/src/core/Statistics.hpp b/src/core/Statistics.hpp
+index 3e9ed816..54f32e9c 100644
+--- a/src/core/Statistics.hpp
++++ b/src/core/Statistics.hpp
+@@ -1,4 +1,4 @@
+-// Copyright (C) 2020-2021 Joel Rosdahl and other contributors
++// Copyright (C) 2020-2022 Joel Rosdahl and other contributors
+ //
+ // See doc/AUTHORS.adoc for a complete list of contributors.
+ //
+@@ -20,6 +20,7 @@
+
+ #include <core/StatisticsCounters.hpp>
+
++#include <cstdint>
+ #include <string>
+ #include <unordered_map>
+ #include <vector>
+diff --git a/src/util/TextTable.hpp b/src/util/TextTable.hpp
+index 05c0e0e5..60edee75 100644
+--- a/src/util/TextTable.hpp
++++ b/src/util/TextTable.hpp
+@@ -1,4 +1,4 @@
+-// Copyright (C) 2021 Joel Rosdahl and other contributors
++// Copyright (C) 2021-2022 Joel Rosdahl and other contributors
+ //
+ // See doc/AUTHORS.adoc for a complete list of contributors.
+ //
+@@ -18,6 +18,7 @@
+
+ #pragma once
+
++#include <cstdint>
+ #include <string>
+ #include <vector>
+
diff --git a/meta/recipes-devtools/ccache/ccache_4.6.bb b/meta/recipes-devtools/ccache/ccache_4.6.bb
index f019679cf1..d94c5d591a 100644
--- a/meta/recipes-devtools/ccache/ccache_4.6.bb
+++ b/meta/recipes-devtools/ccache/ccache_4.6.bb
@@ -11,7 +11,9 @@ LIC_FILES_CHKSUM = "file://LICENSE.adoc;md5=ff5327dc93e2b286c931dda3d6079da9"
DEPENDS = "zstd"
-SRC_URI = "https://github.com/ccache/ccache/releases/download/v${PV}/${BP}.tar.gz"
+SRC_URI = "https://github.com/ccache/ccache/releases/download/v${PV}/${BP}.tar.gz \
+ file://0001-build-Fix-FTBFS-with-not-yet-released-GCC-13.patch \
+"
SRC_URI[sha256sum] = "73a1767ac6b7c0404a1a55f761a746d338e702883c7137fbf587023062258625"
UPSTREAM_CHECK_URI = "https://github.com/ccache/ccache/releases/"
diff --git a/meta/recipes-devtools/cmake/cmake-native_3.22.3.bb b/meta/recipes-devtools/cmake/cmake-native_3.22.3.bb
index ee1f7761c4..45ea78ae00 100644
--- a/meta/recipes-devtools/cmake/cmake-native_3.22.3.bb
+++ b/meta/recipes-devtools/cmake/cmake-native_3.22.3.bb
@@ -32,6 +32,7 @@ CMAKE_EXTRACONF = "\
-DCMAKE_USE_SYSTEM_LIBRARY_EXPAT=0 \
-DENABLE_ACL=0 -DHAVE_ACL_LIBACL_H=0 \
-DHAVE_SYS_ACL_H=0 \
+ -DCURL_LIBRARIES=-lcurl \
"
do_configure () {
diff --git a/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake b/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
index 86446c3ace..6434b27371 100644
--- a/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
+++ b/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
@@ -1,7 +1,6 @@
set( CMAKE_SYSTEM_NAME Linux )
set( CMAKE_C_FLAGS $ENV{CFLAGS} CACHE STRING "" FORCE )
set( CMAKE_CXX_FLAGS $ENV{CXXFLAGS} CACHE STRING "" FORCE )
-set( CMAKE_ASM_FLAGS ${CMAKE_C_FLAGS} CACHE STRING "" FORCE )
set( CMAKE_SYSROOT $ENV{OECORE_TARGET_SYSROOT} )
set( CMAKE_FIND_ROOT_PATH $ENV{OECORE_TARGET_SYSROOT} )
@@ -12,13 +11,13 @@ set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set(CMAKE_FIND_LIBRARY_CUSTOM_LIB_SUFFIX "$ENV{OE_CMAKE_FIND_LIBRARY_CUSTOM_LIB_SUFFIX}")
-# Set CMAKE_SYSTEM_PROCESSOR from the sysroot name (assuming processor-distro-os).
-if ($ENV{SDKTARGETSYSROOT} MATCHES "/sysroots/([a-zA-Z0-9_-]+)-.+-.+")
- set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_MATCH_1})
-endif()
+set( CMAKE_SYSTEM_PROCESSOR $ENV{OECORE_TARGET_ARCH} )
# Include the toolchain configuration subscripts
file( GLOB toolchain_config_files "${CMAKE_CURRENT_LIST_FILE}.d/*.cmake" )
foreach(config ${toolchain_config_files})
include(${config})
endforeach()
+
+unset(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES)
+unset(CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES)
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1a.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1a.patch
new file mode 100644
index 0000000000..bf93fbc13c
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1a.patch
@@ -0,0 +1,236 @@
+From ee6db10dd70b8fdc7a93cffd7cf5bc7a28f9d3d7 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:21 +0100
+Subject: [PATCH 1/5] dmidecode: Split table fetching from decoding
+
+Clean up function dmi_table so that it does only one thing:
+* dmi_table() is renamed to dmi_table_get(). It now retrieves the
+ DMI table, but does not process it any longer.
+* Decoding or dumping the table is now done in smbios3_decode(),
+ smbios_decode() and legacy_decode().
+No functional change.
+
+A side effect of this change is that writing the header and body of
+dump files is now done in a single location. This is required to
+further consolidate the writing of dump files.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+
+CVE: CVE-2023-30630
+
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=39b2dd7b6ab719b920e96ed832cfb4bdd664e808]
+
+Signed-off-by: Adrian Freihofer <adrian.freihofer@siemens.com>
+---
+ dmidecode.c | 86 ++++++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 62 insertions(+), 24 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index cd2b5c9..b082c03 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5247,8 +5247,9 @@ static void dmi_table_decode(u8 *buf, u32 len, u16 num, u16 ver, u32 flags)
+ }
+ }
+
+-static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+- u32 flags)
++/* Allocates a buffer for the table, must be freed by the caller */
++static u8 *dmi_table_get(off_t base, u32 *len, u16 num, u32 ver,
++ const char *devmem, u32 flags)
+ {
+ u8 *buf;
+
+@@ -5267,7 +5268,7 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ {
+ if (num)
+ pr_info("%u structures occupying %u bytes.",
+- num, len);
++ num, *len);
+ if (!(opt.flags & FLAG_FROM_DUMP))
+ pr_info("Table at 0x%08llX.",
+ (unsigned long long)base);
+@@ -5285,19 +5286,19 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ * would be the result of the kernel truncating the table on
+ * parse error.
+ */
+- size_t size = len;
++ size_t size = *len;
+ buf = read_file(flags & FLAG_NO_FILE_OFFSET ? 0 : base,
+ &size, devmem);
+- if (!(opt.flags & FLAG_QUIET) && num && size != (size_t)len)
++ if (!(opt.flags & FLAG_QUIET) && num && size != (size_t)*len)
+ {
+ fprintf(stderr, "Wrong DMI structures length: %u bytes "
+ "announced, only %lu bytes available.\n",
+- len, (unsigned long)size);
++ *len, (unsigned long)size);
+ }
+- len = size;
++ *len = size;
+ }
+ else
+- buf = mem_chunk(base, len, devmem);
++ buf = mem_chunk(base, *len, devmem);
+
+ if (buf == NULL)
+ {
+@@ -5307,15 +5308,9 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ fprintf(stderr,
+ "Try compiling dmidecode with -DUSE_MMAP.\n");
+ #endif
+- return;
+ }
+
+- if (opt.flags & FLAG_DUMP_BIN)
+- dmi_table_dump(buf, len);
+- else
+- dmi_table_decode(buf, len, num, ver >> 8, flags);
+-
+- free(buf);
++ return buf;
+ }
+
+
+@@ -5350,8 +5345,9 @@ static void overwrite_smbios3_address(u8 *buf)
+
+ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ {
+- u32 ver;
++ u32 ver, len;
+ u64 offset;
++ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+ if (buf[0x06] > 0x20)
+@@ -5377,8 +5373,12 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ return 0;
+ }
+
+- dmi_table(((off_t)offset.h << 32) | offset.l,
+- DWORD(buf + 0x0C), 0, ver, devmem, flags | FLAG_STOP_AT_EOT);
++ /* Maximum length, may get trimmed */
++ len = DWORD(buf + 0x0C);
++ table = dmi_table_get(((off_t)offset.h << 32) | offset.l, &len, 0, ver,
++ devmem, flags | FLAG_STOP_AT_EOT);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5387,18 +5387,28 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_smbios3_address(crafted);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ pr_comment("Writing %d bytes to %s.", crafted[0x06],
+ opt.dumpfile);
+ write_dump(0, crafted[0x06], crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, 0, ver >> 8,
++ flags | FLAG_STOP_AT_EOT);
++ }
++
++ free(table);
+
+ return 1;
+ }
+
+ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ {
+- u16 ver;
++ u16 ver, num;
++ u32 len;
++ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+ if (buf[0x05] > 0x20)
+@@ -5438,8 +5448,13 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ pr_info("SMBIOS %u.%u present.",
+ ver >> 8, ver & 0xFF);
+
+- dmi_table(DWORD(buf + 0x18), WORD(buf + 0x16), WORD(buf + 0x1C),
+- ver << 8, devmem, flags);
++ /* Maximum length, may get trimmed */
++ len = WORD(buf + 0x16);
++ num = WORD(buf + 0x1C);
++ table = dmi_table_get(DWORD(buf + 0x18), &len, num, ver << 8,
++ devmem, flags);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5448,27 +5463,43 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_dmi_address(crafted + 0x10);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ pr_comment("Writing %d bytes to %s.", crafted[0x05],
+ opt.dumpfile);
+ write_dump(0, crafted[0x05], crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, num, ver, flags);
++ }
++
++ free(table);
+
+ return 1;
+ }
+
+ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ {
++ u16 ver, num;
++ u32 len;
++ u8 *table;
++
+ if (!checksum(buf, 0x0F))
+ return 0;
+
++ ver = ((buf[0x0E] & 0xF0) << 4) + (buf[0x0E] & 0x0F);
+ if (!(opt.flags & FLAG_QUIET))
+ pr_info("Legacy DMI %u.%u present.",
+ buf[0x0E] >> 4, buf[0x0E] & 0x0F);
+
+- dmi_table(DWORD(buf + 0x08), WORD(buf + 0x06), WORD(buf + 0x0C),
+- ((buf[0x0E] & 0xF0) << 12) + ((buf[0x0E] & 0x0F) << 8),
+- devmem, flags);
++ /* Maximum length, may get trimmed */
++ len = WORD(buf + 0x06);
++ num = WORD(buf + 0x0C);
++ table = dmi_table_get(DWORD(buf + 0x08), &len, num, ver << 8,
++ devmem, flags);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5477,11 +5508,18 @@ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 16);
+ overwrite_dmi_address(crafted);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ pr_comment("Writing %d bytes to %s.", 0x0F,
+ opt.dumpfile);
+ write_dump(0, 0x0F, crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, num, ver, flags);
++ }
++
++ free(table);
+
+ return 1;
+ }
+--
+2.41.0
+
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1b.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1b.patch
new file mode 100644
index 0000000000..e03bda05e4
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_1b.patch
@@ -0,0 +1,197 @@
+From d362549bce92ac22860cda8cad4532c1a3fe6928 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:25 +0100
+Subject: [PATCH 2/5] dmidecode: Write the whole dump file at once
+
+When option --dump-bin is used, write the whole dump file at once,
+instead of opening and closing the file separately for the table
+and then for the entry point.
+
+As the file writing function is no longer generic, it gets moved
+from util.c to dmidecode.c.
+
+One minor functional change resulting from the new implementation is
+that the entry point is written first now, so the messages printed
+are swapped.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+
+CVE: CVE-2023-30630
+
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=d8cfbc808f387e87091c25e7d5b8c2bb348bb206]
+
+Signed-off-by: Adrian Freihofer <adrian.freihofer@siemens.com>
+---
+ dmidecode.c | 69 +++++++++++++++++++++++++++++++++++++++--------------
+ util.c | 40 -------------------------------
+ util.h | 1 -
+ 3 files changed, 51 insertions(+), 59 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index b082c03..a80a140 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5130,11 +5130,56 @@ static void dmi_table_string(const struct dmi_header *h, const u8 *data, u16 ver
+ }
+ }
+
+-static void dmi_table_dump(const u8 *buf, u32 len)
++static int dmi_table_dump(const u8 *ep, u32 ep_len, const u8 *table,
++ u32 table_len)
+ {
++ FILE *f;
++
++ f = fopen(opt.dumpfile, "wb");
++ if (!f)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fopen");
++ return -1;
++ }
++
++ if (!(opt.flags & FLAG_QUIET))
++ pr_comment("Writing %d bytes to %s.", ep_len, opt.dumpfile);
++ if (fwrite(ep, ep_len, 1, f) != 1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fwrite");
++ goto err_close;
++ }
++
++ if (fseek(f, 32, SEEK_SET) != 0)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fseek");
++ goto err_close;
++ }
++
+ if (!(opt.flags & FLAG_QUIET))
+- pr_comment("Writing %d bytes to %s.", len, opt.dumpfile);
+- write_dump(32, len, buf, opt.dumpfile, 0);
++ pr_comment("Writing %d bytes to %s.", table_len, opt.dumpfile);
++ if (fwrite(table, table_len, 1, f) != 1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fwrite");
++ goto err_close;
++ }
++
++ if (fclose(f))
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fclose");
++ return -1;
++ }
++
++ return 0;
++
++err_close:
++ fclose(f);
++ return -1;
+ }
+
+ static void dmi_table_decode(u8 *buf, u32 len, u16 num, u16 ver, u32 flags)
+@@ -5387,11 +5432,7 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_smbios3_address(crafted);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- pr_comment("Writing %d bytes to %s.", crafted[0x06],
+- opt.dumpfile);
+- write_dump(0, crafted[0x06], crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, crafted[0x06], table, len);
+ }
+ else
+ {
+@@ -5463,11 +5504,7 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_dmi_address(crafted + 0x10);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- pr_comment("Writing %d bytes to %s.", crafted[0x05],
+- opt.dumpfile);
+- write_dump(0, crafted[0x05], crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, crafted[0x05], table, len);
+ }
+ else
+ {
+@@ -5508,11 +5545,7 @@ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 16);
+ overwrite_dmi_address(crafted);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- pr_comment("Writing %d bytes to %s.", 0x0F,
+- opt.dumpfile);
+- write_dump(0, 0x0F, crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, 0x0F, table, len);
+ }
+ else
+ {
+diff --git a/util.c b/util.c
+index 04aaadd..1547096 100644
+--- a/util.c
++++ b/util.c
+@@ -259,46 +259,6 @@ out:
+ return p;
+ }
+
+-int write_dump(size_t base, size_t len, const void *data, const char *dumpfile, int add)
+-{
+- FILE *f;
+-
+- f = fopen(dumpfile, add ? "r+b" : "wb");
+- if (!f)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fopen");
+- return -1;
+- }
+-
+- if (fseek(f, base, SEEK_SET) != 0)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fseek");
+- goto err_close;
+- }
+-
+- if (fwrite(data, len, 1, f) != 1)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fwrite");
+- goto err_close;
+- }
+-
+- if (fclose(f))
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fclose");
+- return -1;
+- }
+-
+- return 0;
+-
+-err_close:
+- fclose(f);
+- return -1;
+-}
+-
+ /* Returns end - start + 1, assuming start < end */
+ u64 u64_range(u64 start, u64 end)
+ {
+diff --git a/util.h b/util.h
+index 3094cf8..ef24eb9 100644
+--- a/util.h
++++ b/util.h
+@@ -27,5 +27,4 @@
+ int checksum(const u8 *buf, size_t len);
+ void *read_file(off_t base, size_t *len, const char *filename);
+ void *mem_chunk(off_t base, size_t len, const char *devmem);
+-int write_dump(size_t base, size_t len, const void *data, const char *dumpfile, int add);
+ u64 u64_range(u64 start, u64 end);
+--
+2.41.0
+
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_2.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_2.patch
new file mode 100644
index 0000000000..37167a9c4f
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_2.patch
@@ -0,0 +1,83 @@
+From 2d26f187c734635d072d24ea401255b84f03f4c4 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Tue, 27 Jun 2023 10:03:53 +0000
+Subject: [PATCH 3/5] dmidecode: Do not let --dump-bin overwrite an existing
+ file
+
+Make sure that the file passed to option --dump-bin does not already
+exist. In practice, it is rather unlikely that an honest user would
+want to overwrite an existing dump file, while this possibility
+could be used by a rogue user to corrupt a system file.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+
+CVE: CVE-2023-30630
+
+Upstream-Status: Backport
+[https://github.com/mirror/dmidecode/commit/6ca381c1247c81f74e1ca4e7706f70bdda72e6f2]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ dmidecode.c | 14 ++++++++++++--
+ man/dmidecode.8 | 3 ++-
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index a80a140..32a77cc 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -60,6 +60,7 @@
+ * https://www.dmtf.org/sites/default/files/DSP0270_1.0.1.pdf
+ */
+
++#include <fcntl.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <strings.h>
+@@ -5133,13 +5134,22 @@ static void dmi_table_string(const struct dmi_header *h, const u8 *data, u16 ver
+ static int dmi_table_dump(const u8 *ep, u32 ep_len, const u8 *table,
+ u32 table_len)
+ {
++ int fd;
+ FILE *f;
+
+- f = fopen(opt.dumpfile, "wb");
++ fd = open(opt.dumpfile, O_WRONLY|O_CREAT|O_EXCL, 0666);
++ if (fd == -1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("open");
++ return -1;
++ }
++
++ f = fdopen(fd, "wb");
+ if (!f)
+ {
+ fprintf(stderr, "%s: ", opt.dumpfile);
+- perror("fopen");
++ perror("fdopen");
+ return -1;
+ }
+
+diff --git a/man/dmidecode.8 b/man/dmidecode.8
+index 64dc7e7..d5b7f01 100644
+--- a/man/dmidecode.8
++++ b/man/dmidecode.8
+@@ -1,4 +1,4 @@
+-.TH DMIDECODE 8 "January 2019" "dmidecode"
++.TH DMIDECODE 8 "February 2023" "dmidecode"
+ .\"
+ .SH NAME
+ dmidecode \- \s-1DMI\s0 table decoder
+@@ -132,6 +132,7 @@ hexadecimal and \s-1ASCII\s0. This option is mainly useful for debugging.
+ Do not decode the entries, instead dump the DMI data to a file in binary
+ form. The generated file is suitable to pass to \fB--from-dump\fR
+ later.
++\fIFILE\fP must not exist.
+ .TP
+ .BR " " " " "--from-dump FILE"
+ Read the DMI data from a binary file previously generated using
+--
+2.41.0
+
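
A note on the technique used by the patch above: the key is the open(2)
O_CREAT|O_EXCL idiom, which atomically refuses to reuse an existing path, so a
rogue user cannot pre-create or symlink the dump file target. A minimal
standalone C sketch of the same idiom (illustrative only; the function name and
error handling below are simplified and are not taken from dmidecode):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Create a dump file only if it does not already exist, then wrap the
	 * descriptor in a FILE *.  O_CREAT|O_EXCL makes the existence check
	 * atomic, closing the race between "check" and "create". */
	static FILE *create_dump_file(const char *path)
	{
		int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0666);
		if (fd == -1)
		{
			fprintf(stderr, "%s: ", path);
			perror("open");
			return NULL;
		}

		FILE *f = fdopen(fd, "wb");
		if (!f)
		{
			perror("fdopen");
			close(fd);
		}
		return f;
	}
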
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_3.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_3.patch
new file mode 100644
index 0000000000..181092a3fd
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_3.patch
@@ -0,0 +1,71 @@
+From ac881f801b92b57fd8daac65fb16fff6d84fd366 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Tue, 27 Jun 2023 10:25:50 +0000
+Subject: [PATCH 4/5] Consistently use read_file() when reading from a dump
+ file
+
+Use read_file() instead of mem_chunk() to read the entry point from a
+dump file. This is faster, and consistent with how we then read the
+actual DMI table from that dump file.
+
+This has made no functional difference so far, which is why it went
+unnoticed for years. But now that a file type check has been added to
+the mem_chunk() function, we must stop using it to read from regular
+files.
+
+This will again allow root to use the --from-dump option.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Tested-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+
+CVE: CVE-2023-30630
+
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=c76ddda0ba0aa99a55945e3290095c2ec493c892]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ dmidecode.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index 32a77cc..9a691e0 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5693,17 +5693,25 @@ int main(int argc, char * const argv[])
+ pr_comment("dmidecode %s", VERSION);
+
+ /* Read from dump if so instructed */
++ size = 0x20;
+ if (opt.flags & FLAG_FROM_DUMP)
+ {
+ if (!(opt.flags & FLAG_QUIET))
+ pr_info("Reading SMBIOS/DMI data from file %s.",
+ opt.dumpfile);
+- if ((buf = mem_chunk(0, 0x20, opt.dumpfile)) == NULL)
++ if ((buf = read_file(0, &size, opt.dumpfile)) == NULL)
+ {
+ ret = 1;
+ goto exit_free;
+ }
+
++ /* Truncated entry point can't be processed */
++ if (size < 0x20)
++ {
++ ret = 1;
++ goto done;
++ }
++
+ if (memcmp(buf, "_SM3_", 5) == 0)
+ {
+ if (smbios3_decode(buf, opt.dumpfile, 0))
+@@ -5727,7 +5735,6 @@ int main(int argc, char * const argv[])
+ * contain one of several types of entry points, so read enough for
+ * the largest one, then determine what type it contains.
+ */
+- size = 0x20;
+ if (!(opt.flags & FLAG_NO_SYSFS)
+ && (buf = read_file(0, &size, SYS_ENTRY_FILE)) != NULL)
+ {
+--
+2.41.0
+
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_4.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_4.patch
new file mode 100644
index 0000000000..b7d7f4ff96
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630_4.patch
@@ -0,0 +1,138 @@
+From 2fb126eef436389a2dc48d4225b4a9888b0625a8 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Tue, 27 Jun 2023 10:58:11 +0000
+Subject: [PATCH 5/5] Don't read beyond sysfs entry point buffer
+
+Functions smbios_decode() and smbios3_decode() include a check
+against buffer overrun. This check assumes that the buffer length is
+always 32 bytes. This is true when reading from /dev/mem or from a
+dump file, however when reading from sysfs, the buffer length is the
+size of the actual sysfs attribute file, typically 31 bytes for an
+SMBIOS 2.x entry point and 24 bytes for an SMBIOS 3.x entry point.
+
+In the unlikely event of a malformed entry point, with encoded length
+larger than expected but smaller than or equal to 32, we would hit a
+buffer overrun. So properly pass the actual buffer length as an
+argument and perform the check against it.
+
+In practice, this will never happen, because on the Linux kernel
+side, the size of the sysfs attribute file is decided from the entry
+point length field. So it is technically impossible for them not to
+match. But user-space code should not make such assumptions.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+
+CVE: CVE-2023-30630
+
+Upstream-Status: Backport
+[https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=2b83c4b898f8325313162f588765411e8e3e5561]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ dmidecode.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index 9a691e0..e725801 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5398,14 +5398,14 @@ static void overwrite_smbios3_address(u8 *buf)
+ buf[0x17] = 0;
+ }
+
+-static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
++static int smbios3_decode(u8 *buf, size_t buf_len, const char *devmem, u32 flags)
+ {
+ u32 ver, len;
+ u64 offset;
+ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+- if (buf[0x06] > 0x20)
++ if (buf[0x06] > buf_len)
+ {
+ fprintf(stderr,
+ "Entry point length too large (%u bytes, expected %u).\n",
+@@ -5455,14 +5455,14 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ return 1;
+ }
+
+-static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
++static int smbios_decode(u8 *buf, size_t buf_len, const char *devmem, u32 flags)
+ {
+ u16 ver, num;
+ u32 len;
+ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+- if (buf[0x05] > 0x20)
++ if (buf[0x05] > buf_len)
+ {
+ fprintf(stderr,
+ "Entry point length too large (%u bytes, expected %u).\n",
+@@ -5714,12 +5714,12 @@ int main(int argc, char * const argv[])
+
+ if (memcmp(buf, "_SM3_", 5) == 0)
+ {
+- if (smbios3_decode(buf, opt.dumpfile, 0))
++ if (smbios3_decode(buf, size, opt.dumpfile, 0))
+ found++;
+ }
+ else if (memcmp(buf, "_SM_", 4) == 0)
+ {
+- if (smbios_decode(buf, opt.dumpfile, 0))
++ if (smbios_decode(buf, size, opt.dumpfile, 0))
+ found++;
+ }
+ else if (memcmp(buf, "_DMI_", 5) == 0)
+@@ -5742,12 +5742,12 @@ int main(int argc, char * const argv[])
+ pr_info("Getting SMBIOS data from sysfs.");
+ if (size >= 24 && memcmp(buf, "_SM3_", 5) == 0)
+ {
+- if (smbios3_decode(buf, SYS_TABLE_FILE, FLAG_NO_FILE_OFFSET))
++ if (smbios3_decode(buf, size, SYS_TABLE_FILE, FLAG_NO_FILE_OFFSET))
+ found++;
+ }
+ else if (size >= 31 && memcmp(buf, "_SM_", 4) == 0)
+ {
+- if (smbios_decode(buf, SYS_TABLE_FILE, FLAG_NO_FILE_OFFSET))
++ if (smbios_decode(buf, size, SYS_TABLE_FILE, FLAG_NO_FILE_OFFSET))
+ found++;
+ }
+ else if (size >= 15 && memcmp(buf, "_DMI_", 5) == 0)
+@@ -5784,12 +5784,12 @@ int main(int argc, char * const argv[])
+
+ if (memcmp(buf, "_SM3_", 5) == 0)
+ {
+- if (smbios3_decode(buf, opt.devmem, 0))
++ if (smbios3_decode(buf, 0x20, opt.devmem, 0))
+ found++;
+ }
+ else if (memcmp(buf, "_SM_", 4) == 0)
+ {
+- if (smbios_decode(buf, opt.devmem, 0))
++ if (smbios_decode(buf, 0x20, opt.devmem, 0))
+ found++;
+ }
+ goto done;
+@@ -5810,7 +5810,7 @@ memory_scan:
+ {
+ if (memcmp(buf + fp, "_SM3_", 5) == 0)
+ {
+- if (smbios3_decode(buf + fp, opt.devmem, 0))
++ if (smbios3_decode(buf + fp, 0x20, opt.devmem, 0))
+ {
+ found++;
+ goto done;
+@@ -5823,7 +5823,7 @@ memory_scan:
+ {
+ if (memcmp(buf + fp, "_SM_", 4) == 0 && fp <= 0xFFE0)
+ {
+- if (smbios_decode(buf + fp, opt.devmem, 0))
++ if (smbios_decode(buf + fp, 0x20, opt.devmem, 0))
+ {
+ found++;
+ goto done;
+--
+2.41.0
+
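
The pattern behind the patch above is to validate a self-declared length field
against the real size of the buffer that holds it, rather than against an
assumed constant such as 0x20. A self-contained C sketch of that kind of check
(the names below are illustrative and do not come from dmidecode):

	#include <stddef.h>
	#include <stdio.h>

	typedef unsigned char u8;

	/* Return 1 if the length byte stored at buf[len_offset] fits inside
	 * the buffer that was actually read (buf_len bytes), 0 otherwise. */
	static int entry_point_length_ok(const u8 *buf, size_t buf_len,
	                                 size_t len_offset)
	{
		if (len_offset >= buf_len)
			return 0;
		if (buf[len_offset] > buf_len)
		{
			fprintf(stderr,
			        "Entry point length too large (%u bytes).\n",
			        (unsigned)buf[len_offset]);
			return 0;
		}
		return 1;
	}
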
diff --git a/meta/recipes-devtools/dmidecode/dmidecode_3.3.bb b/meta/recipes-devtools/dmidecode/dmidecode_3.3.bb
index 23540b2703..c0f6b45313 100644
--- a/meta/recipes-devtools/dmidecode/dmidecode_3.3.bb
+++ b/meta/recipes-devtools/dmidecode/dmidecode_3.3.bb
@@ -6,6 +6,11 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=b234ee4d69f5fce4486a80fdaf4a4263"
SRC_URI = "${SAVANNAH_NONGNU_MIRROR}/dmidecode/${BP}.tar.xz \
file://0001-Committing-changes-from-do_unpack_extra.patch \
+ file://CVE-2023-30630_1a.patch \
+ file://CVE-2023-30630_1b.patch \
+ file://CVE-2023-30630_2.patch \
+ file://CVE-2023-30630_3.patch \
+ file://CVE-2023-30630_4.patch \
"
COMPATIBLE_HOST = "(i.86|x86_64|aarch64|arm|powerpc|powerpc64).*-linux"
diff --git a/meta/recipes-devtools/dpkg/dpkg/0001-Dpkg-Source-Archive-Prevent-directory-traversal-for-.patch b/meta/recipes-devtools/dpkg/dpkg/0001-Dpkg-Source-Archive-Prevent-directory-traversal-for-.patch
new file mode 100644
index 0000000000..d249d854fb
--- /dev/null
+++ b/meta/recipes-devtools/dpkg/dpkg/0001-Dpkg-Source-Archive-Prevent-directory-traversal-for-.patch
@@ -0,0 +1,328 @@
+From 6d8a6799639f8853a2af1f9036bc70fddbfdd2a2 Mon Sep 17 00:00:00 2001
+From: Guillem Jover <guillem@debian.org>
+Date: Tue, 3 May 2022 02:09:32 +0200
+Subject: [PATCH] Dpkg::Source::Archive: Prevent directory traversal for
+ in-place extracts
+
+For untrusted v2 and v3 source package formats that include a debian.tar
+archive, we extract that archive in place, which can lead to directory
+traversal situations on specially crafted orig.tar and debian.tar
+tarballs.
+
+GNU tar replaces entries on the filesystem by the entries present on
+the tarball, but it will follow symlinks when the symlink pathname
+itself is not present as an actual directory on the tarball.
+
+This means we can create an orig.tar where there's a symlink pointing
+out of the source tree root directory, and then a debian.tar that
+contains an entry within that symlink as if it was a directory, without
+a directory entry for the symlink pathname itself, which will be
+extracted following the symlink outside the source tree root.
+
+This is currently noted as expected behaviour in the GNU tar documentation.
+But even if a new extraction mode avoiding this problem were added, we would
+need to depend on that new version. Using Perl's Archive::Tar would solve the
+problem, but switching to such a different pure Perl implementation could
+cause compatibility or performance issues.
+
+Instead, when asked to perform an in-place extract, we still extract into
+a temporary directory, then walk that directory and remove any matching
+entry in the destination directory before moving the new entries over,
+replicating what GNU tar would do but avoiding the directory traversal
+issue for symlinks. This should work with any tar implementation and be safe.
+
+Reported-by: Max Justicz <max@justi.cz>
+Stable-Candidates: 1.18.x 1.19.x 1.20.x
+Fixes: commit 0c0057a27fecccab77d2b3cffa9a7d172846f0b4 (1.14.17)
+Fixes: CVE-2022-1664
+
+CVE: CVE-2022-1664
+Upstream-Status: Backport [7a6c03cb34d4a09f35df2f10779cbf1b70a5200b]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ scripts/Dpkg/Source/Archive.pm | 122 +++++++++++++++++++++++++-------
+ scripts/t/Dpkg_Source_Archive.t | 110 +++++++++++++++++++++++++++-
+ 2 files changed, 204 insertions(+), 28 deletions(-)
+
+diff --git a/scripts/Dpkg/Source/Archive.pm b/scripts/Dpkg/Source/Archive.pm
+index 33c181b20..2ddd04af8 100644
+--- a/scripts/Dpkg/Source/Archive.pm
++++ b/scripts/Dpkg/Source/Archive.pm
+@@ -21,9 +21,11 @@ use warnings;
+ our $VERSION = '0.01';
+
+ use Carp;
++use Errno qw(ENOENT);
+ use File::Temp qw(tempdir);
+ use File::Basename qw(basename);
+ use File::Spec;
++use File::Find;
+ use Cwd;
+
+ use Dpkg ();
+@@ -110,19 +112,13 @@ sub extract {
+ my %spawn_opts = (wait_child => 1);
+
+ # Prepare destination
+- my $tmp;
+- if ($opts{in_place}) {
+- $spawn_opts{chdir} = $dest;
+- $tmp = $dest; # So that fixperms call works
+- } else {
+- my $template = basename($self->get_filename()) . '.tmp-extract.XXXXX';
+- unless (-e $dest) {
+- # Kludge so that realpath works
+- mkdir($dest) or syserr(g_('cannot create directory %s'), $dest);
+- }
+- $tmp = tempdir($template, DIR => Cwd::realpath("$dest/.."), CLEANUP => 1);
+- $spawn_opts{chdir} = $tmp;
++ my $template = basename($self->get_filename()) . '.tmp-extract.XXXXX';
++ unless (-e $dest) {
++ # Kludge so that realpath works
++ mkdir($dest) or syserr(g_('cannot create directory %s'), $dest);
+ }
++ my $tmp = tempdir($template, DIR => Cwd::realpath("$dest/.."), CLEANUP => 1);
++ $spawn_opts{chdir} = $tmp;
+
+ # Prepare stuff that handles the input of tar
+ $self->ensure_open('r', delete_sig => [ 'PIPE' ]);
+@@ -145,22 +141,94 @@ sub extract {
+ # have to be calculated using mount options and other madness.
+ fixperms($tmp) unless $opts{no_fixperms};
+
+- # Stop here if we extracted in-place as there's nothing to move around
+- return if $opts{in_place};
+-
+- # Rename extracted directory
+- opendir(my $dir_dh, $tmp) or syserr(g_('cannot opendir %s'), $tmp);
+- my @entries = grep { $_ ne '.' && $_ ne '..' } readdir($dir_dh);
+- closedir($dir_dh);
+- my $done = 0;
+- erasedir($dest);
+- if (scalar(@entries) == 1 && ! -l "$tmp/$entries[0]" && -d _) {
+- rename("$tmp/$entries[0]", $dest)
+- or syserr(g_('unable to rename %s to %s'),
+- "$tmp/$entries[0]", $dest);
++ # If we are extracting "in-place" do not remove the destination directory.
++ if ($opts{in_place}) {
++ my $canon_basedir = Cwd::realpath($dest);
++ # On Solaris /dev/null points to /devices/pseudo/mm@0:null.
++ my $canon_devnull = Cwd::realpath('/dev/null');
++ my $check_symlink = sub {
++ my $pathname = shift;
++ my $canon_pathname = Cwd::realpath($pathname);
++ if (not defined $canon_pathname) {
++ return if $! == ENOENT;
++
++ syserr(g_("pathname '%s' cannot be canonicalized"), $pathname);
++ }
++ return if $canon_pathname eq $canon_devnull;
++ return if $canon_pathname eq $canon_basedir;
++ return if $canon_pathname =~ m{^\Q$canon_basedir/\E};
++ warning(g_("pathname '%s' points outside source root (to '%s')"),
++ $pathname, $canon_pathname);
++ };
++
++ my $move_in_place = sub {
++ my $relpath = File::Spec->abs2rel($File::Find::name, $tmp);
++ my $destpath = File::Spec->catfile($dest, $relpath);
++
++ my ($mode, $atime, $mtime);
++ lstat $File::Find::name
++ or syserr(g_('cannot get source pathname %s metadata'), $File::Find::name);
++ ((undef) x 2, $mode, (undef) x 5, $atime, $mtime) = lstat _;
++ my $src_is_dir = -d _;
++
++ my $dest_exists = 1;
++ if (not lstat $destpath) {
++ if ($! == ENOENT) {
++ $dest_exists = 0;
++ } else {
++ syserr(g_('cannot get target pathname %s metadata'), $destpath);
++ }
++ }
++ my $dest_is_dir = -d _;
++ if ($dest_exists) {
++ if ($dest_is_dir && $src_is_dir) {
++ # Refresh the destination directory attributes with the
++ # ones from the tarball.
++ chmod $mode, $destpath
++ or syserr(g_('cannot change directory %s mode'), $File::Find::name);
++ utime $atime, $mtime, $destpath
++ or syserr(g_('cannot change directory %s times'), $File::Find::name);
++
++ # We should do nothing, and just walk further tree.
++ return;
++ } elsif ($dest_is_dir) {
++ rmdir $destpath
++ or syserr(g_('cannot remove destination directory %s'), $destpath);
++ } else {
++ $check_symlink->($destpath);
++ unlink $destpath
++ or syserr(g_('cannot remove destination file %s'), $destpath);
++ }
++ }
++ # If we are moving a directory, we do not need to walk it.
++ if ($src_is_dir) {
++ $File::Find::prune = 1;
++ }
++ rename $File::Find::name, $destpath
++ or syserr(g_('cannot move %s to %s'), $File::Find::name, $destpath);
++ };
++
++ find({
++ wanted => $move_in_place,
++ no_chdir => 1,
++ dangling_symlinks => 0,
++ }, $tmp);
+ } else {
+- rename($tmp, $dest)
+- or syserr(g_('unable to rename %s to %s'), $tmp, $dest);
++ # Rename extracted directory
++ opendir(my $dir_dh, $tmp) or syserr(g_('cannot opendir %s'), $tmp);
++ my @entries = grep { $_ ne '.' && $_ ne '..' } readdir($dir_dh);
++ closedir($dir_dh);
++
++ erasedir($dest);
++
++ if (scalar(@entries) == 1 && ! -l "$tmp/$entries[0]" && -d _) {
++ rename("$tmp/$entries[0]", $dest)
++ or syserr(g_('unable to rename %s to %s'),
++ "$tmp/$entries[0]", $dest);
++ } else {
++ rename($tmp, $dest)
++ or syserr(g_('unable to rename %s to %s'), $tmp, $dest);
++ }
+ }
+ erasedir($tmp);
+ }
+diff --git a/scripts/t/Dpkg_Source_Archive.t b/scripts/t/Dpkg_Source_Archive.t
+index 7b70da68e..504fbe1d4 100644
+--- a/scripts/t/Dpkg_Source_Archive.t
++++ b/scripts/t/Dpkg_Source_Archive.t
+@@ -16,12 +16,120 @@
+ use strict;
+ use warnings;
+
+-use Test::More tests => 1;
++use Test::More tests => 4;
++use Test::Dpkg qw(:paths);
++
++use File::Spec;
++use File::Path qw(make_path rmtree);
+
+ BEGIN {
+ use_ok('Dpkg::Source::Archive');
+ }
+
++use Dpkg;
++
++my $tmpdir = test_get_temp_path();
++
++rmtree($tmpdir);
++
++sub test_touch
++{
++ my ($name, $data) = @_;
++
++ open my $fh, '>', $name
++ or die "cannot touch file $name\n";
++ print { $fh } $data if $data;
++ close $fh;
++}
++
++sub test_path_escape
++{
++ my $name = shift;
++
++ my $treedir = File::Spec->rel2abs("$tmpdir/$name-tree");
++ my $overdir = File::Spec->rel2abs("$tmpdir/$name-overlay");
++ my $outdir = "$tmpdir/$name-out";
++ my $expdir = "$tmpdir/$name-exp";
++
++ # This is the base directory, where we are going to be extracting stuff
++ # into, which include traps.
++ make_path("$treedir/subdir-a");
++ test_touch("$treedir/subdir-a/file-a");
++ test_touch("$treedir/subdir-a/file-pre-a");
++ make_path("$treedir/subdir-b");
++ test_touch("$treedir/subdir-b/file-b");
++ test_touch("$treedir/subdir-b/file-pre-b");
++ symlink File::Spec->abs2rel($outdir, $treedir), "$treedir/symlink-escape";
++ symlink File::Spec->abs2rel("$outdir/nonexistent", $treedir), "$treedir/symlink-nonexistent";
++ symlink "$treedir/file", "$treedir/symlink-within";
++ test_touch("$treedir/supposed-dir");
++
++ # This is the overlay directory, which we'll pack and extract over the
++ # base directory.
++ make_path($overdir);
++ make_path("$overdir/subdir-a/aa");
++ test_touch("$overdir/subdir-a/aa/file-aa", 'aa');
++ test_touch("$overdir/subdir-a/file-a", 'a');
++ make_path("$overdir/subdir-b/bb");
++ test_touch("$overdir/subdir-b/bb/file-bb", 'bb');
++ test_touch("$overdir/subdir-b/file-b", 'b');
++ make_path("$overdir/symlink-escape");
++ test_touch("$overdir/symlink-escape/escaped-file", 'escaped');
++ test_touch("$overdir/symlink-nonexistent", 'nonexistent');
++ make_path("$overdir/symlink-within");
++ make_path("$overdir/supposed-dir");
++ test_touch("$overdir/supposed-dir/supposed-file", 'something');
++
++ # Generate overlay tar.
++ system($Dpkg::PROGTAR, '-cf', "$overdir.tar", '-C', $overdir, qw(
++ subdir-a subdir-b
++ symlink-escape/escaped-file symlink-nonexistent symlink-within
++ supposed-dir
++ )) == 0
++ or die "cannot create overlay tar archive\n";
++
++ # This is the expected directory, which we'll be comparing against.
++ make_path($expdir);
++ system('cp', '-a', $overdir, $expdir) == 0
++ or die "cannot copy overlay hierarchy into expected directory\n";
++
++ # Store the expected and out reference directories into a tar to compare
++ # its structure against the result reference.
++ system($Dpkg::PROGTAR, '-cf', "$expdir.tar", '-C', $overdir, qw(
++ subdir-a subdir-b
++ symlink-escape/escaped-file symlink-nonexistent symlink-within
++ supposed-dir
++ ), '-C', $treedir, qw(
++ subdir-a/file-pre-a
++ subdir-b/file-pre-b
++ )) == 0
++ or die "cannot create expected tar archive\n";
++
++ # This directory is supposed to remain empty, anything inside implies a
++ # directory traversal.
++ make_path($outdir);
++
++ my $warnseen;
++ local $SIG{__WARN__} = sub { $warnseen = $_[0] };
++
++ # Perform the extraction.
++ my $tar = Dpkg::Source::Archive->new(filename => "$overdir.tar");
++ $tar->extract($treedir, in_place => 1);
++
++ # Store the result into a tar to compare its structure against a reference.
++ system($Dpkg::PROGTAR, '-cf', "$treedir.tar", '-C', $treedir, '.');
++
++ # Check results
++ ok(length $warnseen && $warnseen =~ m/points outside source root/,
++ 'expected warning seen');
++ ok(system($Dpkg::PROGTAR, '--compare', '-f', "$expdir.tar", '-C', $treedir) == 0,
++ 'expected directory matches');
++ ok(! -e "$outdir/escaped-file",
++ 'expected output directory is empty, directory traversal');
++}
++
++test_path_escape('in-place');
++
+ # TODO: Add actual test cases.
+
+ 1;
+--
+2.33.0
+
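
The Perl code in the patch above is authoritative; the idea it implements is to
canonicalize each destination pathname and only touch it if it resolves to the
source tree root or to something below it. A rough C sketch of such a
containment check, with simplified handling of non-existent paths and invented
names, purely to illustrate the idea:

	#include <limits.h>
	#include <stdlib.h>
	#include <string.h>

	/* Return 1 if 'path' canonicalizes to 'base' or to a pathname below
	 * it, 0 otherwise (including when either path cannot be resolved). */
	static int path_within_base(const char *base, const char *path)
	{
		char cbase[PATH_MAX], cpath[PATH_MAX];
		size_t blen;

		if (!realpath(base, cbase) || !realpath(path, cpath))
			return 0;
		blen = strlen(cbase);
		if (strncmp(cpath, cbase, blen) != 0)
			return 0;
		/* Either exactly the base, or a child separated by '/'. */
		return cpath[blen] == '\0' || cpath[blen] == '/';
	}
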
diff --git a/meta/recipes-devtools/dpkg/dpkg_1.21.4.bb b/meta/recipes-devtools/dpkg/dpkg_1.21.4.bb
index 681909f0bf..7ef6233ee4 100644
--- a/meta/recipes-devtools/dpkg/dpkg_1.21.4.bb
+++ b/meta/recipes-devtools/dpkg/dpkg_1.21.4.bb
@@ -14,6 +14,7 @@ SRC_URI = "git://salsa.debian.org/dpkg-team/dpkg.git;protocol=https;branch=main
file://0001-dpkg-Support-muslx32-build.patch \
file://pager.patch \
file://0001-Add-support-for-riscv32-CPU.patch \
+ file://0001-Dpkg-Source-Archive-Prevent-directory-traversal-for-.patch \
"
SRC_URI:append:class-native = " file://0001-build.c-ignore-return-of-1-from-tar-cf.patch"
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest b/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
index c97c0377e9..279923db8e 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
@@ -8,3 +8,4 @@ rm -f *.tmp
rm -f *.ok
rm -f *.failed
rm -f *.log
+cp ../data/test_data.tmp ./
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.46.5.bb b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.46.5.bb
index 5b2d1921f0..68c620cf71 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.46.5.bb
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.46.5.bb
@@ -141,4 +141,7 @@ do_install_ptest() {
install -d ${D}${PTEST_PATH}/lib
install -m 0644 ${B}/lib/config.h ${D}${PTEST_PATH}/lib/
+
+ install -d ${D}${PTEST_PATH}/data
+ install -m 0644 ${B}/tests/test_data.tmp ${D}${PTEST_PATH}/data/
}
diff --git a/meta/recipes-devtools/elfutils/elfutils_0.186.bb b/meta/recipes-devtools/elfutils/elfutils_0.186.bb
index 46ee40cce6..d742a2e14e 100644
--- a/meta/recipes-devtools/elfutils/elfutils_0.186.bb
+++ b/meta/recipes-devtools/elfutils/elfutils_0.186.bb
@@ -35,6 +35,8 @@ PTEST_ENABLED:libc-musl = "0"
EXTRA_OECONF = "--program-prefix=eu-"
+BUILD_CFLAGS += "-Wno-error=stringop-overflow"
+
DEPENDS_BZIP2 = "bzip2-replacement-native"
DEPENDS_BZIP2:class-target = "bzip2"
diff --git a/meta/recipes-devtools/file/file/CVE-2022-48554.patch b/meta/recipes-devtools/file/file/CVE-2022-48554.patch
new file mode 100644
index 0000000000..c285bd2c23
--- /dev/null
+++ b/meta/recipes-devtools/file/file/CVE-2022-48554.patch
@@ -0,0 +1,35 @@
+CVE: CVE-2022-48554
+Upstream-Status: Backport [ https://github.com/file/file/commit/497aabb29cd08d2a5aeb63e45798d65fcbe03502 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 497aabb29cd08d2a5aeb63e45798d65fcbe03502 Mon Sep 17 00:00:00 2001
+From: Christos Zoulas <christos@zoulas.com>
+Date: Mon, 14 Feb 2022 16:26:10 +0000
+Subject: [PATCH] PR/310: p870613: Don't use strlcpy to copy the string; it
+ will try to scan the source string to find out how much space is needed, and
+ the source string might not be NUL terminated.
+
+---
+ src/funcs.c | 11 +++++++----
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/src/funcs.c b/src/funcs.c
+index 89e1da597..dcfd352d2 100644
+--- a/src/funcs.c
++++ b/src/funcs.c
+@@ -54,9 +54,12 @@ FILE_RCSID("@(#)$File: funcs.c,v 1.124 2022/01/10 14:15:08 christos Exp $")
+ protected char *
+ file_copystr(char *buf, size_t blen, size_t width, const char *str)
+ {
+- if (++width > blen)
+- width = blen;
+- strlcpy(buf, str, width);
++ if (blen == 0)
++ return buf;
++ if (width >= blen)
++ width = blen - 1;
++ memcpy(buf, str, width);
++ buf[width] = '\0';
+ return buf;
+ }
+
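
The reason for the change above is that strlcpy() scans the source string up to
its NUL terminator to compute its return value, which is undefined behaviour
when the source is not NUL terminated. The patched file_copystr() instead
copies a bounded number of bytes and terminates the result explicitly; the same
idiom in isolation looks roughly like this (hypothetical helper, not part of
file's API):

	#include <string.h>

	/* Copy at most blen - 1 bytes of a possibly non-NUL-terminated source
	 * into buf and always NUL-terminate the result. */
	static char *copy_bounded(char *buf, size_t blen, size_t width,
	                          const char *src)
	{
		if (blen == 0)
			return buf;
		if (width >= blen)
			width = blen - 1;
		memcpy(buf, src, width);
		buf[width] = '\0';
		return buf;
	}
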
diff --git a/meta/recipes-devtools/file/file_5.41.bb b/meta/recipes-devtools/file/file_5.41.bb
index 653887e97a..6fd4f2c746 100644
--- a/meta/recipes-devtools/file/file_5.41.bb
+++ b/meta/recipes-devtools/file/file_5.41.bb
@@ -11,7 +11,9 @@ LIC_FILES_CHKSUM = "file://COPYING;beginline=2;md5=0251eaec1188b20d9a72c502ecfdd
DEPENDS = "file-replacement-native"
DEPENDS:class-native = "bzip2-replacement-native"
-SRC_URI = "git://github.com/file/file.git;branch=master;protocol=https"
+SRC_URI = "git://github.com/file/file.git;branch=master;protocol=https \
+ file://CVE-2022-48554.patch \
+"
SRCREV = "504206e53a89fd6eed71aeaf878aa3512418eab1"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/gcc/gcc-11.3.inc b/meta/recipes-devtools/gcc/gcc-11.4.inc
index acbb43a25f..fd6a3e92e3 100644
--- a/meta/recipes-devtools/gcc/gcc-11.3.inc
+++ b/meta/recipes-devtools/gcc/gcc-11.4.inc
@@ -2,11 +2,11 @@ require gcc-common.inc
# Third digit in PV should be incremented after a minor release
-PV = "11.3.0"
+PV = "11.4.0"
# BINV should be incremented to a revision after a minor gcc release
-BINV = "11.3.0"
+BINV = "11.4.0"
FILESEXTRAPATHS =. "${FILE_DIRNAME}/gcc:${FILE_DIRNAME}/gcc/backport:"
@@ -48,7 +48,6 @@ SRC_URI = "\
file://0016-If-CXXFLAGS-contains-something-unsupported-by-the-bu.patch \
file://0017-handle-sysroot-support-for-nativesdk-gcc.patch \
file://0018-Search-target-sysroot-gcc-version-specific-dirs-with.patch \
- file://0019-nios2-Define-MUSL_DYNAMIC_LINKER.patch \
file://0020-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch \
file://0021-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch \
file://0022-sync-gcc-stddef.h-with-musl.patch \
@@ -59,20 +58,27 @@ SRC_URI = "\
file://0027-libatomic-Do-not-enforce-march-on-aarch64.patch \
file://0028-debug-101473-apply-debug-prefix-maps-before-checksum.patch \
file://0029-Fix-install-path-of-linux64.h.patch \
- \
+ file://0030-rust-recursion-limit.patch \
+ file://0031-gcc-sanitizers-fix.patch \
file://0001-CVE-2021-42574.patch \
file://0002-CVE-2021-42574.patch \
file://0003-CVE-2021-42574.patch \
file://0004-CVE-2021-42574.patch \
file://0001-CVE-2021-46195.patch \
+ file://0001-aarch64-Update-Neoverse-N2-core-defini.patch \
+ file://0002-aarch64-add-armv9-a-to-march.patch \
+ file://0003-aarch64-Enable-FP16-feature-by-default-for-Armv9.patch \
+ file://0004-arm-add-armv9-a-architecture-to-march.patch \
+ file://CVE-2023-4039.patch \
"
-SRC_URI[sha256sum] = "b47cf2818691f5b1e21df2bb38c795fac2cfbd640ede2d0a5e1c89e338a3ac39"
+
+SRC_URI[sha256sum] = "3f2db222b007e8a4a23cd5ba56726ef08e8b1f1eb2055ee72c1402cea73a8dd9"
S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}"
# For dev release snapshotting
#S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${RELEASE}"
-#B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
+B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
# Language Overrides
FORTRAN = ""
diff --git a/meta/recipes-devtools/gcc/gcc-configure-common.inc b/meta/recipes-devtools/gcc/gcc-configure-common.inc
index e4cdb73f0a..dba25eb754 100644
--- a/meta/recipes-devtools/gcc/gcc-configure-common.inc
+++ b/meta/recipes-devtools/gcc/gcc-configure-common.inc
@@ -40,7 +40,6 @@ EXTRA_OECONF = "\
${@get_gcc_mips_plt_setting(bb, d)} \
${@get_gcc_ppc_plt_settings(bb, d)} \
${@get_gcc_multiarch_setting(bb, d)} \
- --enable-standard-branch-protection \
"
# glibc version is a minimum controlling whether features are enabled.
diff --git a/meta/recipes-devtools/gcc/gcc-cross-canadian.inc b/meta/recipes-devtools/gcc/gcc-cross-canadian.inc
index a87b446c4f..c36e4cba81 100644
--- a/meta/recipes-devtools/gcc/gcc-cross-canadian.inc
+++ b/meta/recipes-devtools/gcc/gcc-cross-canadian.inc
@@ -9,6 +9,7 @@ GCCMULTILIB = "--enable-multilib"
require gcc-configure-common.inc
+EXTRA_OECONF += "--with-plugin-ld=ld"
EXTRA_OECONF_PATHS = "\
--with-gxx-include-dir=/not/exist${target_includedir}/c++/${BINV} \
--with-build-time-tools=${STAGING_DIR_NATIVE}${prefix_native}/${TARGET_SYS}/bin \
@@ -134,8 +135,6 @@ do_install () {
ln -sf ${BINRELPATH}/${TARGET_PREFIX}$t$suffix $dest$t$suffix
done
- t=real-ld
- ln -sf ${BINRELPATH}/${TARGET_PREFIX}ld$suffix $dest$t$suffix
# libquadmath headers need to be available in the gcc libexec dir
install -d ${D}${libdir}/gcc/${TARGET_SYS}/${BINV}/include/
diff --git a/meta/recipes-devtools/gcc/gcc-cross-canadian_11.3.bb b/meta/recipes-devtools/gcc/gcc-cross-canadian_11.4.bb
index bf53c5cd78..bf53c5cd78 100644
--- a/meta/recipes-devtools/gcc/gcc-cross-canadian_11.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-cross-canadian_11.4.bb
diff --git a/meta/recipes-devtools/gcc/gcc-cross_11.3.bb b/meta/recipes-devtools/gcc/gcc-cross_11.4.bb
index b43cca0c52..b43cca0c52 100644
--- a/meta/recipes-devtools/gcc/gcc-cross_11.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-cross_11.4.bb
diff --git a/meta/recipes-devtools/gcc/gcc-crosssdk_11.3.bb b/meta/recipes-devtools/gcc/gcc-crosssdk_11.4.bb
index 40a6c4feff..40a6c4feff 100644
--- a/meta/recipes-devtools/gcc/gcc-crosssdk_11.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-crosssdk_11.4.bb
diff --git a/meta/recipes-devtools/gcc/gcc-multilib-config.inc b/meta/recipes-devtools/gcc/gcc-multilib-config.inc
index 26bfed9507..2dbbc23c94 100644
--- a/meta/recipes-devtools/gcc/gcc-multilib-config.inc
+++ b/meta/recipes-devtools/gcc/gcc-multilib-config.inc
@@ -154,7 +154,7 @@ python gcc_multilib_setup() {
gcc_header_config_files = {
'x86_64' : ['gcc/config/linux.h', 'gcc/config/i386/linux.h', 'gcc/config/i386/linux64.h'],
'i586' : ['gcc/config/linux.h', 'gcc/config/i386/linux.h', 'gcc/config/i386/linux64.h'],
- 'i686' : ['gcc/config/linux.h', 'gcc/config/i386/linux64.h'],
+ 'i686' : ['gcc/config/linux.h', 'gcc/config/i386/linux.h', 'gcc/config/i386/linux64.h'],
'mips' : ['gcc/config/linux.h', 'gcc/config/mips/linux.h', 'gcc/config/mips/linux64.h'],
'mips64' : ['gcc/config/linux.h', 'gcc/config/mips/linux.h', 'gcc/config/mips/linux64.h'],
'powerpc' : ['gcc/config/linux.h', 'gcc/config/rs6000/linux64.h'],
diff --git a/meta/recipes-devtools/gcc/gcc-runtime.inc b/meta/recipes-devtools/gcc/gcc-runtime.inc
index e9f2cf16e8..d019b0790b 100644
--- a/meta/recipes-devtools/gcc/gcc-runtime.inc
+++ b/meta/recipes-devtools/gcc/gcc-runtime.inc
@@ -53,7 +53,7 @@ RUNTIMETARGET:libc-newlib = "libstdc++-v3"
REL_S = "/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}"
DEBUG_PREFIX_MAP:class-target = " \
- -fdebug-prefix-map=${WORKDIR}/recipe-sysroot= \
+ -fdebug-prefix-map=${WORKDIR}/${MLPREFIX}recipe-sysroot= \
-fdebug-prefix-map=${WORKDIR}/recipe-sysroot-native= \
-fdebug-prefix-map=${S}=${REL_S} \
-fdebug-prefix-map=${S}/include=${REL_S}/libstdc++-v3/../include \
@@ -68,7 +68,8 @@ do_configure () {
# libstdc++ isn't built yet so CXX would error not able to find it which breaks stdc++'s configure
# tests. Create a dummy empty lib for the purposes of configure.
mkdir -p ${WORKDIR}/dummylib
- touch ${WORKDIR}/dummylib/libstdc++.so
+ ${CC} -x c /dev/null -c -o ${WORKDIR}/dummylib/dummylib.o
+ ${AR} rcs ${WORKDIR}/dummylib/libstdc++.a ${WORKDIR}/dummylib/dummylib.o
for d in libgcc ${RUNTIMETARGET}; do
echo "Configuring $d"
rm -rf ${B}/${TARGET_SYS}/$d/
diff --git a/meta/recipes-devtools/gcc/gcc-runtime_11.3.bb b/meta/recipes-devtools/gcc/gcc-runtime_11.4.bb
index dd430b57eb..dd430b57eb 100644
--- a/meta/recipes-devtools/gcc/gcc-runtime_11.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-runtime_11.4.bb
diff --git a/meta/recipes-devtools/gcc/gcc-sanitizers_11.3.bb b/meta/recipes-devtools/gcc/gcc-sanitizers_11.4.bb
index 8bda2ccad6..8bda2ccad6 100644
--- a/meta/recipes-devtools/gcc/gcc-sanitizers_11.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-sanitizers_11.4.bb
diff --git a/meta/recipes-devtools/gcc/gcc-shared-source.inc b/meta/recipes-devtools/gcc/gcc-shared-source.inc
index aac4b49313..7aa1c22bf0 100644
--- a/meta/recipes-devtools/gcc/gcc-shared-source.inc
+++ b/meta/recipes-devtools/gcc/gcc-shared-source.inc
@@ -9,3 +9,16 @@ SRC_URI = ""
do_configure[depends] += "gcc-source-${PV}:do_preconfigure"
do_populate_lic[depends] += "gcc-source-${PV}:do_unpack"
+do_deploy_source_date_epoch[depends] += "gcc-source-${PV}:do_deploy_source_date_epoch"
+
+# Copy the SDE from the shared workdir to the recipe workdir
+do_deploy_source_date_epoch () {
+ sde_file=${SDE_FILE}
+ sde_file=${sde_file#${WORKDIR}/}
+ mkdir -p ${SDE_DEPLOYDIR} $(dirname ${SDE_FILE})
+ cp -p $(dirname ${S})/$sde_file ${SDE_DEPLOYDIR}
+ cp -p $(dirname ${S})/$sde_file ${SDE_FILE}
+}
+
+# patch is available via gcc-source recipe
+CVE_CHECK_IGNORE += "CVE-2023-4039"
diff --git a/meta/recipes-devtools/gcc/gcc-source.inc b/meta/recipes-devtools/gcc/gcc-source.inc
index 224b7778ef..265bcf4bef 100644
--- a/meta/recipes-devtools/gcc/gcc-source.inc
+++ b/meta/recipes-devtools/gcc/gcc-source.inc
@@ -17,6 +17,13 @@ STAMPCLEAN = "${STAMPS_DIR}/work-shared/gcc-${PV}-*"
INHIBIT_DEFAULT_DEPS = "1"
DEPENDS = ""
PACKAGES = ""
+TARGET_ARCH = "allarch"
+TARGET_AS_ARCH = "none"
+TARGET_CC_ARCH = "none"
+TARGET_LD_ARCH = "none"
+TARGET_OS = "linux"
+baselib = "lib"
+PACKAGE_ARCH = "all"
B = "${WORKDIR}/build"
@@ -25,8 +32,6 @@ python do_preconfigure () {
import subprocess
cmd = d.expand('cd ${S} && PATH=${PATH} gnu-configize')
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- # See 0044-gengtypes.patch, we need to regenerate this file
- bb.utils.remove(d.expand("${S}/gcc/gengtype-lex.c"))
cmd = d.expand("sed -i 's/BUILD_INFO=info/BUILD_INFO=/' ${S}/gcc/configure")
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
diff --git a/meta/recipes-devtools/gcc/gcc-source_11.3.bb b/meta/recipes-devtools/gcc/gcc-source_11.4.bb
index b890fa33ea..b890fa33ea 100644
--- a/meta/recipes-devtools/gcc/gcc-source_11.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-source_11.4.bb
diff --git a/meta/recipes-devtools/gcc/gcc-testsuite.inc b/meta/recipes-devtools/gcc/gcc-testsuite.inc
index f68fec58ed..64f60c730f 100644
--- a/meta/recipes-devtools/gcc/gcc-testsuite.inc
+++ b/meta/recipes-devtools/gcc/gcc-testsuite.inc
@@ -51,9 +51,10 @@ python check_prepare() {
# enable all valid instructions, since the test suite itself does not
# limit itself to the target cpu options.
# - valid for x86*, powerpc, arm, arm64
- if qemu_binary.lstrip("qemu-") in ["x86_64", "i386", "ppc", "arm", "aarch64"]:
+ if qemu_binary.lstrip("qemu-") in ["x86_64", "i386", "arm", "aarch64"]:
args += ["-cpu", "max"]
-
+ elif qemu_binary.lstrip("qemu-") in ["ppc"]:
+ args += d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')).split()
sysroot = d.getVar("RECIPE_SYSROOT")
args += ["-L", sysroot]
# lib paths are static here instead of using $libdir since this is used by a -cross recipe
diff --git a/meta/recipes-devtools/gcc/gcc/0001-aarch64-Update-Neoverse-N2-core-defini.patch b/meta/recipes-devtools/gcc/gcc/0001-aarch64-Update-Neoverse-N2-core-defini.patch
new file mode 100644
index 0000000000..a0c9db72e1
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc/0001-aarch64-Update-Neoverse-N2-core-defini.patch
@@ -0,0 +1,38 @@
+From 9f37d31324f89d0b7b2abac988a976d121ae29c6 Mon Sep 17 00:00:00 2001
+From: Andre Vieira <andre.simoesdiasvieira@arm.com>
+Date: Thu, 8 Sep 2022 06:02:18 +0000
+Subject: [PATCH 1/4] aarch64: Update Neoverse N2 core definition
+
+commit 9f37d31324f89d0b7b2abac988a976d121ae29c6 from upstream.
+
+gcc/ChangeLog:
+
+ * config/aarch64/aarch64-cores.def: Update Neoverse N2 core entry.
+
+Upstream-Status: Backport
+Signed-off-by: Ruiqiang Hao <Ruiqiang.Hao@windriver.com>
+---
+ gcc/config/aarch64/aarch64-cores.def | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 4643e0e27..3478e567a 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -147,7 +147,6 @@
+ AARCH64_CORE("saphira", saphira, saphira, 8_4A, AARCH64_FL_FOR_ARCH8_4 | AARCH64_FL_CRYPTO, saphira, 0x51, 0xC01, -1)
+
+ /* Armv8.5-A Architecture Processors. */
+-AARCH64_CORE("neoverse-n2", neoversen2, cortexa57, 8_5A, AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_F16 | AARCH64_FL_SVE | AARCH64_FL_SVE2 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG, neoversen2, 0x41, 0xd49, -1)
+ AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, 8_5A, AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_F16 | AARCH64_FL_SVE | AARCH64_FL_SVE2 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG, neoverse512tvb, 0x41, 0xd4f, -1)
+
+ /* ARMv8-A big.LITTLE implementations. */
+@@ -165,4 +164,7 @@
+ /* Armv8-R Architecture Processors. */
+ AARCH64_CORE("cortex-r82", cortexr82, cortexa53, 8R, AARCH64_FL_FOR_ARCH8_R, cortexa53, 0x41, 0xd15, -1)
+
++/* Armv9-A Architecture Processors. */
++AARCH64_CORE("neoverse-n2", neoversen2, cortexa57, 9A, AARCH64_FL_FOR_ARCH9 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG | AARCH64_FL_PROFILE, neoversen2, 0x41, 0xd49, -1)
++
+ #undef AARCH64_CORE
+
diff --git a/meta/recipes-devtools/gcc/gcc/0002-aarch64-add-armv9-a-to-march.patch b/meta/recipes-devtools/gcc/gcc/0002-aarch64-add-armv9-a-to-march.patch
new file mode 100644
index 0000000000..2b1c17f53e
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc/0002-aarch64-add-armv9-a-to-march.patch
@@ -0,0 +1,89 @@
+From d3cf45d15b2fabc767b2d10a0c6bb9fb845e4f99 Mon Sep 17 00:00:00 2001
+From: Przemyslaw Wirkus <przemyslaw.wirkus@arm.com>
+Date: Fri, 1 Oct 2021 10:06:45 +0100
+Subject: [PATCH 2/4] aarch64: add armv9-a to -march
+
+commit f0688d42c9b74a6999548ff2e79ae440b049b87f from upstream
+
+gcc/ChangeLog:
+
+ * config/aarch64/aarch64-arches.def (AARCH64_ARCH): Added
+ armv9-a.
+ * config/aarch64/aarch64.h (AARCH64_FL_V9): New.
+ (AARCH64_FL_FOR_ARCH9): New flags for Armv9-A.
+ (AARCH64_ISA_V9): New ISA flag.
+ * doc/invoke.texi: Update docs.
+
+Upstream-Status: Backport
+Signed-off-by: Ruiqiang Hao <Ruiqiang.Hao@windriver.com>
+---
+ gcc/config/aarch64/aarch64-arches.def | 1 +
+ gcc/config/aarch64/aarch64.h | 5 +++++
+ gcc/doc/invoke.texi | 3 +++
+ 3 files changed, 9 insertions(+)
+
+diff --git a/gcc/config/aarch64/aarch64-arches.def b/gcc/config/aarch64/aarch64-arches.def
+index b7497277b..c47ca622c 100644
+--- a/gcc/config/aarch64/aarch64-arches.def
++++ b/gcc/config/aarch64/aarch64-arches.def
+@@ -38,5 +38,6 @@ AARCH64_ARCH("armv8.4-a", generic, 8_4A, 8, AARCH64_FL_FOR_ARCH8_4)
+ AARCH64_ARCH("armv8.5-a", generic, 8_5A, 8, AARCH64_FL_FOR_ARCH8_5)
+ AARCH64_ARCH("armv8.6-a", generic, 8_6A, 8, AARCH64_FL_FOR_ARCH8_6)
+ AARCH64_ARCH("armv8-r", generic, 8R , 8, AARCH64_FL_FOR_ARCH8_R)
++AARCH64_ARCH("armv9-a", generic, 9A , 9, AARCH64_FL_FOR_ARCH9)
+
+ #undef AARCH64_ARCH
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index bfffbcd6a..b914bfb5c 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -230,6 +230,8 @@ extern unsigned aarch64_architecture_version;
+
+ /* Pointer Authentication (PAUTH) extension. */
+ #define AARCH64_FL_PAUTH (1ULL << 40)
++/* Armv9.0-A. */
++#define AARCH64_FL_V9 (1ULL << 41) /* Armv9.0-A Architecture. */
+
+ /* Has FP and SIMD. */
+ #define AARCH64_FL_FPSIMD (AARCH64_FL_FP | AARCH64_FL_SIMD)
+@@ -257,6 +259,8 @@ extern unsigned aarch64_architecture_version;
+ | AARCH64_FL_I8MM | AARCH64_FL_BF16)
+ #define AARCH64_FL_FOR_ARCH8_R \
+ (AARCH64_FL_FOR_ARCH8_4 | AARCH64_FL_V8_R)
++#define AARCH64_FL_FOR_ARCH9 \
++ (AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_SVE | AARCH64_FL_SVE2 | AARCH64_FL_V9)
+
+ /* Macros to test ISA flags. */
+
+@@ -295,6 +299,7 @@ extern unsigned aarch64_architecture_version;
+ #define AARCH64_ISA_SB (aarch64_isa_flags & AARCH64_FL_SB)
+ #define AARCH64_ISA_V8_R (aarch64_isa_flags & AARCH64_FL_V8_R)
+ #define AARCH64_ISA_PAUTH (aarch64_isa_flags & AARCH64_FL_PAUTH)
++#define AARCH64_ISA_V9 (aarch64_isa_flags & AARCH64_FL_V9)
+
+ /* Crypto is an optional extension to AdvSIMD. */
+ #define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO)
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index c47cfd472..7184a62d0 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -18270,6 +18270,8 @@ and the features that they enable by default:
+ @item @samp{armv8.4-a} @tab Armv8.4-A @tab @samp{armv8.3-a}, @samp{+flagm}, @samp{+fp16fml}, @samp{+dotprod}
+ @item @samp{armv8.5-a} @tab Armv8.5-A @tab @samp{armv8.4-a}, @samp{+sb}, @samp{+ssbs}, @samp{+predres}
+ @item @samp{armv8.6-a} @tab Armv8.6-A @tab @samp{armv8.5-a}, @samp{+bf16}, @samp{+i8mm}
++@item @samp{armv8.7-a} @tab Armv8.7-A @tab @samp{armv8.6-a}, @samp{+ls64}
++@item @samp{armv9-a} @tab Armv9-A @tab @samp{armv8.5-a}, @samp{+sve}, @samp{+sve2}
+ @item @samp{armv8-r} @tab Armv8-R @tab @samp{armv8-r}
+ @end multitable
+
+@@ -19692,6 +19694,7 @@ Permissible names are:
+ @samp{armv8.4-a},
+ @samp{armv8.5-a},
+ @samp{armv8.6-a},
++@samp{armv9-a},
+ @samp{armv7-r},
+ @samp{armv8-r},
+ @samp{armv6-m}, @samp{armv6s-m},
+--
+2.32.0
+
diff --git a/meta/recipes-devtools/gcc/gcc/0003-aarch64-Enable-FP16-feature-by-default-for-Armv9.patch b/meta/recipes-devtools/gcc/gcc/0003-aarch64-Enable-FP16-feature-by-default-for-Armv9.patch
new file mode 100644
index 0000000000..2e85384b43
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc/0003-aarch64-Enable-FP16-feature-by-default-for-Armv9.patch
@@ -0,0 +1,38 @@
+From 49bfa1927813ae898dfa4e0d2bbde033c353e3dc Mon Sep 17 00:00:00 2001
+From: Andre Vieira <andre.simoesdiasvieira@arm.com>
+Date: Tue, 22 Mar 2022 11:44:06 +0000
+Subject: [PATCH 3/4] aarch64: Enable FP16 feature by default for Armv9
+
+commit 0bae246acc758d4b11dd575b05207fd69169109b from upstream
+
+This patch adds the feature bit for FP16 to the feature set for Armv9 since
+Armv9 requires SVE to be implemented and SVE requires FP16 to be implemented.
+
+2022-03-22 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/aarch64/aarch64.h (AARCH64_FL_FOR_ARCH9): Add FP16 feature
+ bit.
+
+Upstream-Status: Backport
+Signed-off-by: Ruiqiang Hao <Ruiqiang.Hao@windriver.com>
+---
+ gcc/config/aarch64/aarch64.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index b914bfb5c..55b60d540 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -260,7 +260,8 @@ extern unsigned aarch64_architecture_version;
+ #define AARCH64_FL_FOR_ARCH8_R \
+ (AARCH64_FL_FOR_ARCH8_4 | AARCH64_FL_V8_R)
+ #define AARCH64_FL_FOR_ARCH9 \
+- (AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_SVE | AARCH64_FL_SVE2 | AARCH64_FL_V9)
++ (AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_SVE | AARCH64_FL_SVE2 | AARCH64_FL_V9 \
++ | AARCH64_FL_F16)
+
+ /* Macros to test ISA flags. */
+
+--
+2.32.0
+
diff --git a/meta/recipes-devtools/gcc/gcc/0004-arm-add-armv9-a-architecture-to-march.patch b/meta/recipes-devtools/gcc/gcc/0004-arm-add-armv9-a-architecture-to-march.patch
new file mode 100644
index 0000000000..b9b0988d5a
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc/0004-arm-add-armv9-a-architecture-to-march.patch
@@ -0,0 +1,291 @@
+From e66a37acae62236611f951e706e9a2bfbd753f39 Mon Sep 17 00:00:00 2001
+From: Przemyslaw Wirkus <przemyslaw.wirkus@arm.com>
+Date: Tue, 9 Nov 2021 09:40:05 +0000
+Subject: [PATCH 4/4] arm: add armv9-a architecture to -march
+
+commit 32ba7860ccaddd5219e6dae94a3d0653e124c9dd from upstream
+
+In this patch:
+ + Add `armv9-a` to -march.
+ + Update multilib with armv9-a and armv9-a+simd.
+
+gcc/ChangeLog:
+
+ * config/arm/arm-cpus.in (armv9): New define.
+ (ARMv9a): New group.
+ (armv9-a): New arch definition.
+ * config/arm/arm-tables.opt: Regenerate.
+ * config/arm/arm.h (BASE_ARCH_9A): New arch enum value.
+ * config/arm/t-aprofile: Added armv9-a and armv9+simd.
+ * config/arm/t-arm-elf: Added arm9-a, v9_fps and all_v9_archs
+ to MULTILIB_MATCHES.
+ * config/arm/t-multilib: Added v9_a_nosimd_variants and
+ v9_a_simd_variants to MULTILIB_MATCHES.
+ * doc/invoke.texi: Update docs.
+
+gcc/testsuite/ChangeLog:
+
+ * gcc.target/arm/multilib.exp: Update test with armv9-a entries.
+ * lib/target-supports.exp (v9a): Add new armflag.
+ (__ARM_ARCH_9A__): Add new armdef.
+
+Upstream-Status: Backport
+Signed-off-by: Ruiqiang Hao <Ruiqiang.Hao@windriver.com>
+---
+ gcc/config/arm/arm-cpus.in | 19 +++++++++++++++++
+ gcc/config/arm/arm-tables.opt | 7 +++++--
+ gcc/config/arm/arm.h | 3 ++-
+ gcc/config/arm/t-aprofile | 25 +++++++++++++++++++----
+ gcc/config/arm/t-arm-elf | 9 ++++++++
+ gcc/config/arm/t-multilib | 12 +++++++++++
+ gcc/doc/invoke.texi | 1 +
+ gcc/testsuite/gcc.target/arm/multilib.exp | 8 ++++++++
+ gcc/testsuite/lib/target-supports.exp | 3 ++-
+ 9 files changed, 79 insertions(+), 8 deletions(-)
+
+Index: gcc/gcc/config/arm/arm-cpus.in
+===================================================================
+--- a/gcc/config/arm/arm-cpus.in
++++ b/gcc/config/arm/arm-cpus.in
+@@ -132,6 +132,9 @@ define feature cmse
+ # Architecture rel 8.1-M.
+ define feature armv8_1m_main
+
++# Architecture rel 9.0.
++define feature armv9
++
+ # Floating point and Neon extensions.
+ # VFPv1 is not supported in GCC.
+
+@@ -293,6 +296,7 @@ define fgroup ARMv8m_base ARMv6m armv8 c
+ define fgroup ARMv8m_main ARMv7m armv8 cmse
+ define fgroup ARMv8r ARMv8a
+ define fgroup ARMv8_1m_main ARMv8m_main armv8_1m_main
++define fgroup ARMv9a ARMv8_5a armv9
+
+ # Useful combinations.
+ define fgroup VFPv2 vfpv2
+@@ -751,6 +755,21 @@ begin arch armv8.1-m.main
+ option cdecp7 add cdecp7
+ end arch armv8.1-m.main
+
++begin arch armv9-a
++ tune for cortex-a53
++ tune flags CO_PROC
++ base 9A
++ profile A
++ isa ARMv9a
++ option simd add FP_ARMv8 DOTPROD
++ option fp16 add fp16 fp16fml FP_ARMv8 DOTPROD
++ option crypto add FP_ARMv8 CRYPTO DOTPROD
++ option nocrypto remove ALL_CRYPTO
++ option nofp remove ALL_FP
++ option i8mm add i8mm FP_ARMv8 DOTPROD
++ option bf16 add bf16 FP_ARMv8 DOTPROD
++end arch armv9-a
++
+ begin arch iwmmxt
+ tune for iwmmxt
+ tune flags LDSCHED STRONG XSCALE
+Index: gcc/gcc/config/arm/arm-tables.opt
+===================================================================
+--- a/gcc/config/arm/arm-tables.opt
++++ b/gcc/config/arm/arm-tables.opt
+@@ -380,10 +380,13 @@ EnumValue
+ Enum(arm_arch) String(armv8.1-m.main) Value(30)
+
+ EnumValue
+-Enum(arm_arch) String(iwmmxt) Value(31)
++Enum(arm_arch) String(armv9-a) Value(31)
+
+ EnumValue
+-Enum(arm_arch) String(iwmmxt2) Value(32)
++Enum(arm_arch) String(iwmmxt) Value(32)
++
++EnumValue
++Enum(arm_arch) String(iwmmxt2) Value(33)
+
+ Enum
+ Name(arm_fpu) Type(enum fpu_type)
+Index: gcc/gcc/config/arm/arm.h
+===================================================================
+--- a/gcc/config/arm/arm.h
++++ b/gcc/config/arm/arm.h
+@@ -456,7 +456,8 @@ enum base_architecture
+ BASE_ARCH_8A = 8,
+ BASE_ARCH_8M_BASE = 8,
+ BASE_ARCH_8M_MAIN = 8,
+- BASE_ARCH_8R = 8
++ BASE_ARCH_8R = 8,
++ BASE_ARCH_9A = 9
+ };
+
+ /* The major revision number of the ARM Architecture implemented by the target. */
+Index: gcc/gcc/config/arm/t-aprofile
+===================================================================
+--- a/gcc/config/arm/t-aprofile
++++ b/gcc/config/arm/t-aprofile
+@@ -26,8 +26,8 @@
+
+ # Arch and FPU variants to build libraries with
+
+-MULTI_ARCH_OPTS_A = march=armv7-a/march=armv7-a+fp/march=armv7-a+simd/march=armv7ve+simd/march=armv8-a/march=armv8-a+simd
+-MULTI_ARCH_DIRS_A = v7-a v7-a+fp v7-a+simd v7ve+simd v8-a v8-a+simd
++MULTI_ARCH_OPTS_A = march=armv7-a/march=armv7-a+fp/march=armv7-a+simd/march=armv7ve+simd/march=armv8-a/march=armv8-a+simd/march=armv9-a/march=armv9-a+simd
++MULTI_ARCH_DIRS_A = v7-a v7-a+fp v7-a+simd v7ve+simd v8-a v8-a+simd v9-a v9-a+simd
+
+ # ARMv7-A - build nofp, fp-d16 and SIMD variants
+
+@@ -46,6 +46,11 @@ MULTILIB_REQUIRED += mthumb/march=armv8-
+ MULTILIB_REQUIRED += mthumb/march=armv8-a+simd/mfloat-abi=hard
+ MULTILIB_REQUIRED += mthumb/march=armv8-a+simd/mfloat-abi=softfp
+
++# Armv9-A - build nofp and SIMD variants.
++MULTILIB_REQUIRED += mthumb/march=armv9-a/mfloat-abi=soft
++MULTILIB_REQUIRED += mthumb/march=armv9-a+simd/mfloat-abi=hard
++MULTILIB_REQUIRED += mthumb/march=armv9-a+simd/mfloat-abi=softfp
++
+ # Matches
+
+ # Arch Matches
+@@ -129,17 +134,29 @@ MULTILIB_MATCHES += march?armv8-a=march?
+ MULTILIB_MATCHES += $(foreach ARCH, $(v8_6_a_simd_variants), \
+ march?armv8-a+simd=march?armv8.6-a$(ARCH))
+
++# Armv9 without SIMD: map down to base architecture
++MULTILIB_MATCHES += $(foreach ARCH, $(v9_a_nosimd_variants), \
++ march?armv9-a=march?armv9-a$(ARCH))
++
++# Armv9 with SIMD: map down to base arch + simd
++MULTILIB_MATCHES += march?armv9-a+simd=march?armv9-a+crc+simd \
++ $(foreach ARCH, $(filter-out +simd, $(v9_a_simd_variants)), \
++ march?armv9-a+simd=march?armv9-a$(ARCH) \
++ march?armv9-a+simd=march?armv9-a+crc$(ARCH))
++
+ # Use Thumb libraries for everything.
+
+ MULTILIB_REUSE += mthumb/march.armv7-a/mfloat-abi.soft=marm/march.armv7-a/mfloat-abi.soft
+
+ MULTILIB_REUSE += mthumb/march.armv8-a/mfloat-abi.soft=marm/march.armv8-a/mfloat-abi.soft
+
++MULTILIB_REUSE += mthumb/march.armv9-a/mfloat-abi.soft=marm/march.armv9-a/mfloat-abi.soft
++
+ MULTILIB_REUSE += $(foreach ABI, hard softfp, \
+- $(foreach ARCH, armv7-a+fp armv7-a+simd armv7ve+simd armv8-a+simd, \
++ $(foreach ARCH, armv7-a+fp armv7-a+simd armv7ve+simd armv8-a+simd armv9-a+simd, \
+ mthumb/march.$(ARCH)/mfloat-abi.$(ABI)=marm/march.$(ARCH)/mfloat-abi.$(ABI)))
+
+ # Softfp but no FP, use the soft-float libraries.
+ MULTILIB_REUSE += $(foreach MODE, arm thumb, \
+- $(foreach ARCH, armv7-a armv8-a, \
++ $(foreach ARCH, armv7-a armv8-a armv9-a, \
+ mthumb/march.$(ARCH)/mfloat-abi.soft=m$(MODE)/march.$(ARCH)/mfloat-abi.softfp))
+Index: gcc/gcc/config/arm/t-arm-elf
+===================================================================
+--- a/gcc/config/arm/t-arm-elf
++++ b/gcc/config/arm/t-arm-elf
+@@ -38,6 +38,8 @@ v7ve_fps := vfpv3-d16 vfpv3 vfpv3-d16-fp
+ # it seems to work ok.
+ v8_fps := simd fp16 crypto fp16+crypto dotprod fp16fml
+
++v9_fps := simd fp16 crypto fp16+crypto dotprod fp16fml
++
+ # We don't do anything special with these. Pre-v4t probably doesn't work.
+ all_early_nofp := armv4 armv4t armv5t
+
+@@ -49,6 +51,8 @@ all_v7_a_r := armv7-a armv7ve armv7-r
+ all_v8_archs := armv8-a armv8-a+crc armv8.1-a armv8.2-a armv8.3-a armv8.4-a \
+ armv8.5-a armv8.6-a
+
++all_v9_archs := armv9-a
++
+ # No floating point variants, require thumb1 softfp
+ all_nofp_t := armv6-m armv6s-m armv8-m.base
+
+@@ -110,6 +114,11 @@ MULTILIB_MATCHES += $(foreach ARCH,
+ $(foreach FPARCH, $(v8_fps), \
+ march?armv7+fp=march?$(ARCH)+$(FPARCH)))
+
++MULTILIB_MATCHES += $(foreach ARCH, $(all_v9_archs), \
++ march?armv7+fp=march?$(ARCH) \
++ $(foreach FPARCH, $(v9_fps), \
++ march?armv7+fp=march?$(ARCH)+$(FPARCH)))
++
+ MULTILIB_MATCHES += $(foreach ARCH, armv7e-m armv8-m.mainline, \
+ march?armv7+fp=march?$(ARCH)+fp.dp)
+
+Index: gcc/gcc/config/arm/t-multilib
+===================================================================
+--- a/gcc/config/arm/t-multilib
++++ b/gcc/config/arm/t-multilib
+@@ -78,6 +78,8 @@ v8_4_a_simd_variants := $(call all_feat_
+ v8_5_a_simd_variants := $(call all_feat_combs, simd fp16 crypto i8mm bf16)
+ v8_6_a_simd_variants := $(call all_feat_combs, simd fp16 crypto i8mm bf16)
+ v8_r_nosimd_variants := +crc
++v9_a_nosimd_variants := +crc
++v9_a_simd_variants := $(call all_feat_combs, simd fp16 crypto i8mm bf16)
+
+ ifneq (,$(HAS_APROFILE))
+ include $(srcdir)/config/arm/t-aprofile
+@@ -202,6 +204,16 @@ MULTILIB_MATCHES += march?armv7=march?ar
+ MULTILIB_MATCHES += $(foreach ARCH, $(v8_6_a_simd_variants), \
+ march?armv7+fp=march?armv8.6-a$(ARCH))
+
++# Armv9
++MULTILIB_MATCHES += march?armv7=march?armv9-a
++MULTILIB_MATCHES += $(foreach ARCH, $(v9_a_nosimd_variants), \
++ march?armv7=march?armv9-a$(ARCH))
++
++# Armv9 with SIMD
++MULTILIB_MATCHES += march?armv7+fp=march?armv9-a+crc+simd \
++ $(foreach ARCH, $(v9_a_simd_variants), \
++ march?armv7+fp=march?armv9-a$(ARCH) \
++ march?armv7+fp=march?armv9-a+crc$(ARCH))
+ endif # Not APROFILE.
+
+ # Use Thumb libraries for everything.
+Index: gcc/gcc/doc/invoke.texi
+===================================================================
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -19701,6 +19701,7 @@ Permissible names are:
+ @samp{armv7-m}, @samp{armv7e-m},
+ @samp{armv8-m.base}, @samp{armv8-m.main},
+ @samp{armv8.1-m.main},
++@samp{armv9-a},
+ @samp{iwmmxt} and @samp{iwmmxt2}.
+
+ Additionally, the following architectures, which lack support for the
+Index: gcc/gcc/testsuite/gcc.target/arm/multilib.exp
+===================================================================
+--- a/gcc/testsuite/gcc.target/arm/multilib.exp
++++ b/gcc/testsuite/gcc.target/arm/multilib.exp
+@@ -135,6 +135,14 @@ if {[multilib_config "aprofile"] } {
+ {-march=armv8.6-a+simd+fp16 -mfloat-abi=softfp} "thumb/v8-a+simd/softfp"
+ {-march=armv8.6-a+simd+fp16+nofp -mfloat-abi=softfp} "thumb/v8-a/nofp"
+ {-march=armv8.6-a+simd+nofp+fp16 -mfloat-abi=softfp} "thumb/v8-a+simd/softfp"
++ {-march=armv9-a+crypto -mfloat-abi=soft} "thumb/v9-a/nofp"
++ {-march=armv9-a+simd+crypto -mfloat-abi=softfp} "thumb/v9-a+simd/softfp"
++ {-march=armv9-a+simd+crypto+nofp -mfloat-abi=softfp} "thumb/v9-a/nofp"
++ {-march=armv9-a+simd+nofp+crypto -mfloat-abi=softfp} "thumb/v9-a+simd/softfp"
++ {-march=armv9-a+fp16 -mfloat-abi=soft} "thumb/v9-a/nofp"
++ {-march=armv9-a+simd+fp16 -mfloat-abi=softfp} "thumb/v9-a+simd/softfp"
++ {-march=armv9-a+simd+fp16+nofp -mfloat-abi=softfp} "thumb/v9-a/nofp"
++ {-march=armv9-a+simd+nofp+fp16 -mfloat-abi=softfp} "thumb/v9-a+simd/softfp"
+ {-mcpu=cortex-a53+crypto -mfloat-abi=hard} "thumb/v8-a+simd/hard"
+ {-mcpu=cortex-a53+nofp -mfloat-abi=softfp} "thumb/v8-a/nofp"
+ {-march=armv8-a+crc -mfloat-abi=hard -mfpu=vfp} "thumb/v8-a+simd/hard"
+Index: gcc/gcc/testsuite/lib/target-supports.exp
+===================================================================
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -4820,7 +4820,8 @@ foreach { armfunc armflag armdefs } {
+ v8m_base "-march=armv8-m.base -mthumb -mfloat-abi=soft"
+ __ARM_ARCH_8M_BASE__
+ v8m_main "-march=armv8-m.main -mthumb" __ARM_ARCH_8M_MAIN__
+- v8_1m_main "-march=armv8.1-m.main -mthumb" __ARM_ARCH_8M_MAIN__ } {
++ v8_1m_main "-march=armv8.1-m.main -mthumb" __ARM_ARCH_8M_MAIN__
++ v9a "-march=armv9-a" __ARM_ARCH_9A__ } {
+ eval [string map [list FUNC $armfunc FLAG $armflag DEFS $armdefs ] {
+ proc check_effective_target_arm_arch_FUNC_ok { } {
+ return [check_no_compiler_messages arm_arch_FUNC_ok assembly {
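A quick way to confirm which multilib a given flag combination now selects is GCC's -print-multi-directory option. The sketch below is illustrative only: it assumes an aprofile-configured arm-none-eabi toolchain built with the change above, and the expected directories are taken from the multilib.exp entries added in the hunk above.

/* Link-time smoke test for the new Armv9-A multilibs (illustration only,
 * not part of the patch).  With an aprofile arm-none-eabi toolchain that
 * carries the change:
 *
 *   arm-none-eabi-gcc -march=armv9-a+simd+crypto -mfloat-abi=softfp \
 *       -print-multi-directory        ->  thumb/v9-a+simd/softfp
 *   arm-none-eabi-gcc -march=armv9-a+fp16 -mfloat-abi=soft \
 *       -print-multi-directory        ->  thumb/v9-a/nofp
 *
 * both matching the expectations added to multilib.exp above.  Linking
 * this file with the same flags should then pull in the v9-a variant of
 * libgcc rather than the default multilib. */
int main(void)
{
    return 0;
}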
diff --git a/meta/recipes-devtools/gcc/gcc/0006-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch b/meta/recipes-devtools/gcc/gcc/0006-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
index ef19eef822..ece5873258 100644
--- a/meta/recipes-devtools/gcc/gcc/0006-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
+++ b/meta/recipes-devtools/gcc/gcc/0006-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
@@ -1,4 +1,4 @@
-From 84dd8ea4c982fc2c82af642293d29e9c1880de5b Mon Sep 17 00:00:00 2001
+From 4de00af67b57b5440bdf61ab364ad959ad0aeee7 Mon Sep 17 00:00:00 2001
From: Khem Raj <raj.khem@gmail.com>
Date: Fri, 29 Mar 2013 09:24:50 +0400
Subject: [PATCH] Define GLIBC_DYNAMIC_LINKER and UCLIBC_DYNAMIC_LINKER
@@ -12,26 +12,35 @@ SH, sparc, alpha for possible future support (if any)
Removes the do_headerfix task in metadata
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
Upstream-Status: Inappropriate [OE configuration]
Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+Refresh patch from master to deduplicate patches and fix arm linker
+Signed-off-by: Pavel Zhukov <pavel@zhukoff.net>
---
gcc/config/aarch64/aarch64-linux.h | 4 ++--
gcc/config/alpha/linux-elf.h | 4 ++--
- gcc/config/arm/linux-eabi.h | 4 ++--
+ gcc/config/arm/linux-eabi.h | 6 +++---
gcc/config/arm/linux-elf.h | 2 +-
- gcc/config/i386/linux.h | 2 +-
- gcc/config/i386/linux64.h | 6 +++---
+ gcc/config/i386/linux.h | 4 ++--
+ gcc/config/i386/linux64.h | 12 ++++++------
gcc/config/linux.h | 8 ++++----
- gcc/config/mips/linux.h | 12 ++++++------
- gcc/config/riscv/linux.h | 2 +-
+ gcc/config/microblaze/linux.h | 4 ++--
+ gcc/config/mips/linux.h | 18 +++++++++---------
+ gcc/config/nios2/linux.h | 4 ++--
+ gcc/config/riscv/linux.h | 4 ++--
gcc/config/rs6000/linux64.h | 15 +++++----------
- gcc/config/sh/linux.h | 2 +-
+ gcc/config/rs6000/sysv4.h | 4 ++--
+ gcc/config/s390/linux.h | 8 ++++----
+ gcc/config/sh/linux.h | 4 ++--
gcc/config/sparc/linux.h | 2 +-
gcc/config/sparc/linux64.h | 4 ++--
- 13 files changed, 31 insertions(+), 36 deletions(-)
+ 17 files changed, 53 insertions(+), 58 deletions(-)
-diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h
-index 7f2529a2a1d..4bcae7f3110 100644
+Index: gcc/gcc/config/aarch64/aarch64-linux.h
+===================================================================
--- a/gcc/config/aarch64/aarch64-linux.h
+++ b/gcc/config/aarch64/aarch64-linux.h
@@ -21,10 +21,10 @@
@@ -47,11 +56,11 @@ index 7f2529a2a1d..4bcae7f3110 100644
#undef ASAN_CC1_SPEC
#define ASAN_CC1_SPEC "%{%:sanitize(address):-funwind-tables}"
-diff --git a/gcc/config/alpha/linux-elf.h b/gcc/config/alpha/linux-elf.h
-index c1dae8ca2cf..3ce2b76c1a4 100644
+Index: gcc/gcc/config/alpha/linux-elf.h
+===================================================================
--- a/gcc/config/alpha/linux-elf.h
+++ b/gcc/config/alpha/linux-elf.h
-@@ -23,8 +23,8 @@ along with GCC; see the file COPYING3. If not see
+@@ -23,8 +23,8 @@ along with GCC; see the file COPYING3.
#define EXTRA_SPECS \
{ "elf_dynamic_linker", ELF_DYNAMIC_LINKER },
@@ -62,8 +71,8 @@ index c1dae8ca2cf..3ce2b76c1a4 100644
#if DEFAULT_LIBC == LIBC_UCLIBC
#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}"
#elif DEFAULT_LIBC == LIBC_GLIBC
-diff --git a/gcc/config/arm/linux-eabi.h b/gcc/config/arm/linux-eabi.h
-index 85d0136e76e..6bd95855827 100644
+Index: gcc/gcc/config/arm/linux-eabi.h
+===================================================================
--- a/gcc/config/arm/linux-eabi.h
+++ b/gcc/config/arm/linux-eabi.h
@@ -65,8 +65,8 @@
@@ -77,8 +86,17 @@ index 85d0136e76e..6bd95855827 100644
#define GLIBC_DYNAMIC_LINKER_DEFAULT GLIBC_DYNAMIC_LINKER_SOFT_FLOAT
#define GLIBC_DYNAMIC_LINKER \
-diff --git a/gcc/config/arm/linux-elf.h b/gcc/config/arm/linux-elf.h
-index 0c1c4e70b6b..6bd643ade11 100644
+@@ -89,7 +89,7 @@
+ #define MUSL_DYNAMIC_LINKER_E "%{mbig-endian:eb}"
+ #endif
+ #define MUSL_DYNAMIC_LINKER \
+- "/lib/ld-musl-arm" MUSL_DYNAMIC_LINKER_E "%{mfloat-abi=hard:hf}%{mfdpic:-fdpic}.so.1"
++ SYSTEMLIBS_DIR "ld-musl-arm" MUSL_DYNAMIC_LINKER_E "%{mfloat-abi=hard:hf}%{mfdpic:-fdpic}.so.1"
+
+ /* At this point, bpabi.h will have clobbered LINK_SPEC. We want to
+ use the GNU/Linux version, not the generic BPABI version. */
+Index: gcc/gcc/config/arm/linux-elf.h
+===================================================================
--- a/gcc/config/arm/linux-elf.h
+++ b/gcc/config/arm/linux-elf.h
@@ -60,7 +60,7 @@
@@ -90,11 +108,11 @@ index 0c1c4e70b6b..6bd643ade11 100644
#define LINUX_TARGET_LINK_SPEC "%{h*} \
%{static:-Bstatic} \
-diff --git a/gcc/config/i386/linux.h b/gcc/config/i386/linux.h
-index 04b274f1654..7aafcf3ac2d 100644
+Index: gcc/gcc/config/i386/linux.h
+===================================================================
--- a/gcc/config/i386/linux.h
+++ b/gcc/config/i386/linux.h
-@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see
+@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3.
<http://www.gnu.org/licenses/>. */
#define GNU_USER_LINK_EMULATION "elf_i386"
@@ -102,12 +120,13 @@ index 04b274f1654..7aafcf3ac2d 100644
+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2"
#undef MUSL_DYNAMIC_LINKER
- #define MUSL_DYNAMIC_LINKER "/lib/ld-musl-i386.so.1"
-diff --git a/gcc/config/i386/linux64.h b/gcc/config/i386/linux64.h
-index b3822ced528..92d303e80d6 100644
+-#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-i386.so.1"
++#define MUSL_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-musl-i386.so.1"
+Index: gcc/gcc/config/i386/linux64.h
+===================================================================
--- a/gcc/config/i386/linux64.h
+++ b/gcc/config/i386/linux64.h
-@@ -27,9 +27,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+@@ -27,13 +27,13 @@ see the files COPYING3 and COPYING.RUNTI
#define GNU_USER_LINK_EMULATION64 "elf_x86_64"
#define GNU_USER_LINK_EMULATIONX32 "elf32_x86_64"
@@ -119,12 +138,19 @@ index b3822ced528..92d303e80d6 100644
+#define GLIBC_DYNAMIC_LINKERX32 SYSTEMLIBS_DIR "ld-linux-x32.so.2"
#undef MUSL_DYNAMIC_LINKER32
- #define MUSL_DYNAMIC_LINKER32 "/lib/ld-musl-i386.so.1"
-diff --git a/gcc/config/linux.h b/gcc/config/linux.h
-index 4e1db60fced..87efc5f69fe 100644
+-#define MUSL_DYNAMIC_LINKER32 "/lib/ld-musl-i386.so.1"
++#define MUSL_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-musl-i386.so.1"
+ #undef MUSL_DYNAMIC_LINKER64
+-#define MUSL_DYNAMIC_LINKER64 "/lib/ld-musl-x86_64.so.1"
++#define MUSL_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld-musl-x86_64.so.1"
+ #undef MUSL_DYNAMIC_LINKERX32
+-#define MUSL_DYNAMIC_LINKERX32 "/lib/ld-musl-x32.so.1"
++#define MUSL_DYNAMIC_LINKERX32 SYSTEMLIBS_DIR "ld-musl-x32.so.1"
+Index: gcc/gcc/config/linux.h
+===================================================================
--- a/gcc/config/linux.h
+++ b/gcc/config/linux.h
-@@ -94,10 +94,10 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+@@ -94,10 +94,10 @@ see the files COPYING3 and COPYING.RUNTI
GLIBC_DYNAMIC_LINKER must be defined for each target using them, or
GLIBC_DYNAMIC_LINKER32 and GLIBC_DYNAMIC_LINKER64 for targets
supporting both 32-bit and 64-bit compilation. */
@@ -139,11 +165,33 @@ index 4e1db60fced..87efc5f69fe 100644
#define BIONIC_DYNAMIC_LINKER "/system/bin/linker"
#define BIONIC_DYNAMIC_LINKER32 "/system/bin/linker"
#define BIONIC_DYNAMIC_LINKER64 "/system/bin/linker64"
-diff --git a/gcc/config/mips/linux.h b/gcc/config/mips/linux.h
-index 44a85e410d9..8d41b5574f6 100644
+Index: gcc/gcc/config/microblaze/linux.h
+===================================================================
+--- a/gcc/config/microblaze/linux.h
++++ b/gcc/config/microblaze/linux.h
+@@ -28,7 +28,7 @@
+ #undef TLS_NEEDS_GOT
+ #define TLS_NEEDS_GOT 1
+
+-#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "/ld.so.1"
+ #define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0"
+
+ #if TARGET_BIG_ENDIAN_DEFAULT == 0 /* LE */
+@@ -38,7 +38,7 @@
+ #endif
+
+ #undef MUSL_DYNAMIC_LINKER
+-#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-microblaze" MUSL_DYNAMIC_LINKER_E ".so.1"
++#define MUSL_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-musl-microblaze" MUSL_DYNAMIC_LINKER_E ".so.1"
+
+ #undef SUBTARGET_EXTRA_SPECS
+ #define SUBTARGET_EXTRA_SPECS \
+Index: gcc/gcc/config/mips/linux.h
+===================================================================
--- a/gcc/config/mips/linux.h
+++ b/gcc/config/mips/linux.h
-@@ -22,20 +22,20 @@ along with GCC; see the file COPYING3. If not see
+@@ -22,29 +22,29 @@ along with GCC; see the file COPYING3.
#define GNU_USER_LINK_EMULATIONN32 "elf32%{EB:b}%{EL:l}tsmipn32"
#define GLIBC_DYNAMIC_LINKER32 \
@@ -170,11 +218,36 @@ index 44a85e410d9..8d41b5574f6 100644
#undef MUSL_DYNAMIC_LINKER32
#define MUSL_DYNAMIC_LINKER32 \
-diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
-index fce5b896e6e..03aa55cb5ab 100644
+- "/lib/ld-musl-mips%{mips32r6|mips64r6:r6}%{EL:el}%{msoft-float:-sf}.so.1"
++ SYSTEMLIBS_DIR "ld-musl-mips%{mips32r6|mips64r6:r6}%{EL:el}%{msoft-float:-sf}.so.1"
+ #undef MUSL_DYNAMIC_LINKER64
+ #define MUSL_DYNAMIC_LINKER64 \
+- "/lib/ld-musl-mips64%{mips64r6:r6}%{EL:el}%{msoft-float:-sf}.so.1"
++ SYSTEMLIBS_DIR "ld-musl-mips64%{mips64r6:r6}%{EL:el}%{msoft-float:-sf}.so.1"
+ #define MUSL_DYNAMIC_LINKERN32 \
+- "/lib/ld-musl-mipsn32%{mips64r6:r6}%{EL:el}%{msoft-float:-sf}.so.1"
++ SYSTEMLIBS_DIR "ld-musl-mipsn32%{mips64r6:r6}%{EL:el}%{msoft-float:-sf}.so.1"
+
+ #define BIONIC_DYNAMIC_LINKERN32 "/system/bin/linker32"
+ #define GNU_USER_DYNAMIC_LINKERN32 \
+Index: gcc/gcc/config/nios2/linux.h
+===================================================================
+--- a/gcc/config/nios2/linux.h
++++ b/gcc/config/nios2/linux.h
+@@ -29,7 +29,7 @@
+ #undef CPP_SPEC
+ #define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-nios2.so.1"
++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux-nios2.so.1"
+
+ #undef LINK_SPEC
+ #define LINK_SPEC LINK_SPEC_ENDIAN \
+Index: gcc/gcc/config/riscv/linux.h
+===================================================================
--- a/gcc/config/riscv/linux.h
+++ b/gcc/config/riscv/linux.h
-@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3. If not see
+@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3.
GNU_USER_TARGET_OS_CPP_BUILTINS(); \
} while (0)
@@ -183,8 +256,17 @@ index fce5b896e6e..03aa55cb5ab 100644
#define MUSL_ABI_SUFFIX \
"%{mabi=ilp32:-sf}" \
-diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h
-index e3f2cd254f6..a11e01faa3d 100644
+@@ -33,7 +33,7 @@ along with GCC; see the file COPYING3.
+ "%{mabi=lp64d:}"
+
+ #undef MUSL_DYNAMIC_LINKER
+-#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-riscv" XLEN_SPEC MUSL_ABI_SUFFIX ".so.1"
++#define MUSL_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-musl-riscv" XLEN_SPEC MUSL_ABI_SUFFIX ".so.1"
+
+ /* Because RISC-V only has word-sized atomics, it requries libatomic where
+ others do not. So link libatomic by default, as needed. */
+Index: gcc/gcc/config/rs6000/linux64.h
+===================================================================
--- a/gcc/config/rs6000/linux64.h
+++ b/gcc/config/rs6000/linux64.h
@@ -336,24 +336,19 @@ extern int dot_symbols;
@@ -217,12 +299,55 @@ index e3f2cd254f6..a11e01faa3d 100644
#undef DEFAULT_ASM_ENDIAN
#if (TARGET_DEFAULT & MASK_LITTLE_ENDIAN)
-diff --git a/gcc/config/sh/linux.h b/gcc/config/sh/linux.h
-index 7558d2f7195..3aaa6c3a078 100644
+Index: gcc/gcc/config/rs6000/sysv4.h
+===================================================================
+--- a/gcc/config/rs6000/sysv4.h
++++ b/gcc/config/rs6000/sysv4.h
+@@ -780,10 +780,10 @@ GNU_USER_TARGET_CC1_SPEC
+
+ #define MUSL_DYNAMIC_LINKER_E ENDIAN_SELECT("","le","")
+
+-#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld.so.1"
+ #undef MUSL_DYNAMIC_LINKER
+ #define MUSL_DYNAMIC_LINKER \
+- "/lib/ld-musl-powerpc" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1"
++ SYSTEMLIBS_DIR "ld-musl-powerpc" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1"
+
+ #ifndef GNU_USER_DYNAMIC_LINKER
+ #define GNU_USER_DYNAMIC_LINKER GLIBC_DYNAMIC_LINKER
+Index: gcc/gcc/config/s390/linux.h
+===================================================================
+--- a/gcc/config/s390/linux.h
++++ b/gcc/config/s390/linux.h
+@@ -72,13 +72,13 @@ along with GCC; see the file COPYING3.
+ #define MULTILIB_DEFAULTS { "m31" }
+ #endif
+
+-#define GLIBC_DYNAMIC_LINKER32 "/lib/ld.so.1"
+-#define GLIBC_DYNAMIC_LINKER64 "/lib/ld64.so.1"
++#define GLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld.so.1"
++#define GLIBC_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld64.so.1"
+
+ #undef MUSL_DYNAMIC_LINKER32
+-#define MUSL_DYNAMIC_LINKER32 "/lib/ld-musl-s390.so.1"
++#define MUSL_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-musl-s390.so.1"
+ #undef MUSL_DYNAMIC_LINKER64
+-#define MUSL_DYNAMIC_LINKER64 "/lib/ld-musl-s390x.so.1"
++#define MUSL_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld-musl-s390x.so.1"
+
+ #undef LINK_SPEC
+ #define LINK_SPEC \
+Index: gcc/gcc/config/sh/linux.h
+===================================================================
--- a/gcc/config/sh/linux.h
+++ b/gcc/config/sh/linux.h
-@@ -64,7 +64,7 @@ along with GCC; see the file COPYING3. If not see
- "/lib/ld-musl-sh" MUSL_DYNAMIC_LINKER_E MUSL_DYNAMIC_LINKER_FP \
+@@ -61,10 +61,10 @@ along with GCC; see the file COPYING3.
+
+ #undef MUSL_DYNAMIC_LINKER
+ #define MUSL_DYNAMIC_LINKER \
+- "/lib/ld-musl-sh" MUSL_DYNAMIC_LINKER_E MUSL_DYNAMIC_LINKER_FP \
++ SYSTEMLIBS_DIR "ld-musl-sh" MUSL_DYNAMIC_LINKER_E MUSL_DYNAMIC_LINKER_FP \
"%{mfdpic:-fdpic}.so.1"
-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"
@@ -230,11 +355,11 @@ index 7558d2f7195..3aaa6c3a078 100644
#undef SUBTARGET_LINK_EMUL_SUFFIX
#define SUBTARGET_LINK_EMUL_SUFFIX "%{mfdpic:_fd;:_linux}"
-diff --git a/gcc/config/sparc/linux.h b/gcc/config/sparc/linux.h
-index 2550d7ee8f0..a94f4cd8ba2 100644
+Index: gcc/gcc/config/sparc/linux.h
+===================================================================
--- a/gcc/config/sparc/linux.h
+++ b/gcc/config/sparc/linux.h
-@@ -78,7 +78,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
+@@ -78,7 +78,7 @@ extern const char *host_detect_local_cpu
When the -shared link option is used a final link is not being
done. */
@@ -243,11 +368,11 @@ index 2550d7ee8f0..a94f4cd8ba2 100644
#undef LINK_SPEC
#define LINK_SPEC "-m elf32_sparc %{shared:-shared} \
-diff --git a/gcc/config/sparc/linux64.h b/gcc/config/sparc/linux64.h
-index 95af8afa9b5..63127afb074 100644
+Index: gcc/gcc/config/sparc/linux64.h
+===================================================================
--- a/gcc/config/sparc/linux64.h
+++ b/gcc/config/sparc/linux64.h
-@@ -78,8 +78,8 @@ along with GCC; see the file COPYING3. If not see
+@@ -78,8 +78,8 @@ along with GCC; see the file COPYING3.
When the -shared link option is used a final link is not being
done. */
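The refresh boils down to replacing hard-coded "/lib/..." loader paths with SYSTEMLIBS_DIR-prefixed ones, relying on C string-literal concatenation inside the spec definitions. A minimal sketch of that mechanism follows; the "/lib/" value of SYSTEMLIBS_DIR is assumed purely for the demo, since the real definition is supplied elsewhere by the OE gcc recipes.

/* Illustration of the string splicing used above (not part of the patch).
 * SYSTEMLIBS_DIR is assumed to be "/lib/" here; in a real OE build its
 * value comes from the toolchain configuration. */
#include <stdio.h>

#define SYSTEMLIBS_DIR "/lib/"                              /* assumed for the demo */
#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2"
#define MUSL_DYNAMIC_LINKER  SYSTEMLIBS_DIR "ld-musl-i386.so.1"

int main(void)
{
    /* Adjacent string literals concatenate at compile time. */
    printf("glibc loader: %s\n", GLIBC_DYNAMIC_LINKER);     /* /lib/ld-linux.so.2 */
    printf("musl loader : %s\n", MUSL_DYNAMIC_LINKER);      /* /lib/ld-musl-i386.so.1 */
    return 0;
}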
diff --git a/meta/recipes-devtools/gcc/gcc/0009-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch b/meta/recipes-devtools/gcc/gcc/0009-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
index ac139542f1..1ec942e977 100644
--- a/meta/recipes-devtools/gcc/gcc/0009-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
+++ b/meta/recipes-devtools/gcc/gcc/0009-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
@@ -18,13 +18,13 @@ Upstream-Status: Pending
gcc/config/arm/linux-eabi.h | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
-diff --git a/gcc/config/arm/linux-eabi.h b/gcc/config/arm/linux-eabi.h
-index 6bd95855827..77befab5da8 100644
+Index: gcc/gcc/config/arm/linux-eabi.h
+===================================================================
--- a/gcc/config/arm/linux-eabi.h
+++ b/gcc/config/arm/linux-eabi.h
@@ -91,10 +91,14 @@
#define MUSL_DYNAMIC_LINKER \
- "/lib/ld-musl-arm" MUSL_DYNAMIC_LINKER_E "%{mfloat-abi=hard:hf}%{mfdpic:-fdpic}.so.1"
+ SYSTEMLIBS_DIR "ld-musl-arm" MUSL_DYNAMIC_LINKER_E "%{mfloat-abi=hard:hf}%{mfdpic:-fdpic}.so.1"
+/* For armv4 we pass --fix-v4bx to linker to support EABI */
+#undef TARGET_FIX_V4BX_SPEC
diff --git a/meta/recipes-devtools/gcc/gcc/0019-nios2-Define-MUSL_DYNAMIC_LINKER.patch b/meta/recipes-devtools/gcc/gcc/0019-nios2-Define-MUSL_DYNAMIC_LINKER.patch
deleted file mode 100644
index 76ebfd7f77..0000000000
--- a/meta/recipes-devtools/gcc/gcc/0019-nios2-Define-MUSL_DYNAMIC_LINKER.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 9ec4db8e910d9a51ae43f6b20d4bf1dac2d8cca8 Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Tue, 2 Feb 2016 10:26:10 -0800
-Subject: [PATCH] nios2: Define MUSL_DYNAMIC_LINKER
-
-Upstream-Status: Backport [https://gcc.gnu.org/git/?p=gcc.git;a=commitdiff;h=e5ddbbf992b909d8e38851bd3179d29389e6ac97]
-
-Signed-off-by: Marek Vasut <marex@denx.de>
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- gcc/config/nios2/linux.h | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/gcc/config/nios2/linux.h b/gcc/config/nios2/linux.h
-index 08edf1521f6..15696d86241 100644
---- a/gcc/config/nios2/linux.h
-+++ b/gcc/config/nios2/linux.h
-@@ -30,6 +30,7 @@
- #define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
-
- #define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-nios2.so.1"
-+#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-nios2.so.1"
-
- #undef LINK_SPEC
- #define LINK_SPEC LINK_SPEC_ENDIAN \
diff --git a/meta/recipes-devtools/gcc/gcc/0030-rust-recursion-limit.patch b/meta/recipes-devtools/gcc/gcc/0030-rust-recursion-limit.patch
new file mode 100644
index 0000000000..bbe2f18f6f
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc/0030-rust-recursion-limit.patch
@@ -0,0 +1,92 @@
+From 9234cdca6ee88badfc00297e72f13dac4e540c79 Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Fri, 1 Jul 2022 15:58:52 +0100
+Subject: [PATCH] Add a recursion limit to the demangle_const function in the
+ Rust demangler.
+
+libiberty/
+ PR demangler/105039
+ * rust-demangle.c (demangle_const): Add recursion limit.
+
+Upstream-Status: Backport [https://gcc.gnu.org/git/gitweb.cgi?p=gcc.git;h=9234cdca6ee88badfc00297e72f13dac4e540c79]
+---
+ libiberty/rust-demangle.c | 29 ++++++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 9 deletions(-)
+
+diff --git a/libiberty/rust-demangle.c b/libiberty/rust-demangle.c
+index bb58d900e27..36afcfae278 100644
+--- a/libiberty/rust-demangle.c
++++ b/libiberty/rust-demangle.c
+@@ -126,7 +126,7 @@ parse_integer_62 (struct rust_demangler *rdm)
+ return 0;
+
+ x = 0;
+- while (!eat (rdm, '_'))
++ while (!eat (rdm, '_') && !rdm->errored)
+ {
+ c = next (rdm);
+ x *= 62;
+@@ -1148,6 +1148,15 @@ demangle_const (struct rust_demangler *rdm)
+ if (rdm->errored)
+ return;
+
++ if (rdm->recursion != RUST_NO_RECURSION_LIMIT)
++ {
++ ++ rdm->recursion;
++ if (rdm->recursion > RUST_MAX_RECURSION_COUNT)
++ /* FIXME: There ought to be a way to report
++ that the recursion limit has been reached. */
++ goto fail_return;
++ }
++
+ if (eat (rdm, 'B'))
+ {
+ backref = parse_integer_62 (rdm);
+@@ -1158,7 +1167,7 @@ demangle_const (struct rust_demangler *rdm)
+ demangle_const (rdm);
+ rdm->next = old_next;
+ }
+- return;
++ goto pass_return;
+ }
+
+ ty_tag = next (rdm);
+@@ -1167,7 +1176,7 @@ demangle_const (struct rust_demangler *rdm)
+ /* Placeholder. */
+ case 'p':
+ PRINT ("_");
+- return;
++ goto pass_return;
+
+ /* Unsigned integer types. */
+ case 'h':
+@@ -1200,18 +1209,20 @@ demangle_const (struct rust_demangler *rdm)
+ break;
+
+ default:
+- rdm->errored = 1;
+- return;
++ goto fail_return;
+ }
+
+- if (rdm->errored)
+- return;
+-
+- if (rdm->verbose)
++ if (!rdm->errored && rdm->verbose)
+ {
+ PRINT (": ");
+ PRINT (basic_type (ty_tag));
+ }
++
++ fail_return:
++ rdm->errored = 1;
++ pass_return:
++ if (rdm->recursion != RUST_NO_RECURSION_LIMIT)
++ -- rdm->recursion;
+ }
+
+ static void
+--
+2.31.1
+
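The control-flow shape of the fix above (bump a depth counter on entry, bail out through fail_return once the limit is exceeded, always unwind the counter through pass_return) is easier to see outside the demangler. The toy below reuses that shape; the struct, the limit value and the function are invented here purely for illustration.

/* Standalone sketch of the guarded-recursion pattern applied to
 * demangle_const above (illustration only; the names loosely mirror the
 * libiberty ones but everything here is a toy). */
#include <stdio.h>

#define NO_RECURSION_LIMIT (-1)
#define MAX_RECURSION      1024

struct demangler {
    int recursion;                 /* current depth, or NO_RECURSION_LIMIT */
    int errored;
};

static void demangle_const_like(struct demangler *d, long depth)
{
    if (d->errored)
        return;

    if (d->recursion != NO_RECURSION_LIMIT)
    {
        ++d->recursion;
        if (d->recursion > MAX_RECURSION)
            goto fail_return;      /* too deep: flag the error and unwind */
    }

    if (depth > 0)
        demangle_const_like(d, depth - 1);   /* stands in for the real parse */
    goto pass_return;

 fail_return:
    d->errored = 1;
 pass_return:
    if (d->recursion != NO_RECURSION_LIMIT)
        --d->recursion;
}

int main(void)
{
    struct demangler d = { 0, 0 };
    demangle_const_like(&d, 1L << 20);       /* absurd depth, as a hostile mangled name could force */
    printf("errored = %d\n", d.errored);     /* prints 1: limit tripped instead of a stack overflow */
    return 0;
}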
diff --git a/meta/recipes-devtools/gcc/gcc/0031-gcc-sanitizers-fix.patch b/meta/recipes-devtools/gcc/gcc/0031-gcc-sanitizers-fix.patch
new file mode 100644
index 0000000000..d63618132a
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc/0031-gcc-sanitizers-fix.patch
@@ -0,0 +1,63 @@
+From fb77ca05ffb4f8e666878f2f6718a9fb4d686839 Mon Sep 17 00:00:00 2001
+From: Thurston Dang <thurston@google.com>
+Date: Thu, 13 Apr 2023 23:55:01 +0000
+Subject: [PATCH] Re-land 'ASan: move allocator base to avoid conflict with
+ high-entropy ASLR for x86-64 Linux'
+
+D147984 was reverted because it broke lit tests on Mac. This revision is based on D147984
+but maintains the old behavior for Apple.
+
+Note that, per the follow-up discussion with MaskRay in D147984, this patch excludes Apple
+but includes other platforms (e.g., aarch64, MIPS64) and OSes (e.g., FreeBSD, S390X), not just
+x86-64 Linux.
+
+Original commit message from D147984:
+
+Users have discovered [*] that when CONFIG_ARCH_MMAP_RND_BITS == 32,
+it will frequently conflict with ASan's allocator on x86-64 Linux, because the
+PIE program segment base address of 0x555555555554 plus an ASLR shift of up to
+((2**32) * 4K == 0x100000000000) will sometimes exceed ASan's hardcoded
+base address of 0x600000000000. We fix this by simply moving the allocator base
+to 0x500000000000, which is below the PIE program segment base address. This is
+cleaner than trying to move it to another location that is sandwiched between
+the PIE program and library segments, because if either of those grow too large,
+it will collide with the allocator region.
+
+Note that we will never need to change this base address again (unless we want to increase
+the size of the allocator), because ASLR cannot be set above 32-bits for x86-64 Linux (the
+PIE program segment and library segments would collide with each other; see also
+ARCH_MMAP_RND_BITS_MAX in https://github.com/torvalds/linux/blob/master/arch/x86/Kconfig).
+
+[*] see https://b.corp.google.com/issues/276925478
+and https://groups.google.com/a/google.com/g/chrome-os-gardeners/c/BbfzCP3dEeo/m/h3C_vVUxCQAJ
+
+Differential Revision: https://reviews.llvm.org/D148280
+
+Upstream-Status: Backport from llvm-project: https://github.com/llvm/llvm-project/commit/fb77ca05ffb4f8e666878f2f6718a9fb4d686839
+Signed-off-by: Claus Stovgaard <claus.stovgaard@gmail.com>
+---
+ libsanitizer/asan/asan_allocator.h | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
+index 0b4dbf03bb9d53..6a12a6c6025283 100644
+--- a/libsanitizer/asan/asan_allocator.h
++++ b/libsanitizer/asan/asan_allocator.h
+@@ -143,11 +143,15 @@ typedef DefaultSizeClassMap SizeClassMap;
+ const uptr kAllocatorSpace = ~(uptr)0;
+ const uptr kAllocatorSize = 0x8000000000ULL; // 500G
+ typedef DefaultSizeClassMap SizeClassMap;
+-# else
++# elif SANITIZER_APPLE
+ const uptr kAllocatorSpace = 0x600000000000ULL;
+ const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+ typedef DefaultSizeClassMap SizeClassMap;
+-# endif
++# else
++const uptr kAllocatorSpace = 0x500000000000ULL;
++const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
++typedef DefaultSizeClassMap SizeClassMap;
++# endif
+ template <typename AddressSpaceViewTy>
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
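As a sanity check of the arithmetic quoted in the commit message above, here is a small standalone program; the constants are the ones given in the message and in the hunk, and the overlap test is deliberately simplified.

/* Reproduces the address arithmetic from the commit message above
 * (illustration only, not part of the patch). */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    const uint64_t pie_base   = 0x555555555554ULL;          /* PIE program segment base */
    const uint64_t aslr_max   = (1ULL << 32) * 4096ULL;     /* 2**32 pages of 4 KiB = 0x100000000000 */
    const uint64_t old_base   = 0x600000000000ULL;          /* previous kAllocatorSpace */
    const uint64_t new_base   = 0x500000000000ULL;          /* kAllocatorSpace after the patch */
    const uint64_t alloc_size = 0x40000000000ULL;           /* kAllocatorSize, 4T */

    const uint64_t worst_pie  = pie_base + aslr_max;        /* 0x655555555554 */

    printf("worst-case PIE base          : 0x%" PRIx64 "\n", worst_pie);
    printf("old allocator region clashes : %s\n",
           worst_pie > old_base ? "yes" : "no");             /* yes -> the reported conflict */
    printf("new allocator region clashes : %s\n",
           new_base + alloc_size > pie_base ? "yes" : "no"); /* no  -> region ends below the PIE base */
    return 0;
}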
diff --git a/meta/recipes-devtools/gcc/gcc/CVE-2023-4039.patch b/meta/recipes-devtools/gcc/gcc/CVE-2023-4039.patch
new file mode 100644
index 0000000000..41684fe7dd
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc/CVE-2023-4039.patch
@@ -0,0 +1,2893 @@
+From: Richard Sandiford <richard.sandiford@arm.com>
+Subject: [PATCH 00/19] aarch64: Fix -fstack-protector issue
+Date: Tue, 12 Sep 2023 16:25:10 +0100
+
+This series of patches fixes deficiencies in GCC's -fstack-protector
+implementation for AArch64 when using dynamically allocated stack space.
+This is CVE-2023-4039. See:
+
+https://developer.arm.com/Arm%20Security%20Center/GCC%20Stack%20Protector%20Vulnerability%20AArch64
+https://github.com/metaredteam/external-disclosures/security/advisories/GHSA-x7ch-h5rf-w2mf
+
+for more details.
+
+The fix is to put the saved registers above the locals area when
+-fstack-protector is used.
+
+The series also fixes a stack-clash problem that I found while working
+on the CVE. In unpatched sources, the stack-clash problem would only
+trigger for unrealistic numbers of arguments (8K 64-bit arguments, or an
+equivalent). But it would be a more significant issue with the new
+-fstack-protector frame layout. It's therefore important that both
+problems are fixed together.
+
+Some reorganisation of the code seemed necessary to fix the problems in a
+cleanish way. The series is therefore quite long, but only a handful of
+patches should have any effect on code generation.
+
+See the individual patches for a detailed description.
+
+Tested on aarch64-linux-gnu. Pushed to trunk and to all active branches.
+I've also pushed backports to GCC 7+ to vendors/ARM/heads/CVE-2023-4039.
+
+CVE: CVE-2023-4039
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+
+From 52816ab48f97968f3fbfb5656250f3de7c00166d Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:43 +0100
+Subject: [PATCH 01/19] aarch64: Use local frame vars in shrink-wrapping code
+
+aarch64_layout_frame uses a shorthand for referring to
+cfun->machine->frame:
+
+ aarch64_frame &frame = cfun->machine->frame;
+
+This patch does the same for some other heavy users of the structure.
+No functional change intended.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_save_callee_saves): Use
+ a local shorthand for cfun->machine->frame.
+ (aarch64_restore_callee_saves, aarch64_get_separate_components):
+ (aarch64_process_components): Likewise.
+ (aarch64_allocate_and_probe_stack_space): Likewise.
+ (aarch64_expand_prologue, aarch64_expand_epilogue): Likewise.
+ (aarch64_layout_frame): Use existing shorthand for one more case.
+---
+ gcc/config/aarch64/aarch64.c | 115 ++++++++++++++++++-----------------
+ 1 file changed, 60 insertions(+), 55 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 391a93f3018..77c1d1300a5 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7994,6 +7994,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ unsigned start, unsigned limit, bool skip_wb,
+ bool hard_fp_valid_p)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ rtx_insn *insn;
+ unsigned regno;
+ unsigned regno2;
+@@ -8008,8 +8009,8 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ bool frame_related_p = aarch64_emit_cfi_for_reg_p (regno);
+
+ if (skip_wb
+- && (regno == cfun->machine->frame.wb_candidate1
+- || regno == cfun->machine->frame.wb_candidate2))
++ && (regno == frame.wb_candidate1
++ || regno == frame.wb_candidate2))
+ continue;
+
+ if (cfun->machine->reg_is_wrapped_separately[regno])
+@@ -8017,7 +8018,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = start_offset + cfun->machine->frame.reg_offset[regno];
++ offset = start_offset + frame.reg_offset[regno];
+ rtx base_rtx = stack_pointer_rtx;
+ poly_int64 sp_offset = offset;
+
+@@ -8030,7 +8031,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ {
+ gcc_assert (known_eq (start_offset, 0));
+ poly_int64 fp_offset
+- = cfun->machine->frame.below_hard_fp_saved_regs_size;
++ = frame.below_hard_fp_saved_regs_size;
+ if (hard_fp_valid_p)
+ base_rtx = hard_frame_pointer_rtx;
+ else
+@@ -8052,8 +8053,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
+ && !cfun->machine->reg_is_wrapped_separately[regno2]
+ && known_eq (GET_MODE_SIZE (mode),
+- cfun->machine->frame.reg_offset[regno2]
+- - cfun->machine->frame.reg_offset[regno]))
++ frame.reg_offset[regno2] - frame.reg_offset[regno]))
+ {
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ rtx mem2;
+@@ -8103,6 +8103,7 @@ static void
+ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+ unsigned limit, bool skip_wb, rtx *cfi_ops)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ unsigned regno;
+ unsigned regno2;
+ poly_int64 offset;
+@@ -8119,13 +8120,13 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+ rtx reg, mem;
+
+ if (skip_wb
+- && (regno == cfun->machine->frame.wb_candidate1
+- || regno == cfun->machine->frame.wb_candidate2))
++ && (regno == frame.wb_candidate1
++ || regno == frame.wb_candidate2))
+ continue;
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = start_offset + cfun->machine->frame.reg_offset[regno];
++ offset = start_offset + frame.reg_offset[regno];
+ rtx base_rtx = stack_pointer_rtx;
+ if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
+ aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
+@@ -8136,8 +8137,7 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+ && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
+ && !cfun->machine->reg_is_wrapped_separately[regno2]
+ && known_eq (GET_MODE_SIZE (mode),
+- cfun->machine->frame.reg_offset[regno2]
+- - cfun->machine->frame.reg_offset[regno]))
++ frame.reg_offset[regno2] - frame.reg_offset[regno]))
+ {
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ rtx mem2;
+@@ -8242,6 +8242,7 @@ offset_12bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
+ static sbitmap
+ aarch64_get_separate_components (void)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ sbitmap components = sbitmap_alloc (LAST_SAVED_REGNUM + 1);
+ bitmap_clear (components);
+
+@@ -8258,18 +8259,18 @@ aarch64_get_separate_components (void)
+ if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
+ continue;
+
+- poly_int64 offset = cfun->machine->frame.reg_offset[regno];
++ poly_int64 offset = frame.reg_offset[regno];
+
+ /* If the register is saved in the first SVE save slot, we use
+ it as a stack probe for -fstack-clash-protection. */
+ if (flag_stack_clash_protection
+- && maybe_ne (cfun->machine->frame.below_hard_fp_saved_regs_size, 0)
++ && maybe_ne (frame.below_hard_fp_saved_regs_size, 0)
+ && known_eq (offset, 0))
+ continue;
+
+ /* Get the offset relative to the register we'll use. */
+ if (frame_pointer_needed)
+- offset -= cfun->machine->frame.below_hard_fp_saved_regs_size;
++ offset -= frame.below_hard_fp_saved_regs_size;
+ else
+ offset += crtl->outgoing_args_size;
+
+@@ -8288,11 +8289,11 @@ aarch64_get_separate_components (void)
+ /* If the spare predicate register used by big-endian SVE code
+ is call-preserved, it must be saved in the main prologue
+ before any saves that use it. */
+- if (cfun->machine->frame.spare_pred_reg != INVALID_REGNUM)
+- bitmap_clear_bit (components, cfun->machine->frame.spare_pred_reg);
++ if (frame.spare_pred_reg != INVALID_REGNUM)
++ bitmap_clear_bit (components, frame.spare_pred_reg);
+
+- unsigned reg1 = cfun->machine->frame.wb_candidate1;
+- unsigned reg2 = cfun->machine->frame.wb_candidate2;
++ unsigned reg1 = frame.wb_candidate1;
++ unsigned reg2 = frame.wb_candidate2;
+ /* If registers have been chosen to be stored/restored with
+ writeback don't interfere with them to avoid having to output explicit
+ stack adjustment instructions. */
+@@ -8401,6 +8402,7 @@ aarch64_get_next_set_bit (sbitmap bmp, unsigned int start)
+ static void
+ aarch64_process_components (sbitmap components, bool prologue_p)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM
+ : STACK_POINTER_REGNUM);
+@@ -8415,9 +8417,9 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ machine_mode mode = aarch64_reg_save_mode (regno);
+
+ rtx reg = gen_rtx_REG (mode, regno);
+- poly_int64 offset = cfun->machine->frame.reg_offset[regno];
++ poly_int64 offset = frame.reg_offset[regno];
+ if (frame_pointer_needed)
+- offset -= cfun->machine->frame.below_hard_fp_saved_regs_size;
++ offset -= frame.below_hard_fp_saved_regs_size;
+ else
+ offset += crtl->outgoing_args_size;
+
+@@ -8442,14 +8444,14 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ break;
+ }
+
+- poly_int64 offset2 = cfun->machine->frame.reg_offset[regno2];
++ poly_int64 offset2 = frame.reg_offset[regno2];
+ /* The next register is not of the same class or its offset is not
+ mergeable with the current one into a pair. */
+ if (aarch64_sve_mode_p (mode)
+ || !satisfies_constraint_Ump (mem)
+ || GP_REGNUM_P (regno) != GP_REGNUM_P (regno2)
+ || (crtl->abi->id () == ARM_PCS_SIMD && FP_REGNUM_P (regno))
+- || maybe_ne ((offset2 - cfun->machine->frame.reg_offset[regno]),
++ || maybe_ne ((offset2 - frame.reg_offset[regno]),
+ GET_MODE_SIZE (mode)))
+ {
+ insn = emit_insn (set);
+@@ -8471,7 +8473,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ /* REGNO2 can be saved/restored in a pair with REGNO. */
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ if (frame_pointer_needed)
+- offset2 -= cfun->machine->frame.below_hard_fp_saved_regs_size;
++ offset2 -= frame.below_hard_fp_saved_regs_size;
+ else
+ offset2 += crtl->outgoing_args_size;
+ rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
+@@ -8566,6 +8568,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ bool frame_related_p,
+ bool final_adjustment_p)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ HOST_WIDE_INT guard_size
+ = 1 << param_stack_clash_protection_guard_size;
+ HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
+@@ -8586,25 +8589,25 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ register as a probe. We can't assume that LR was saved at position 0
+ though, so treat any space below it as unprobed. */
+ if (final_adjustment_p
+- && known_eq (cfun->machine->frame.below_hard_fp_saved_regs_size, 0))
++ && known_eq (frame.below_hard_fp_saved_regs_size, 0))
+ {
+- poly_int64 lr_offset = cfun->machine->frame.reg_offset[LR_REGNUM];
++ poly_int64 lr_offset = frame.reg_offset[LR_REGNUM];
+ if (known_ge (lr_offset, 0))
+ min_probe_threshold -= lr_offset.to_constant ();
+ else
+ gcc_assert (!flag_stack_clash_protection || known_eq (poly_size, 0));
+ }
+
+- poly_int64 frame_size = cfun->machine->frame.frame_size;
++ poly_int64 frame_size = frame.frame_size;
+
+ /* We should always have a positive probe threshold. */
+ gcc_assert (min_probe_threshold > 0);
+
+ if (flag_stack_clash_protection && !final_adjustment_p)
+ {
+- poly_int64 initial_adjust = cfun->machine->frame.initial_adjust;
+- poly_int64 sve_callee_adjust = cfun->machine->frame.sve_callee_adjust;
+- poly_int64 final_adjust = cfun->machine->frame.final_adjust;
++ poly_int64 initial_adjust = frame.initial_adjust;
++ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
++ poly_int64 final_adjust = frame.final_adjust;
+
+ if (known_eq (frame_size, 0))
+ {
+@@ -8893,17 +8896,18 @@ aarch64_epilogue_uses (int regno)
+ void
+ aarch64_expand_prologue (void)
+ {
+- poly_int64 frame_size = cfun->machine->frame.frame_size;
+- poly_int64 initial_adjust = cfun->machine->frame.initial_adjust;
+- HOST_WIDE_INT callee_adjust = cfun->machine->frame.callee_adjust;
+- poly_int64 final_adjust = cfun->machine->frame.final_adjust;
+- poly_int64 callee_offset = cfun->machine->frame.callee_offset;
+- poly_int64 sve_callee_adjust = cfun->machine->frame.sve_callee_adjust;
++ aarch64_frame &frame = cfun->machine->frame;
++ poly_int64 frame_size = frame.frame_size;
++ poly_int64 initial_adjust = frame.initial_adjust;
++ HOST_WIDE_INT callee_adjust = frame.callee_adjust;
++ poly_int64 final_adjust = frame.final_adjust;
++ poly_int64 callee_offset = frame.callee_offset;
++ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
+ poly_int64 below_hard_fp_saved_regs_size
+- = cfun->machine->frame.below_hard_fp_saved_regs_size;
+- unsigned reg1 = cfun->machine->frame.wb_candidate1;
+- unsigned reg2 = cfun->machine->frame.wb_candidate2;
+- bool emit_frame_chain = cfun->machine->frame.emit_frame_chain;
++ = frame.below_hard_fp_saved_regs_size;
++ unsigned reg1 = frame.wb_candidate1;
++ unsigned reg2 = frame.wb_candidate2;
++ bool emit_frame_chain = frame.emit_frame_chain;
+ rtx_insn *insn;
+
+ if (flag_stack_clash_protection && known_eq (callee_adjust, 0))
+@@ -8969,7 +8973,7 @@ aarch64_expand_prologue (void)
+
+ /* The offset of the frame chain record (if any) from the current SP. */
+ poly_int64 chain_offset = (initial_adjust + callee_adjust
+- - cfun->machine->frame.hard_fp_offset);
++ - frame.hard_fp_offset);
+ gcc_assert (known_ge (chain_offset, 0));
+
+ /* The offset of the bottom of the save area from the current SP. */
+@@ -9072,15 +9076,16 @@ aarch64_use_return_insn_p (void)
+ void
+ aarch64_expand_epilogue (bool for_sibcall)
+ {
+- poly_int64 initial_adjust = cfun->machine->frame.initial_adjust;
+- HOST_WIDE_INT callee_adjust = cfun->machine->frame.callee_adjust;
+- poly_int64 final_adjust = cfun->machine->frame.final_adjust;
+- poly_int64 callee_offset = cfun->machine->frame.callee_offset;
+- poly_int64 sve_callee_adjust = cfun->machine->frame.sve_callee_adjust;
++ aarch64_frame &frame = cfun->machine->frame;
++ poly_int64 initial_adjust = frame.initial_adjust;
++ HOST_WIDE_INT callee_adjust = frame.callee_adjust;
++ poly_int64 final_adjust = frame.final_adjust;
++ poly_int64 callee_offset = frame.callee_offset;
++ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
+ poly_int64 below_hard_fp_saved_regs_size
+- = cfun->machine->frame.below_hard_fp_saved_regs_size;
+- unsigned reg1 = cfun->machine->frame.wb_candidate1;
+- unsigned reg2 = cfun->machine->frame.wb_candidate2;
++ = frame.below_hard_fp_saved_regs_size;
++ unsigned reg1 = frame.wb_candidate1;
++ unsigned reg2 = frame.wb_candidate2;
+ rtx cfi_ops = NULL;
+ rtx_insn *insn;
+ /* A stack clash protection prologue may not have left EP0_REGNUM or
+@@ -9113,7 +9118,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+ /* We need to add memory barrier to prevent read from deallocated stack. */
+ bool need_barrier_p
+ = maybe_ne (get_frame_size ()
+- + cfun->machine->frame.saved_varargs_size, 0);
++ + frame.saved_varargs_size, 0);
+
+ /* Emit a barrier to prevent loads from a deallocated stack. */
+ if (maybe_gt (final_adjust, crtl->outgoing_args_size)
+@@ -11744,24 +11749,24 @@ aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+ poly_int64
+ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
++
+ if (to == HARD_FRAME_POINTER_REGNUM)
+ {
+ if (from == ARG_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset;
++ return frame.hard_fp_offset;
+
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset
+- - cfun->machine->frame.locals_offset;
++ return frame.hard_fp_offset - frame.locals_offset;
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.frame_size
+- - cfun->machine->frame.locals_offset;
++ return frame.frame_size - frame.locals_offset;
+ }
+
+- return cfun->machine->frame.frame_size;
++ return frame.frame_size;
+ }
+
+
+--
+2.34.1
+
+
+From a2a57f7ec7912e77eb26919545807d90065584ff Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:44 +0100
+Subject: [PATCH 02/19] aarch64: Avoid a use of callee_offset
+
+When we emit the frame chain, i.e. when we reach Here in this statement
+of aarch64_expand_prologue:
+
+ if (emit_frame_chain)
+ {
+ // Here
+ ...
+ }
+
+the stack is in one of two states:
+
+- We've allocated up to the frame chain, but no more.
+
+- We've allocated the whole frame, and the frame chain is within easy
+ reach of the new SP.
+
+The offset of the frame chain from the current SP is available
+in aarch64_frame as callee_offset. It is also available as the
+chain_offset local variable, where the latter is calculated from other
+data. (However, chain_offset is not always equal to callee_offset when
+!emit_frame_chain, so chain_offset isn't redundant.)
+
+In c600df9a4060da3c6121ff4d0b93f179eafd69d1 I switched to using
+chain_offset for the initialisation of the hard frame pointer:
+
+ aarch64_add_offset (Pmode, hard_frame_pointer_rtx,
+- stack_pointer_rtx, callee_offset,
++ stack_pointer_rtx, chain_offset,
+ tmp1_rtx, tmp0_rtx, frame_pointer_needed);
+
+But the later REG_CFA_ADJUST_CFA handling still used callee_offset.
+
+I think the difference is harmless, but it's more logical for the
+CFA note to be in sync, and it's more convenient for later patches
+if it uses chain_offset.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_expand_prologue): Use
+ chain_offset rather than callee_offset.
+---
+ gcc/config/aarch64/aarch64.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 77c1d1300a5..6bc026bd08f 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -8901,7 +8901,6 @@ aarch64_expand_prologue (void)
+ poly_int64 initial_adjust = frame.initial_adjust;
+ HOST_WIDE_INT callee_adjust = frame.callee_adjust;
+ poly_int64 final_adjust = frame.final_adjust;
+- poly_int64 callee_offset = frame.callee_offset;
+ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
+ poly_int64 below_hard_fp_saved_regs_size
+ = frame.below_hard_fp_saved_regs_size;
+@@ -9010,8 +9009,7 @@ aarch64_expand_prologue (void)
+ implicit. */
+ if (!find_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX))
+ {
+- rtx src = plus_constant (Pmode, stack_pointer_rtx,
+- callee_offset);
++ rtx src = plus_constant (Pmode, stack_pointer_rtx, chain_offset);
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (hard_frame_pointer_rtx, src));
+ }
+--
+2.34.1
+
+
+From 5efdcc8ed19d9d9e708a001f5dc695560411496d Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:44 +0100
+Subject: [PATCH 03/19] aarch64: Explicitly handle frames with no saved
+ registers
+
+If a frame has no saved registers, it can be allocated in one go.
+There is no need to treat the areas below and above the saved
+registers as separate.
+
+And if we allocate the frame in one go, it should be allocated
+as the initial_adjust rather than the final_adjust. This allows the
+frame size to grow to guard_size - guard_used_by_caller before a stack
+probe is needed. (A frame with no register saves is necessarily a
+leaf frame.)
+
+This is a no-op as things stand, since a leaf function will have
+no outgoing arguments, and so all the frame will be above where
+the saved registers normally go.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Explicitly
+ allocate the frame in one go if there are no saved registers.
+---
+ gcc/config/aarch64/aarch64.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 6bc026bd08f..05e6ae8c0c9 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7609,9 +7609,11 @@ aarch64_layout_frame (void)
+
+ HOST_WIDE_INT const_size, const_outgoing_args_size, const_fp_offset;
+ HOST_WIDE_INT const_saved_regs_size;
+- if (frame.frame_size.is_constant (&const_size)
+- && const_size < max_push_offset
+- && known_eq (frame.hard_fp_offset, const_size))
++ if (known_eq (frame.saved_regs_size, 0))
++ frame.initial_adjust = frame.frame_size;
++ else if (frame.frame_size.is_constant (&const_size)
++ && const_size < max_push_offset
++ && known_eq (frame.hard_fp_offset, const_size))
+ {
+ /* Simple, small frame with no outgoing arguments:
+
+--
+2.34.1
+
+
+From a8385d14318634f2e3a08a75bd2d6e2810f8cec9 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:45 +0100
+Subject: [PATCH 04/19] aarch64: Add bytes_below_saved_regs to frame info
+
+The frame layout code currently hard-codes the assumption that
+the number of bytes below the saved registers is equal to the
+size of the outgoing arguments. This patch abstracts that
+value into a new field of aarch64_frame.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::bytes_below_saved_regs): New
+ field.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Initialize it,
+ and use it instead of crtl->outgoing_args_size.
+ (aarch64_get_separate_components): Use bytes_below_saved_regs instead
+ of outgoing_args_size.
+ (aarch64_process_components): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 71 ++++++++++++++++++------------------
+ gcc/config/aarch64/aarch64.h | 5 +++
+ 2 files changed, 41 insertions(+), 35 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 05e6ae8c0c9..8fa5a0b2545 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7476,6 +7476,8 @@ aarch64_layout_frame (void)
+ gcc_assert (crtl->is_leaf
+ || maybe_ne (frame.reg_offset[R30_REGNUM], SLOT_NOT_REQUIRED));
+
++ frame.bytes_below_saved_regs = crtl->outgoing_args_size;
++
+ /* Now assign stack slots for the registers. Start with the predicate
+ registers, since predicate LDR and STR have a relatively small
+ offset range. These saves happen below the hard frame pointer. */
+@@ -7580,18 +7582,18 @@ aarch64_layout_frame (void)
+
+ poly_int64 varargs_and_saved_regs_size = offset + frame.saved_varargs_size;
+
+- poly_int64 above_outgoing_args
++ poly_int64 saved_regs_and_above
+ = aligned_upper_bound (varargs_and_saved_regs_size
+ + get_frame_size (),
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ frame.hard_fp_offset
+- = above_outgoing_args - frame.below_hard_fp_saved_regs_size;
++ = saved_regs_and_above - frame.below_hard_fp_saved_regs_size;
+
+ /* Both these values are already aligned. */
+- gcc_assert (multiple_p (crtl->outgoing_args_size,
++ gcc_assert (multiple_p (frame.bytes_below_saved_regs,
+ STACK_BOUNDARY / BITS_PER_UNIT));
+- frame.frame_size = above_outgoing_args + crtl->outgoing_args_size;
++ frame.frame_size = saved_regs_and_above + frame.bytes_below_saved_regs;
+
+ frame.locals_offset = frame.saved_varargs_size;
+
+@@ -7607,7 +7609,7 @@ aarch64_layout_frame (void)
+ else if (frame.wb_candidate1 != INVALID_REGNUM)
+ max_push_offset = 256;
+
+- HOST_WIDE_INT const_size, const_outgoing_args_size, const_fp_offset;
++ HOST_WIDE_INT const_size, const_below_saved_regs, const_fp_offset;
+ HOST_WIDE_INT const_saved_regs_size;
+ if (known_eq (frame.saved_regs_size, 0))
+ frame.initial_adjust = frame.frame_size;
+@@ -7615,31 +7617,31 @@ aarch64_layout_frame (void)
+ && const_size < max_push_offset
+ && known_eq (frame.hard_fp_offset, const_size))
+ {
+- /* Simple, small frame with no outgoing arguments:
++ /* Simple, small frame with no data below the saved registers.
+
+ stp reg1, reg2, [sp, -frame_size]!
+ stp reg3, reg4, [sp, 16] */
+ frame.callee_adjust = const_size;
+ }
+- else if (crtl->outgoing_args_size.is_constant (&const_outgoing_args_size)
++ else if (frame.bytes_below_saved_regs.is_constant (&const_below_saved_regs)
+ && frame.saved_regs_size.is_constant (&const_saved_regs_size)
+- && const_outgoing_args_size + const_saved_regs_size < 512
+- /* We could handle this case even with outgoing args, provided
+- that the number of args left us with valid offsets for all
+- predicate and vector save slots. It's such a rare case that
+- it hardly seems worth the effort though. */
+- && (!saves_below_hard_fp_p || const_outgoing_args_size == 0)
++ && const_below_saved_regs + const_saved_regs_size < 512
++ /* We could handle this case even with data below the saved
++ registers, provided that that data left us with valid offsets
++ for all predicate and vector save slots. It's such a rare
++ case that it hardly seems worth the effort though. */
++ && (!saves_below_hard_fp_p || const_below_saved_regs == 0)
+ && !(cfun->calls_alloca
+ && frame.hard_fp_offset.is_constant (&const_fp_offset)
+ && const_fp_offset < max_push_offset))
+ {
+- /* Frame with small outgoing arguments:
++ /* Frame with small area below the saved registers:
+
+ sub sp, sp, frame_size
+- stp reg1, reg2, [sp, outgoing_args_size]
+- stp reg3, reg4, [sp, outgoing_args_size + 16] */
++ stp reg1, reg2, [sp, bytes_below_saved_regs]
++ stp reg3, reg4, [sp, bytes_below_saved_regs + 16] */
+ frame.initial_adjust = frame.frame_size;
+- frame.callee_offset = const_outgoing_args_size;
++ frame.callee_offset = const_below_saved_regs;
+ }
+ else if (saves_below_hard_fp_p
+ && known_eq (frame.saved_regs_size,
+@@ -7649,30 +7651,29 @@ aarch64_layout_frame (void)
+
+ sub sp, sp, hard_fp_offset + below_hard_fp_saved_regs_size
+ save SVE registers relative to SP
+- sub sp, sp, outgoing_args_size */
++ sub sp, sp, bytes_below_saved_regs */
+ frame.initial_adjust = (frame.hard_fp_offset
+ + frame.below_hard_fp_saved_regs_size);
+- frame.final_adjust = crtl->outgoing_args_size;
++ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+ else if (frame.hard_fp_offset.is_constant (&const_fp_offset)
+ && const_fp_offset < max_push_offset)
+ {
+- /* Frame with large outgoing arguments or SVE saves, but with
+- a small local area:
++ /* Frame with large area below the saved registers, or with SVE saves,
++ but with a small area above:
+
+ stp reg1, reg2, [sp, -hard_fp_offset]!
+ stp reg3, reg4, [sp, 16]
+ [sub sp, sp, below_hard_fp_saved_regs_size]
+ [save SVE registers relative to SP]
+- sub sp, sp, outgoing_args_size */
++ sub sp, sp, bytes_below_saved_regs */
+ frame.callee_adjust = const_fp_offset;
+ frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
+- frame.final_adjust = crtl->outgoing_args_size;
++ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+ else
+ {
+- /* Frame with large local area and outgoing arguments or SVE saves,
+- using frame pointer:
++ /* General case:
+
+ sub sp, sp, hard_fp_offset
+ stp x29, x30, [sp, 0]
+@@ -7680,10 +7681,10 @@ aarch64_layout_frame (void)
+ stp reg3, reg4, [sp, 16]
+ [sub sp, sp, below_hard_fp_saved_regs_size]
+ [save SVE registers relative to SP]
+- sub sp, sp, outgoing_args_size */
++ sub sp, sp, bytes_below_saved_regs */
+ frame.initial_adjust = frame.hard_fp_offset;
+ frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
+- frame.final_adjust = crtl->outgoing_args_size;
++ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+
+ /* Make sure the individual adjustments add up to the full frame size. */
+@@ -8274,7 +8275,7 @@ aarch64_get_separate_components (void)
+ if (frame_pointer_needed)
+ offset -= frame.below_hard_fp_saved_regs_size;
+ else
+- offset += crtl->outgoing_args_size;
++ offset += frame.bytes_below_saved_regs;
+
+ /* Check that we can access the stack slot of the register with one
+ direct load with no adjustments needed. */
+@@ -8423,7 +8424,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ if (frame_pointer_needed)
+ offset -= frame.below_hard_fp_saved_regs_size;
+ else
+- offset += crtl->outgoing_args_size;
++ offset += frame.bytes_below_saved_regs;
+
+ rtx addr = plus_constant (Pmode, ptr_reg, offset);
+ rtx mem = gen_frame_mem (mode, addr);
+@@ -8477,7 +8478,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ if (frame_pointer_needed)
+ offset2 -= frame.below_hard_fp_saved_regs_size;
+ else
+- offset2 += crtl->outgoing_args_size;
++ offset2 += frame.bytes_below_saved_regs;
+ rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
+ rtx mem2 = gen_frame_mem (mode, addr2);
+ rtx set2 = prologue_p ? gen_rtx_SET (mem2, reg2)
+@@ -8551,10 +8552,10 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
+ registers. If POLY_SIZE is not large enough to require a probe this function
+ will only adjust the stack. When allocating the stack space
+ FRAME_RELATED_P is then used to indicate if the allocation is frame related.
+- FINAL_ADJUSTMENT_P indicates whether we are allocating the outgoing
+- arguments. If we are then we ensure that any allocation larger than the ABI
+- defined buffer needs a probe so that the invariant of having a 1KB buffer is
+- maintained.
++ FINAL_ADJUSTMENT_P indicates whether we are allocating the area below
++ the saved registers. If we are then we ensure that any allocation
++ larger than the ABI defined buffer needs a probe so that the
++ invariant of having a 1KB buffer is maintained.
+
+ We emit barriers after each stack adjustment to prevent optimizations from
+ breaking the invariant that we never drop the stack more than a page. This
+@@ -8763,7 +8764,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ /* Handle any residuals. Residuals of at least MIN_PROBE_THRESHOLD have to
+ be probed. This maintains the requirement that each page is probed at
+ least once. For initial probing we probe only if the allocation is
+- more than GUARD_SIZE - buffer, and for the outgoing arguments we probe
++ more than GUARD_SIZE - buffer, and below the saved registers we probe
+ if the amount is larger than buffer. GUARD_SIZE - buffer + buffer ==
+ GUARD_SIZE. This works that for any allocation that is large enough to
+ trigger a probe here, we'll have at least one, and if they're not large
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index bb383acfae8..6f0b8c7107e 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -837,6 +837,11 @@ struct GTY (()) aarch64_frame
+ /* The size of the callee-save registers with a slot in REG_OFFSET. */
+ poly_int64 saved_regs_size;
+
++ /* The number of bytes between the bottom of the static frame (the bottom
++ of the outgoing arguments) and the bottom of the register save area.
++ This value is always a multiple of STACK_BOUNDARY. */
++ poly_int64 bytes_below_saved_regs;
++
+ /* The size of the callee-save registers with a slot in REG_OFFSET that
+ are saved below the hard frame pointer. */
+ poly_int64 below_hard_fp_saved_regs_size;
+--
+2.34.1
+
+
+From d3f6ceecc8a7f128a9e6cb7d8aecf0de81ed9705 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:45 +0100
+Subject: [PATCH 05/19] aarch64: Add bytes_below_hard_fp to frame info
+
+Following on from the previous bytes_below_saved_regs patch, this one
+records the number of bytes that are below the hard frame pointer.
+This eventually replaces below_hard_fp_saved_regs_size.
+
+If a frame pointer is not needed, the epilogue adds final_adjust
+to the stack pointer before restoring registers:
+
+ aarch64_add_sp (tmp1_rtx, tmp0_rtx, final_adjust, true);
+
+Therefore, if the epilogue needs to restore the stack pointer from
+the hard frame pointer, the directly corresponding offset is:
+
+ -bytes_below_hard_fp + final_adjust
+
+i.e. go from the hard frame pointer to the bottom of the frame,
+then add the same amount as if we were using the stack pointer
+from the outset.
+
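+As a rough worked example (sizes chosen only for illustration): with
+64 bytes of outgoing arguments, 32 bytes of SVE saves below the hard
+frame pointer, and final_adjust covering just the outgoing arguments,
+the sketch below checks that starting from the hard frame pointer and
+starting from the bottom of the frame leave SP in the same place:
+
+  #include <assert.h>
+
+  int main (void)
+  {
+    /* Hypothetical frame, for illustration only.  */
+    long bytes_below_saved_regs = 64;        /* outgoing arguments */
+    long below_hard_fp_saved_regs_size = 32; /* SVE saves */
+    long bytes_below_hard_fp
+      = below_hard_fp_saved_regs_size + bytes_below_saved_regs; /* 96 */
+    long final_adjust = bytes_below_saved_regs;                 /* 64 */
+
+    /* Epilogue with a frame pointer: offset applied to the hard FP.  */
+    long sp_from_hard_fp = -bytes_below_hard_fp + final_adjust; /* -32 */
+
+    /* Epilogue without a frame pointer: SP starts at the bottom of
+       the frame and the first step adds final_adjust.  */
+    long sp_from_frame_bottom = final_adjust;                   /* 64 */
+
+    /* Both leave SP 32 bytes below the hard FP, i.e. at the bottom of
+       the SVE save area, where the restores expect it.  */
+    assert (bytes_below_hard_fp + sp_from_hard_fp == sp_from_frame_bottom);
+    return 0;
+  }
+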
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::bytes_below_hard_fp): New
+ field.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Initialize it.
+ (aarch64_expand_epilogue): Use it instead of
+ below_hard_fp_saved_regs_size.
+---
+ gcc/config/aarch64/aarch64.c | 6 +++---
+ gcc/config/aarch64/aarch64.h | 5 +++++
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 8fa5a0b2545..e03adf57226 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7528,6 +7528,7 @@ aarch64_layout_frame (void)
+ of the callee save area. */
+ bool saves_below_hard_fp_p = maybe_ne (offset, 0);
+ frame.below_hard_fp_saved_regs_size = offset;
++ frame.bytes_below_hard_fp = offset + frame.bytes_below_saved_regs;
+ if (frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+@@ -9083,8 +9084,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+ poly_int64 final_adjust = frame.final_adjust;
+ poly_int64 callee_offset = frame.callee_offset;
+ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
+- poly_int64 below_hard_fp_saved_regs_size
+- = frame.below_hard_fp_saved_regs_size;
++ poly_int64 bytes_below_hard_fp = frame.bytes_below_hard_fp;
+ unsigned reg1 = frame.wb_candidate1;
+ unsigned reg2 = frame.wb_candidate2;
+ rtx cfi_ops = NULL;
+@@ -9140,7 +9140,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+ is restored on the instruction doing the writeback. */
+ aarch64_add_offset (Pmode, stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+- -callee_offset - below_hard_fp_saved_regs_size,
++ -bytes_below_hard_fp + final_adjust,
+ tmp1_rtx, tmp0_rtx, callee_adjust == 0);
+ else
+ /* The case where we need to re-use the register here is very rare, so
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 6f0b8c7107e..21ac920a3fe 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -846,6 +846,11 @@ struct GTY (()) aarch64_frame
+ are saved below the hard frame pointer. */
+ poly_int64 below_hard_fp_saved_regs_size;
+
++ /* The number of bytes between the bottom of the static frame (the bottom
++ of the outgoing arguments) and the hard frame pointer. This value is
++ always a multiple of STACK_BOUNDARY. */
++ poly_int64 bytes_below_hard_fp;
++
+ /* Offset from the base of the frame (incomming SP) to the
+ top of the locals area. This value is always a multiple of
+ STACK_BOUNDARY. */
+--
+2.34.1
+
+
+From e8a7ec87fcdbaa5f7c7bd499aebe5cefacbf8687 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:46 +0100
+Subject: [PATCH 06/19] aarch64: Tweak aarch64_save/restore_callee_saves
+
+aarch64_save_callee_saves and aarch64_restore_callee_saves took
+a parameter called start_offset that gives the offset of the
+bottom of the saved register area from the current stack pointer.
+However, it's more convenient for later patches if we use the
+bottom of the entire frame as the reference point, rather than
+the bottom of the saved registers.
+
+Doing that removes the need for the callee_offset field.
+Other than that, this is not a win on its own. It only really
+makes sense in combination with the follow-on patches.
+
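+A small sketch of how the old and new parameterizations relate
+(illustrative numbers only; the values are assumptions rather than a
+real frame):
+
+  #include <assert.h>
+
+  int main (void)
+  {
+    /* 64 bytes of outgoing arguments; a slot 16 bytes above the
+       bottom of the save area.  */
+    long bytes_below_saved_regs = 64;
+    long reg_offset = 16;
+
+    /* Mid-prologue, before the final adjustment, SP sits at the
+       bottom of the save area, 64 bytes above the frame bottom.  */
+    long bytes_below_sp = 64;
+
+    /* Old interface: START_OFFSET was the save-area bottom relative
+       to SP.  */
+    long start_offset = bytes_below_saved_regs - bytes_below_sp; /* 0 */
+    long old_offset = start_offset + reg_offset;
+
+    /* New interface introduced by this patch.  */
+    long new_offset = reg_offset + bytes_below_saved_regs - bytes_below_sp;
+
+    assert (old_offset == new_offset);  /* both address SP + 16 */
+    return 0;
+  }
+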
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::callee_offset): Delete.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Remove
+ callee_offset handling.
+ (aarch64_save_callee_saves): Replace the start_offset parameter
+ with a bytes_below_sp parameter.
+ (aarch64_restore_callee_saves): Likewise.
+ (aarch64_expand_prologue): Update accordingly.
+ (aarch64_expand_epilogue): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 56 ++++++++++++++++++------------------
+ gcc/config/aarch64/aarch64.h | 4 ---
+ 2 files changed, 28 insertions(+), 32 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index e03adf57226..96e99f6c17a 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7602,7 +7602,6 @@ aarch64_layout_frame (void)
+ frame.final_adjust = 0;
+ frame.callee_adjust = 0;
+ frame.sve_callee_adjust = 0;
+- frame.callee_offset = 0;
+
+ HOST_WIDE_INT max_push_offset = 0;
+ if (frame.wb_candidate2 != INVALID_REGNUM)
+@@ -7642,7 +7641,6 @@ aarch64_layout_frame (void)
+ stp reg1, reg2, [sp, bytes_below_saved_regs]
+ stp reg3, reg4, [sp, bytes_below_saved_regs + 16] */
+ frame.initial_adjust = frame.frame_size;
+- frame.callee_offset = const_below_saved_regs;
+ }
+ else if (saves_below_hard_fp_p
+ && known_eq (frame.saved_regs_size,
+@@ -7989,12 +7987,13 @@ aarch64_add_cfa_expression (rtx_insn *insn, rtx reg,
+ }
+
+ /* Emit code to save the callee-saved registers from register number START
+- to LIMIT to the stack at the location starting at offset START_OFFSET,
+- skipping any write-back candidates if SKIP_WB is true. HARD_FP_VALID_P
+- is true if the hard frame pointer has been set up. */
++ to LIMIT to the stack. The stack pointer is currently BYTES_BELOW_SP
++ bytes above the bottom of the static frame. Skip any write-back
++ candidates if SKIP_WB is true. HARD_FP_VALID_P is true if the hard
++ frame pointer has been set up. */
+
+ static void
+-aarch64_save_callee_saves (poly_int64 start_offset,
++aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+ unsigned start, unsigned limit, bool skip_wb,
+ bool hard_fp_valid_p)
+ {
+@@ -8022,7 +8021,9 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = start_offset + frame.reg_offset[regno];
++ offset = (frame.reg_offset[regno]
++ + frame.bytes_below_saved_regs
++ - bytes_below_sp);
+ rtx base_rtx = stack_pointer_rtx;
+ poly_int64 sp_offset = offset;
+
+@@ -8033,9 +8034,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ else if (GP_REGNUM_P (regno)
+ && (!offset.is_constant (&const_offset) || const_offset >= 512))
+ {
+- gcc_assert (known_eq (start_offset, 0));
+- poly_int64 fp_offset
+- = frame.below_hard_fp_saved_regs_size;
++ poly_int64 fp_offset = frame.bytes_below_hard_fp - bytes_below_sp;
+ if (hard_fp_valid_p)
+ base_rtx = hard_frame_pointer_rtx;
+ else
+@@ -8099,12 +8098,13 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ }
+
+ /* Emit code to restore the callee registers from register number START
+- up to and including LIMIT. Restore from the stack offset START_OFFSET,
+- skipping any write-back candidates if SKIP_WB is true. Write the
+- appropriate REG_CFA_RESTORE notes into CFI_OPS. */
++ up to and including LIMIT. The stack pointer is currently BYTES_BELOW_SP
++ bytes above the bottom of the static frame. Skip any write-back
++ candidates if SKIP_WB is true. Write the appropriate REG_CFA_RESTORE
++ notes into CFI_OPS. */
+
+ static void
+-aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
++aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
+ unsigned limit, bool skip_wb, rtx *cfi_ops)
+ {
+ aarch64_frame &frame = cfun->machine->frame;
+@@ -8130,7 +8130,9 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = start_offset + frame.reg_offset[regno];
++ offset = (frame.reg_offset[regno]
++ + frame.bytes_below_saved_regs
++ - bytes_below_sp);
+ rtx base_rtx = stack_pointer_rtx;
+ if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
+ aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
+@@ -8906,8 +8908,6 @@ aarch64_expand_prologue (void)
+ HOST_WIDE_INT callee_adjust = frame.callee_adjust;
+ poly_int64 final_adjust = frame.final_adjust;
+ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
+- poly_int64 below_hard_fp_saved_regs_size
+- = frame.below_hard_fp_saved_regs_size;
+ unsigned reg1 = frame.wb_candidate1;
+ unsigned reg2 = frame.wb_candidate2;
+ bool emit_frame_chain = frame.emit_frame_chain;
+@@ -8979,8 +8979,8 @@ aarch64_expand_prologue (void)
+ - frame.hard_fp_offset);
+ gcc_assert (known_ge (chain_offset, 0));
+
+- /* The offset of the bottom of the save area from the current SP. */
+- poly_int64 saved_regs_offset = chain_offset - below_hard_fp_saved_regs_size;
++ /* The offset of the current SP from the bottom of the static frame. */
++ poly_int64 bytes_below_sp = frame_size - initial_adjust - callee_adjust;
+
+ if (emit_frame_chain)
+ {
+@@ -8988,7 +8988,7 @@ aarch64_expand_prologue (void)
+ {
+ reg1 = R29_REGNUM;
+ reg2 = R30_REGNUM;
+- aarch64_save_callee_saves (saved_regs_offset, reg1, reg2,
++ aarch64_save_callee_saves (bytes_below_sp, reg1, reg2,
+ false, false);
+ }
+ else
+@@ -9028,7 +9028,7 @@ aarch64_expand_prologue (void)
+ emit_insn (gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx));
+ }
+
+- aarch64_save_callee_saves (saved_regs_offset, R0_REGNUM, R30_REGNUM,
++ aarch64_save_callee_saves (bytes_below_sp, R0_REGNUM, R30_REGNUM,
+ callee_adjust != 0 || emit_frame_chain,
+ emit_frame_chain);
+ if (maybe_ne (sve_callee_adjust, 0))
+@@ -9038,16 +9038,17 @@ aarch64_expand_prologue (void)
+ aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx,
+ sve_callee_adjust,
+ !frame_pointer_needed, false);
+- saved_regs_offset += sve_callee_adjust;
++ bytes_below_sp -= sve_callee_adjust;
+ }
+- aarch64_save_callee_saves (saved_regs_offset, P0_REGNUM, P15_REGNUM,
++ aarch64_save_callee_saves (bytes_below_sp, P0_REGNUM, P15_REGNUM,
+ false, emit_frame_chain);
+- aarch64_save_callee_saves (saved_regs_offset, V0_REGNUM, V31_REGNUM,
++ aarch64_save_callee_saves (bytes_below_sp, V0_REGNUM, V31_REGNUM,
+ callee_adjust != 0 || emit_frame_chain,
+ emit_frame_chain);
+
+ /* We may need to probe the final adjustment if it is larger than the guard
+ that is assumed by the called. */
++ gcc_assert (known_eq (bytes_below_sp, final_adjust));
+ aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
+ !frame_pointer_needed, true);
+ }
+@@ -9082,7 +9083,6 @@ aarch64_expand_epilogue (bool for_sibcall)
+ poly_int64 initial_adjust = frame.initial_adjust;
+ HOST_WIDE_INT callee_adjust = frame.callee_adjust;
+ poly_int64 final_adjust = frame.final_adjust;
+- poly_int64 callee_offset = frame.callee_offset;
+ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
+ poly_int64 bytes_below_hard_fp = frame.bytes_below_hard_fp;
+ unsigned reg1 = frame.wb_candidate1;
+@@ -9150,13 +9150,13 @@ aarch64_expand_epilogue (bool for_sibcall)
+
+ /* Restore the vector registers before the predicate registers,
+ so that we can use P4 as a temporary for big-endian SVE frames. */
+- aarch64_restore_callee_saves (callee_offset, V0_REGNUM, V31_REGNUM,
++ aarch64_restore_callee_saves (final_adjust, V0_REGNUM, V31_REGNUM,
+ callee_adjust != 0, &cfi_ops);
+- aarch64_restore_callee_saves (callee_offset, P0_REGNUM, P15_REGNUM,
++ aarch64_restore_callee_saves (final_adjust, P0_REGNUM, P15_REGNUM,
+ false, &cfi_ops);
+ if (maybe_ne (sve_callee_adjust, 0))
+ aarch64_add_sp (NULL_RTX, NULL_RTX, sve_callee_adjust, true);
+- aarch64_restore_callee_saves (callee_offset - sve_callee_adjust,
++ aarch64_restore_callee_saves (final_adjust + sve_callee_adjust,
+ R0_REGNUM, R30_REGNUM,
+ callee_adjust != 0, &cfi_ops);
+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 21ac920a3fe..57e67217745 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -873,10 +873,6 @@ struct GTY (()) aarch64_frame
+ It is zero when no push is used. */
+ HOST_WIDE_INT callee_adjust;
+
+- /* The offset from SP to the callee-save registers after initial_adjust.
+- It may be non-zero if no push is used (ie. callee_adjust == 0). */
+- poly_int64 callee_offset;
+-
+ /* The size of the stack adjustment before saving or after restoring
+ SVE registers. */
+ poly_int64 sve_callee_adjust;
+--
+2.34.1
+
+
+From 7356df0319aefe4c68ef57ec4c6bd18c72188a34 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:46 +0100
+Subject: [PATCH 07/19] aarch64: Only calculate chain_offset if there is a
+ chain
+
+After previous patches, it is no longer necessary to calculate
+a chain_offset in cases where there is no chain record.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_expand_prologue): Move the
+ calculation of chain_offset into the emit_frame_chain block.
+---
+ gcc/config/aarch64/aarch64.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 96e99f6c17a..cf5244b7ec0 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -8974,16 +8974,16 @@ aarch64_expand_prologue (void)
+ if (callee_adjust != 0)
+ aarch64_push_regs (reg1, reg2, callee_adjust);
+
+- /* The offset of the frame chain record (if any) from the current SP. */
+- poly_int64 chain_offset = (initial_adjust + callee_adjust
+- - frame.hard_fp_offset);
+- gcc_assert (known_ge (chain_offset, 0));
+-
+ /* The offset of the current SP from the bottom of the static frame. */
+ poly_int64 bytes_below_sp = frame_size - initial_adjust - callee_adjust;
+
+ if (emit_frame_chain)
+ {
++ /* The offset of the frame chain record (if any) from the current SP. */
++ poly_int64 chain_offset = (initial_adjust + callee_adjust
++ - frame.hard_fp_offset);
++ gcc_assert (known_ge (chain_offset, 0));
++
+ if (callee_adjust == 0)
+ {
+ reg1 = R29_REGNUM;
+--
+2.34.1
+
+
+From 82fb69e75c21010f7afc72bb842751164fe8fc72 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:46 +0100
+Subject: [PATCH 08/19] aarch64: Rename locals_offset to bytes_above_locals
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+locals_offset was described as:
+
+ /* Offset from the base of the frame (incomming SP) to the
+ top of the locals area. This value is always a multiple of
+ STACK_BOUNDARY. */
+
+This is implicitly an “upside down” view of the frame: the incoming
+SP is at offset 0, and anything N bytes below the incoming SP is at
+offset N (rather than -N).
+
+However, reg_offset instead uses a “right way up” view; that is,
+it views offsets in address terms. Something above X is at a
+positive offset from X and something below X is at a negative
+offset from X.
+
+Also, even on FRAME_GROWS_DOWNWARD targets like AArch64,
+target-independent code views offsets in address terms too:
+locals are allocated at negative offsets to virtual_stack_vars.
+
+It seems confusing to have *_offset fields of the same structure
+using different polarities like this. This patch tries to avoid
+that by renaming locals_offset to bytes_above_locals.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::locals_offset): Rename to...
+ (aarch64_frame::bytes_above_locals): ...this.
+ * config/aarch64/aarch64.c (aarch64_layout_frame)
+ (aarch64_initial_elimination_offset): Update accordingly.
+---
+ gcc/config/aarch64/aarch64.c | 6 +++---
+ gcc/config/aarch64/aarch64.h | 6 +++---
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index cf5244b7ec0..d54f7a89306 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7596,7 +7596,7 @@ aarch64_layout_frame (void)
+ STACK_BOUNDARY / BITS_PER_UNIT));
+ frame.frame_size = saved_regs_and_above + frame.bytes_below_saved_regs;
+
+- frame.locals_offset = frame.saved_varargs_size;
++ frame.bytes_above_locals = frame.saved_varargs_size;
+
+ frame.initial_adjust = 0;
+ frame.final_adjust = 0;
+@@ -11758,13 +11758,13 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+ return frame.hard_fp_offset;
+
+ if (from == FRAME_POINTER_REGNUM)
+- return frame.hard_fp_offset - frame.locals_offset;
++ return frame.hard_fp_offset - frame.bytes_above_locals;
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (from == FRAME_POINTER_REGNUM)
+- return frame.frame_size - frame.locals_offset;
++ return frame.frame_size - frame.bytes_above_locals;
+ }
+
+ return frame.frame_size;
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 57e67217745..3c5e3dd429d 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -851,10 +851,10 @@ struct GTY (()) aarch64_frame
+ always a multiple of STACK_BOUNDARY. */
+ poly_int64 bytes_below_hard_fp;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- top of the locals area. This value is always a multiple of
++ /* The number of bytes between the top of the locals area and the top
++ of the frame (the incomming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 locals_offset;
++ poly_int64 bytes_above_locals;
+
+ /* Offset from the base of the frame (incomming SP) to the
+ hard_frame_pointer. This value is always a multiple of
+--
+2.34.1
+
+
+From fa6600b55b49ee14d8288f13719ceea2a75eea60 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:47 +0100
+Subject: [PATCH 09/19] aarch64: Rename hard_fp_offset to bytes_above_hard_fp
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Similarly to the previous locals_offset patch, hard_fp_offset
+was described as:
+
+ /* Offset from the base of the frame (incomming SP) to the
+ hard_frame_pointer. This value is always a multiple of
+ STACK_BOUNDARY. */
+ poly_int64 hard_fp_offset;
+
+which again took an “upside-down” view: higher offsets meant lower
+addresses. This patch renames the field to bytes_above_hard_fp instead.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::hard_fp_offset): Rename
+ to...
+ (aarch64_frame::bytes_above_hard_fp): ...this.
+ * config/aarch64/aarch64.c (aarch64_layout_frame)
+ (aarch64_expand_prologue): Update accordingly.
+ (aarch64_initial_elimination_offset): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 26 +++++++++++++-------------
+ gcc/config/aarch64/aarch64.h | 6 +++---
+ 2 files changed, 16 insertions(+), 16 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index d54f7a89306..23cb084e5a7 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7588,7 +7588,7 @@ aarch64_layout_frame (void)
+ + get_frame_size (),
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+- frame.hard_fp_offset
++ frame.bytes_above_hard_fp
+ = saved_regs_and_above - frame.below_hard_fp_saved_regs_size;
+
+ /* Both these values are already aligned. */
+@@ -7609,13 +7609,13 @@ aarch64_layout_frame (void)
+ else if (frame.wb_candidate1 != INVALID_REGNUM)
+ max_push_offset = 256;
+
+- HOST_WIDE_INT const_size, const_below_saved_regs, const_fp_offset;
++ HOST_WIDE_INT const_size, const_below_saved_regs, const_above_fp;
+ HOST_WIDE_INT const_saved_regs_size;
+ if (known_eq (frame.saved_regs_size, 0))
+ frame.initial_adjust = frame.frame_size;
+ else if (frame.frame_size.is_constant (&const_size)
+ && const_size < max_push_offset
+- && known_eq (frame.hard_fp_offset, const_size))
++ && known_eq (frame.bytes_above_hard_fp, const_size))
+ {
+ /* Simple, small frame with no data below the saved registers.
+
+@@ -7632,8 +7632,8 @@ aarch64_layout_frame (void)
+ case that it hardly seems worth the effort though. */
+ && (!saves_below_hard_fp_p || const_below_saved_regs == 0)
+ && !(cfun->calls_alloca
+- && frame.hard_fp_offset.is_constant (&const_fp_offset)
+- && const_fp_offset < max_push_offset))
++ && frame.bytes_above_hard_fp.is_constant (&const_above_fp)
++ && const_above_fp < max_push_offset))
+ {
+ /* Frame with small area below the saved registers:
+
+@@ -7651,12 +7651,12 @@ aarch64_layout_frame (void)
+ sub sp, sp, hard_fp_offset + below_hard_fp_saved_regs_size
+ save SVE registers relative to SP
+ sub sp, sp, bytes_below_saved_regs */
+- frame.initial_adjust = (frame.hard_fp_offset
++ frame.initial_adjust = (frame.bytes_above_hard_fp
+ + frame.below_hard_fp_saved_regs_size);
+ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+- else if (frame.hard_fp_offset.is_constant (&const_fp_offset)
+- && const_fp_offset < max_push_offset)
++ else if (frame.bytes_above_hard_fp.is_constant (&const_above_fp)
++ && const_above_fp < max_push_offset)
+ {
+ /* Frame with large area below the saved registers, or with SVE saves,
+ but with a small area above:
+@@ -7666,7 +7666,7 @@ aarch64_layout_frame (void)
+ [sub sp, sp, below_hard_fp_saved_regs_size]
+ [save SVE registers relative to SP]
+ sub sp, sp, bytes_below_saved_regs */
+- frame.callee_adjust = const_fp_offset;
++ frame.callee_adjust = const_above_fp;
+ frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
+ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+@@ -7681,7 +7681,7 @@ aarch64_layout_frame (void)
+ [sub sp, sp, below_hard_fp_saved_regs_size]
+ [save SVE registers relative to SP]
+ sub sp, sp, bytes_below_saved_regs */
+- frame.initial_adjust = frame.hard_fp_offset;
++ frame.initial_adjust = frame.bytes_above_hard_fp;
+ frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
+ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+@@ -8981,7 +8981,7 @@ aarch64_expand_prologue (void)
+ {
+ /* The offset of the frame chain record (if any) from the current SP. */
+ poly_int64 chain_offset = (initial_adjust + callee_adjust
+- - frame.hard_fp_offset);
++ - frame.bytes_above_hard_fp);
+ gcc_assert (known_ge (chain_offset, 0));
+
+ if (callee_adjust == 0)
+@@ -11755,10 +11755,10 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+ if (to == HARD_FRAME_POINTER_REGNUM)
+ {
+ if (from == ARG_POINTER_REGNUM)
+- return frame.hard_fp_offset;
++ return frame.bytes_above_hard_fp;
+
+ if (from == FRAME_POINTER_REGNUM)
+- return frame.hard_fp_offset - frame.bytes_above_locals;
++ return frame.bytes_above_hard_fp - frame.bytes_above_locals;
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 3c5e3dd429d..9291cfd3ec8 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -856,10 +856,10 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ poly_int64 bytes_above_locals;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- hard_frame_pointer. This value is always a multiple of
++ /* The number of bytes between the hard_frame_pointer and the top of
++ the frame (the incomming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 hard_fp_offset;
++ poly_int64 bytes_above_hard_fp;
+
+ /* The size of the frame. This value is the offset from base of the
+ frame (incomming SP) to the stack_pointer. This value is always
+--
+2.34.1
+
+
+From b8cd5a0229da78c2d1289d54731fbef0126617d5 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:47 +0100
+Subject: [PATCH 10/19] aarch64: Tweak frame_size comment
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch fixes another case in which a value was described with
+an “upside-down” view.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::frame_size): Tweak comment.
+---
+ gcc/config/aarch64/aarch64.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 9291cfd3ec8..82883ad5a0d 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -861,8 +861,8 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ poly_int64 bytes_above_hard_fp;
+
+- /* The size of the frame. This value is the offset from base of the
+- frame (incomming SP) to the stack_pointer. This value is always
++ /* The size of the frame, i.e. the number of bytes between the bottom
++ of the outgoing arguments and the incoming SP. This value is always
+ a multiple of STACK_BOUNDARY. */
+ poly_int64 frame_size;
+
+--
+2.34.1
+
+
+From 999c4a81cffddb850d6ab0f6d3a8de3e704d2f7a Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:48 +0100
+Subject: [PATCH 11/19] aarch64: Measure reg_offset from the bottom of the
+ frame
+
+reg_offset was measured from the bottom of the saved register area.
+This made perfect sense with the original layout, since the bottom
+of the saved register area was also the hard frame pointer address.
+It became slightly less obvious with SVE, since we save SVE
+registers below the hard frame pointer, but it still made sense.
+
+However, if we want to allow different frame layouts, it's more
+convenient and obvious to measure reg_offset from the bottom of
+the frame. After previous patches, it's also a slight simplification
+in its own right.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame): Add comment above
+ reg_offset.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Walk offsets
+ from the bottom of the frame, rather than the bottom of the saved
+ register area. Measure reg_offset from the bottom of the frame
+ rather than the bottom of the saved register area.
+ (aarch64_save_callee_saves): Update accordingly.
+ (aarch64_restore_callee_saves): Likewise.
+ (aarch64_get_separate_components): Likewise.
+ (aarch64_process_components): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 53 ++++++++++++++++--------------------
+ gcc/config/aarch64/aarch64.h | 3 ++
+ 2 files changed, 27 insertions(+), 29 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 23cb084e5a7..45ff664cba6 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7398,7 +7398,6 @@ aarch64_needs_frame_chain (void)
+ static void
+ aarch64_layout_frame (void)
+ {
+- poly_int64 offset = 0;
+ int regno, last_fp_reg = INVALID_REGNUM;
+ machine_mode vector_save_mode = aarch64_reg_save_mode (V8_REGNUM);
+ poly_int64 vector_save_size = GET_MODE_SIZE (vector_save_mode);
+@@ -7476,7 +7475,9 @@ aarch64_layout_frame (void)
+ gcc_assert (crtl->is_leaf
+ || maybe_ne (frame.reg_offset[R30_REGNUM], SLOT_NOT_REQUIRED));
+
+- frame.bytes_below_saved_regs = crtl->outgoing_args_size;
++ poly_int64 offset = crtl->outgoing_args_size;
++ gcc_assert (multiple_p (offset, STACK_BOUNDARY / BITS_PER_UNIT));
++ frame.bytes_below_saved_regs = offset;
+
+ /* Now assign stack slots for the registers. Start with the predicate
+ registers, since predicate LDR and STR have a relatively small
+@@ -7488,7 +7489,8 @@ aarch64_layout_frame (void)
+ offset += BYTES_PER_SVE_PRED;
+ }
+
+- if (maybe_ne (offset, 0))
++ poly_int64 saved_prs_size = offset - frame.bytes_below_saved_regs;
++ if (maybe_ne (saved_prs_size, 0))
+ {
+ /* If we have any vector registers to save above the predicate registers,
+ the offset of the vector register save slots need to be a multiple
+@@ -7506,10 +7508,10 @@ aarch64_layout_frame (void)
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+ else
+ {
+- if (known_le (offset, vector_save_size))
+- offset = vector_save_size;
+- else if (known_le (offset, vector_save_size * 2))
+- offset = vector_save_size * 2;
++ if (known_le (saved_prs_size, vector_save_size))
++ offset = frame.bytes_below_saved_regs + vector_save_size;
++ else if (known_le (saved_prs_size, vector_save_size * 2))
++ offset = frame.bytes_below_saved_regs + vector_save_size * 2;
+ else
+ gcc_unreachable ();
+ }
+@@ -7526,9 +7528,10 @@ aarch64_layout_frame (void)
+
+ /* OFFSET is now the offset of the hard frame pointer from the bottom
+ of the callee save area. */
+- bool saves_below_hard_fp_p = maybe_ne (offset, 0);
+- frame.below_hard_fp_saved_regs_size = offset;
+- frame.bytes_below_hard_fp = offset + frame.bytes_below_saved_regs;
++ frame.below_hard_fp_saved_regs_size = offset - frame.bytes_below_saved_regs;
++ bool saves_below_hard_fp_p
++ = maybe_ne (frame.below_hard_fp_saved_regs_size, 0);
++ frame.bytes_below_hard_fp = offset;
+ if (frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+@@ -7579,9 +7582,10 @@ aarch64_layout_frame (void)
+
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+
+- frame.saved_regs_size = offset;
++ frame.saved_regs_size = offset - frame.bytes_below_saved_regs;
+
+- poly_int64 varargs_and_saved_regs_size = offset + frame.saved_varargs_size;
++ poly_int64 varargs_and_saved_regs_size
++ = frame.saved_regs_size + frame.saved_varargs_size;
+
+ poly_int64 saved_regs_and_above
+ = aligned_upper_bound (varargs_and_saved_regs_size
+@@ -8021,9 +8025,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = (frame.reg_offset[regno]
+- + frame.bytes_below_saved_regs
+- - bytes_below_sp);
++ offset = frame.reg_offset[regno] - bytes_below_sp;
+ rtx base_rtx = stack_pointer_rtx;
+ poly_int64 sp_offset = offset;
+
+@@ -8130,9 +8132,7 @@ aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = (frame.reg_offset[regno]
+- + frame.bytes_below_saved_regs
+- - bytes_below_sp);
++ offset = frame.reg_offset[regno] - bytes_below_sp;
+ rtx base_rtx = stack_pointer_rtx;
+ if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
+ aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
+@@ -8271,14 +8271,12 @@ aarch64_get_separate_components (void)
+ it as a stack probe for -fstack-clash-protection. */
+ if (flag_stack_clash_protection
+ && maybe_ne (frame.below_hard_fp_saved_regs_size, 0)
+- && known_eq (offset, 0))
++ && known_eq (offset, frame.bytes_below_saved_regs))
+ continue;
+
+ /* Get the offset relative to the register we'll use. */
+ if (frame_pointer_needed)
+- offset -= frame.below_hard_fp_saved_regs_size;
+- else
+- offset += frame.bytes_below_saved_regs;
++ offset -= frame.bytes_below_hard_fp;
+
+ /* Check that we can access the stack slot of the register with one
+ direct load with no adjustments needed. */
+@@ -8425,9 +8423,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ rtx reg = gen_rtx_REG (mode, regno);
+ poly_int64 offset = frame.reg_offset[regno];
+ if (frame_pointer_needed)
+- offset -= frame.below_hard_fp_saved_regs_size;
+- else
+- offset += frame.bytes_below_saved_regs;
++ offset -= frame.bytes_below_hard_fp;
+
+ rtx addr = plus_constant (Pmode, ptr_reg, offset);
+ rtx mem = gen_frame_mem (mode, addr);
+@@ -8479,9 +8475,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ /* REGNO2 can be saved/restored in a pair with REGNO. */
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ if (frame_pointer_needed)
+- offset2 -= frame.below_hard_fp_saved_regs_size;
+- else
+- offset2 += frame.bytes_below_saved_regs;
++ offset2 -= frame.bytes_below_hard_fp;
+ rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
+ rtx mem2 = gen_frame_mem (mode, addr2);
+ rtx set2 = prologue_p ? gen_rtx_SET (mem2, reg2)
+@@ -8597,7 +8591,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ if (final_adjustment_p
+ && known_eq (frame.below_hard_fp_saved_regs_size, 0))
+ {
+- poly_int64 lr_offset = frame.reg_offset[LR_REGNUM];
++ poly_int64 lr_offset = (frame.reg_offset[LR_REGNUM]
++ - frame.bytes_below_saved_regs);
+ if (known_ge (lr_offset, 0))
+ min_probe_threshold -= lr_offset.to_constant ();
+ else
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 82883ad5a0d..c8ec3d58495 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -826,6 +826,9 @@ extern enum aarch64_processor aarch64_tune;
+ #ifdef HAVE_POLY_INT_H
+ struct GTY (()) aarch64_frame
+ {
++ /* The offset from the bottom of the static frame (the bottom of the
++ outgoing arguments) of each register save slot, or -2 if no save is
++ needed. */
+ poly_int64 reg_offset[LAST_SAVED_REGNUM + 1];
+
+ /* The number of extra stack bytes taken up by register varargs.
+--
+2.34.1
+
+
+From 8b664cc8f05c8130e8ca73a59ae2751cdef8a0ea Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:48 +0100
+Subject: [PATCH 12/19] aarch64: Simplify top of frame allocation
+
+After previous patches, it no longer really makes sense to allocate
+the top of the frame in terms of varargs_and_saved_regs_size and
+saved_regs_and_above.
+
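+A rough walk-through of the simplified calculation with made-up sizes
+(align_up is a stand-in for aligned_upper_bound with a 16-byte
+STACK_BOUNDARY; none of the numbers come from a real frame):
+
+  #include <assert.h>
+
+  static long align_up (long x) { return (x + 15) & -16; }
+
+  int main (void)
+  {
+    long bytes_below_saved_regs = 32;  /* outgoing arguments */
+    long saved_regs_size = 80;         /* callee saves */
+    long locals = 24;                  /* get_frame_size () */
+    long saved_varargs_size = 0;
+    long bytes_below_hard_fp = bytes_below_saved_regs; /* no SVE saves */
+
+    long offset = bytes_below_saved_regs + saved_regs_size; /* 112 */
+    offset = align_up (offset + locals);                    /* 144 */
+    long top_of_locals = offset;
+    offset += saved_varargs_size;
+    long frame_size = offset;                               /* 144 */
+
+    long bytes_above_hard_fp = frame_size - bytes_below_hard_fp; /* 112 */
+    long bytes_above_locals = frame_size - top_of_locals;        /* 0 */
+
+    assert (frame_size == 144);
+    assert (bytes_above_hard_fp == 112);
+    assert (bytes_above_locals == 0);
+    return 0;
+  }
+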
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Simplify
+ the allocation of the top of the frame.
+---
+ gcc/config/aarch64/aarch64.c | 23 ++++++++---------------
+ 1 file changed, 8 insertions(+), 15 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 45ff664cba6..779547d0344 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7584,23 +7584,16 @@ aarch64_layout_frame (void)
+
+ frame.saved_regs_size = offset - frame.bytes_below_saved_regs;
+
+- poly_int64 varargs_and_saved_regs_size
+- = frame.saved_regs_size + frame.saved_varargs_size;
+-
+- poly_int64 saved_regs_and_above
+- = aligned_upper_bound (varargs_and_saved_regs_size
+- + get_frame_size (),
+- STACK_BOUNDARY / BITS_PER_UNIT);
+-
+- frame.bytes_above_hard_fp
+- = saved_regs_and_above - frame.below_hard_fp_saved_regs_size;
++ offset += get_frame_size ();
++ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
++ auto top_of_locals = offset;
+
+- /* Both these values are already aligned. */
+- gcc_assert (multiple_p (frame.bytes_below_saved_regs,
+- STACK_BOUNDARY / BITS_PER_UNIT));
+- frame.frame_size = saved_regs_and_above + frame.bytes_below_saved_regs;
++ offset += frame.saved_varargs_size;
++ gcc_assert (multiple_p (offset, STACK_BOUNDARY / BITS_PER_UNIT));
++ frame.frame_size = offset;
+
+- frame.bytes_above_locals = frame.saved_varargs_size;
++ frame.bytes_above_hard_fp = frame.frame_size - frame.bytes_below_hard_fp;
++ frame.bytes_above_locals = frame.frame_size - top_of_locals;
+
+ frame.initial_adjust = 0;
+ frame.final_adjust = 0;
+--
+2.34.1
+
+
+From bb4600071acc3a02db4f37ffb95c8495ad76a140 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:49 +0100
+Subject: [PATCH 13/19] aarch64: Minor initial adjustment tweak
+
+This patch just changes a calculation of initial_adjust
+to one that makes it slightly more obvious that the total
+adjustment is frame.frame_size.
+
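+The two expressions are the same quantity: frame_size equals
+bytes_above_hard_fp plus bytes_below_hard_fp, and bytes_below_hard_fp
+equals below_hard_fp_saved_regs_size plus bytes_below_saved_regs.
+A quick check with illustrative numbers:
+
+  #include <assert.h>
+
+  int main (void)
+  {
+    long bytes_below_saved_regs = 32;
+    long below_hard_fp_saved_regs_size = 48;  /* SVE saves */
+    long bytes_above_hard_fp = 64;
+
+    long bytes_below_hard_fp
+      = below_hard_fp_saved_regs_size + bytes_below_saved_regs;  /* 80 */
+    long frame_size = bytes_above_hard_fp + bytes_below_hard_fp; /* 144 */
+
+    /* Old expression for initial_adjust in the all-SVE-saves case.  */
+    long old_adjust = bytes_above_hard_fp + below_hard_fp_saved_regs_size;
+    /* New expression; same value, but visibly frame-size based.  */
+    long new_adjust = frame_size - bytes_below_saved_regs;
+
+    assert (old_adjust == new_adjust && new_adjust == 112);
+    return 0;
+  }
+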
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Tweak
+ calculation of initial_adjust for frames in which all saves
+ are SVE saves.
+---
+ gcc/config/aarch64/aarch64.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 779547d0344..0b8992ada74 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7645,11 +7645,10 @@ aarch64_layout_frame (void)
+ {
+ /* Frame in which all saves are SVE saves:
+
+- sub sp, sp, hard_fp_offset + below_hard_fp_saved_regs_size
++ sub sp, sp, frame_size - bytes_below_saved_regs
+ save SVE registers relative to SP
+ sub sp, sp, bytes_below_saved_regs */
+- frame.initial_adjust = (frame.bytes_above_hard_fp
+- + frame.below_hard_fp_saved_regs_size);
++ frame.initial_adjust = frame.frame_size - frame.bytes_below_saved_regs;
+ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+ else if (frame.bytes_above_hard_fp.is_constant (&const_above_fp)
+--
+2.34.1
+
+
+From f22329d5efbacf80edf4a2d45ebadd93f283252c Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:49 +0100
+Subject: [PATCH 14/19] aarch64: Tweak stack clash boundary condition
+
+The AArch64 ABI says that, when stack clash protection is used,
+there can be a maximum of 1KiB of unprobed space at sp on entry
+to a function. Therefore, we need to probe when allocating
+>= guard_size - 1KiB of data (>= rather than >). This is what
+GCC does.
+
+If an allocation is exactly guard_size bytes, it is enough to allocate
+those bytes and probe once at offset 1024. It isn't possible to use a
+single probe at any other offset: higher would complicate later code,
+by leaving more unprobed space than usual, while lower would risk
+leaving an entire page unprobed. For simplicity, the code probes all
+allocations at offset 1024.
+
+Some register saves also act as probes. If we need to allocate
+more space below the last such register save probe, we need to
+probe the allocation if it is > 1KiB. Again, this allocation is
+then sometimes (but not always) probed at offset 1024. This sort of
+allocation is currently only used for outgoing arguments, which are
+rarely this big.
+
+However, the code also probed if this final outgoing-arguments
+allocation was == 1KiB, rather than just > 1KiB. This isn't
+necessary, since the register save then probes at offset 1024
+as required. Continuing to probe allocations of exactly 1KiB
+would complicate later patches.
+
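+A sketch of the old and new final-adjustment thresholds, assuming the
+1KiB caller guard and 16-byte stack alignment used by the new test
+(the constants mirror STACK_CLASH_CALLER_GUARD and STACK_BOUNDARY but
+are hard-coded here for illustration):
+
+  #include <assert.h>
+
+  int main (void)
+  {
+    const long guard_used_by_caller = 1024;
+    const long byte_sp_alignment = 16;
+
+    /* Old rule: probe a final allocation of >= 1024 bytes.
+       New rule: probe only allocations of >= 1040 bytes, i.e. strictly
+       more than 1KiB once SP alignment is taken into account.  */
+    long old_threshold = guard_used_by_caller;
+    long new_threshold = guard_used_by_caller + byte_sp_alignment;
+
+    /* test1 in stack-check-prologue-17.c: a 1024-byte final allocation.
+       The register save above it already probes within 1KiB of SP.  */
+    assert (1024 >= old_threshold);     /* old code emitted a probe */
+    assert (!(1024 >= new_threshold));  /* new code does not */
+
+    /* test2: a 1040-byte final allocation still gets a probe.  */
+    assert (1040 >= new_threshold);
+    return 0;
+  }
+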
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Don't probe final allocations that are exactly 1KiB in size (after
+ unprobed space above the final allocation has been deducted).
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: New test.
+---
+ gcc/config/aarch64/aarch64.c | 4 +-
+ .../aarch64/stack-check-prologue-17.c | 55 +++++++++++++++++++
+ 2 files changed, 58 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 0b8992ada74..bfd24876195 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -8564,9 +8564,11 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT guard_size
+ = 1 << param_stack_clash_protection_guard_size;
+ HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
++ HOST_WIDE_INT byte_sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
++ gcc_assert (multiple_p (poly_size, byte_sp_alignment));
+ HOST_WIDE_INT min_probe_threshold
+ = (final_adjustment_p
+- ? guard_used_by_caller
++ ? guard_used_by_caller + byte_sp_alignment
+ : guard_size - guard_used_by_caller);
+ /* When doing the final adjustment for the outgoing arguments, take into
+ account any unprobed space there is above the current SP. There are
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+new file mode 100644
+index 00000000000..0d8a25d73a2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -0,0 +1,55 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
+--
+2.34.1
+
+
+From 174a9747491e591ef2abb3e20a0332303f11003a Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:49 +0100
+Subject: [PATCH 15/19] aarch64: Put LR save probe in first 16 bytes
+
+-fstack-clash-protection uses the save of LR as a probe for the next
+allocation. The next allocation could be:
+
+* another part of the static frame, e.g. when allocating SVE save slots
+ or outgoing arguments
+
+* an alloca in the same function
+
+* an allocation made by a callee function
+
+However, when -fomit-frame-pointer is used, the LR save slot is placed
+above the other GPR save slots. It could therefore be up to 80 bytes
+above the base of the GPR save area (which is also the hard fp address).
+
+aarch64_allocate_and_probe_stack_space took this into account when
+deciding how much subsequent space could be allocated without needing
+a probe. However, it interacted badly with:
+
+ /* If doing a small final adjustment, we always probe at offset 0.
+ This is done to avoid issues when LR is not at position 0 or when
+ the final adjustment is smaller than the probing offset. */
+ else if (final_adjustment_p && rounded_size == 0)
+ residual_probe_offset = 0;
+
+which forces any allocation that is smaller than the guard page size
+to be probed at offset 0 rather than the usual offset 1024. It was
+therefore possible to construct cases in which we had:
+
+* a probe using LR at SP + 80 bytes (or some other value >= 16)
+* an allocation of the guard page size - 16 bytes
+* a probe at SP + 0
+
+which allocates guard page size + 64 consecutive unprobed bytes.
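+A quick arithmetic check of that scenario, assuming the 4KiB guard
+page used by the new tests (--param stack-clash-protection-guard-size=12):
+
+  #include <assert.h>
+
+  int main (void)
+  {
+    const long guard_size = 4096;       /* 2^12 */
+
+    long lr_probe_above_sp = 80;        /* LR saved at SP + 80 */
+    long allocation = guard_size - 16;  /* SP then drops by 4080 */
+    long next_probe_above_new_sp = 0;   /* small final adjustment probes
+                                           at the new SP + 0 */
+
+    /* Span between consecutive probes.  */
+    long unprobed = lr_probe_above_sp + allocation - next_probe_above_new_sp;
+
+    assert (unprobed == guard_size + 64);  /* 4160 > 4096: too large */
+    return 0;
+  }
+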
+
+This patch requires the LR probe to be in the first 16 bytes of the
+save area when stack clash protection is active. Doing it
+unconditionally would cause code-quality regressions, but a later
+patch deals with that.
+
+The new comment doesn't say that the probe register is required
+to be LR, since a later patch removes that restriction.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Ensure that
+ the LR save slot is in the first 16 bytes of the register save area.
+ (aarch64_allocate_and_probe_stack_space): Remove workaround for
+ when LR was not in the first 16 bytes.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-18.c: New test.
+---
+ gcc/config/aarch64/aarch64.c | 61 ++++-------
+ .../aarch64/stack-check-prologue-18.c | 100 ++++++++++++++++++
+ 2 files changed, 123 insertions(+), 38 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index bfd24876195..3f2b10de987 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7532,26 +7532,34 @@ aarch64_layout_frame (void)
+ bool saves_below_hard_fp_p
+ = maybe_ne (frame.below_hard_fp_saved_regs_size, 0);
+ frame.bytes_below_hard_fp = offset;
++
++ auto allocate_gpr_slot = [&](unsigned int regno)
++ {
++ frame.reg_offset[regno] = offset;
++ if (frame.wb_candidate1 == INVALID_REGNUM)
++ frame.wb_candidate1 = regno;
++ else if (frame.wb_candidate2 == INVALID_REGNUM)
++ frame.wb_candidate2 = regno;
++ offset += UNITS_PER_WORD;
++ };
++
+ if (frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+- frame.reg_offset[R29_REGNUM] = offset;
+- frame.wb_candidate1 = R29_REGNUM;
+- frame.reg_offset[R30_REGNUM] = offset + UNITS_PER_WORD;
+- frame.wb_candidate2 = R30_REGNUM;
+- offset += 2 * UNITS_PER_WORD;
++ allocate_gpr_slot (R29_REGNUM);
++ allocate_gpr_slot (R30_REGNUM);
+ }
++ else if (flag_stack_clash_protection
++ && known_eq (frame.reg_offset[R30_REGNUM], SLOT_REQUIRED))
++ /* Put the LR save slot first, since it makes a good choice of probe
++ for stack clash purposes. The idea is that the link register usually
++ has to be saved before a call anyway, and so we lose little by
++ stopping it from being individually shrink-wrapped. */
++ allocate_gpr_slot (R30_REGNUM);
+
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+- {
+- frame.reg_offset[regno] = offset;
+- if (frame.wb_candidate1 == INVALID_REGNUM)
+- frame.wb_candidate1 = regno;
+- else if (frame.wb_candidate2 == INVALID_REGNUM)
+- frame.wb_candidate2 = regno;
+- offset += UNITS_PER_WORD;
+- }
++ allocate_gpr_slot (regno);
+
+ poly_int64 max_int_offset = offset;
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -8570,29 +8578,6 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ = (final_adjustment_p
+ ? guard_used_by_caller + byte_sp_alignment
+ : guard_size - guard_used_by_caller);
+- /* When doing the final adjustment for the outgoing arguments, take into
+- account any unprobed space there is above the current SP. There are
+- two cases:
+-
+- - When saving SVE registers below the hard frame pointer, we force
+- the lowest save to take place in the prologue before doing the final
+- adjustment (i.e. we don't allow the save to be shrink-wrapped).
+- This acts as a probe at SP, so there is no unprobed space.
+-
+- - When there are no SVE register saves, we use the store of the link
+- register as a probe. We can't assume that LR was saved at position 0
+- though, so treat any space below it as unprobed. */
+- if (final_adjustment_p
+- && known_eq (frame.below_hard_fp_saved_regs_size, 0))
+- {
+- poly_int64 lr_offset = (frame.reg_offset[LR_REGNUM]
+- - frame.bytes_below_saved_regs);
+- if (known_ge (lr_offset, 0))
+- min_probe_threshold -= lr_offset.to_constant ();
+- else
+- gcc_assert (!flag_stack_clash_protection || known_eq (poly_size, 0));
+- }
+-
+ poly_int64 frame_size = frame.frame_size;
+
+ /* We should always have a positive probe threshold. */
+@@ -8772,8 +8757,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+ /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when LR is not at position 0 or when
+- the final adjustment is smaller than the probing offset. */
++ This is done to avoid issues when the final adjustment is smaller
++ than the probing offset. */
+ else if (final_adjustment_p && rounded_size == 0)
+ residual_probe_offset = 0;
+
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+new file mode 100644
+index 00000000000..82447d20fff
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -0,0 +1,100 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #4064
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++** str x26, \[sp, #?4128\]
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test3:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test3(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
+--
+2.34.1
+
+
+From e932e11c353be52256dd30d30d924f4e834e3ca3 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:51 +0100
+Subject: [PATCH 16/19] aarch64: Simplify probe of final frame allocation
+
+Previous patches ensured that the final frame allocation only needs
+a probe when the size is strictly greater than 1KiB. It's therefore
+safe to use the normal 1024 probe offset in all cases.
+
+The main motivation for doing this is to simplify the code and
+reduce the number of special cases.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Always probe the residual allocation at offset 1024, asserting
+ that that is in range.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: Expect the probe
+ to be at offset 1024 rather than offset 0.
+ * gcc.target/aarch64/stack-check-prologue-18.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 12 ++++--------
+ .../gcc.target/aarch64/stack-check-prologue-17.c | 2 +-
+ .../gcc.target/aarch64/stack-check-prologue-18.c | 4 ++--
+ 3 files changed, 7 insertions(+), 11 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 3f2b10de987..4b9cd687525 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -8751,16 +8751,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ are still safe. */
+ if (residual)
+ {
+- HOST_WIDE_INT residual_probe_offset = guard_used_by_caller;
++ gcc_assert (guard_used_by_caller + byte_sp_alignment <= size);
++
+ /* If we're doing final adjustments, and we've done any full page
+ allocations then any residual needs to be probed. */
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+- /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when the final adjustment is smaller
+- than the probing offset. */
+- else if (final_adjustment_p && rounded_size == 0)
+- residual_probe_offset = 0;
+
+ aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
+ if (residual >= min_probe_threshold)
+@@ -8771,8 +8767,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT_PRINT_DEC " bytes, probing will be required."
+ "\n", residual);
+
+- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+- residual_probe_offset));
++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
++ guard_used_by_caller));
+ emit_insn (gen_blockage ());
+ }
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+index 0d8a25d73a2..f0ec1389771 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -33,7 +33,7 @@ int test1(int z) {
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+index 82447d20fff..6383bec5ebc 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -9,7 +9,7 @@ void g();
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #4064
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -50,7 +50,7 @@ int test1(int z) {
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+--
+2.34.1
+
+
+From 9ed9fd54b2b471745c9489e83496c091a7b64904 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:52 +0100
+Subject: [PATCH 17/19] aarch64: Explicitly record probe registers in frame
+ info
+
+The stack frame is currently divided into three areas:
+
+A: the area above the hard frame pointer
+B: the SVE saves below the hard frame pointer
+C: the outgoing arguments
+
+If the stack frame is allocated in one chunk, the allocation needs a
+probe if the frame size is >= guard_size - 1KiB. In addition, if the
+function is not a leaf function, it must probe an address no more than
+1KiB above the outgoing SP. We ensured the second condition by
+
+(1) using single-chunk allocations for non-leaf functions only if
+ the link register save slot is within 512 bytes of the bottom
+ of the frame; and
+
+(2) using the link register save as a probe (meaning, for instance,
+ that it can't be individually shrink wrapped)
+
+If instead the stack is allocated in multiple chunks, then:
+
+* an allocation involving only the outgoing arguments (C above) requires
+ a probe if the allocation size is > 1KiB
+
+* any other allocation requires a probe if the allocation size
+ is >= guard_size - 1KiB
+
+* second and subsequent allocations require the previous allocation
+ to probe at the bottom of the allocated area, regardless of the size
+ of that previous allocation
+
+The final point means that, unlike for single allocations,
+it can be necessary to have both a non-SVE register probe and
+an SVE register probe. For example:
+
+* allocate A, probe using a non-SVE register save
+* allocate B, probe using an SVE register save
+* allocate C
+
+The non-SVE register used in this case was again the link register.
+It was previously used even if the link register save slot was some
+bytes above the bottom of the non-SVE register saves, but an earlier
+patch avoided that by putting the link register save slot first.
+
+As a belt-and-braces fix, this patch explicitly records which
+probe registers we're using and allows the non-SVE probe to be
+whichever register comes first (as for SVE).
+
+The patch also avoids unnecessary probes in sve/pcs/stack_clash_3.c.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::sve_save_and_probe)
+ (aarch64_frame::hard_fp_save_and_probe): New fields.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Initialize them.
+ Rather than asserting that a leaf function saves LR, instead assert
+ that a leaf function saves something.
+ (aarch64_get_separate_components): Prevent the chosen probe
+ registers from being individually shrink-wrapped.
+ (aarch64_allocate_and_probe_stack_space): Remove workaround for
+ probe registers that aren't at the bottom of the previous allocation.
+
+gcc/testsuite/
+ * gcc.target/aarch64/sve/pcs/stack_clash_3.c: Avoid redundant probes.
+---
+ gcc/config/aarch64/aarch64.c | 68 +++++++++++++++----
+ gcc/config/aarch64/aarch64.h | 8 +++
+ .../aarch64/sve/pcs/stack_clash_3.c | 6 +-
+ 3 files changed, 64 insertions(+), 18 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 4b9cd687525..ef4b3b671ba 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7469,15 +7469,11 @@ aarch64_layout_frame (void)
+ && !crtl->abi->clobbers_full_reg_p (regno))
+ frame.reg_offset[regno] = SLOT_REQUIRED;
+
+- /* With stack-clash, LR must be saved in non-leaf functions. The saving of
+- LR counts as an implicit probe which allows us to maintain the invariant
+- described in the comment at expand_prologue. */
+- gcc_assert (crtl->is_leaf
+- || maybe_ne (frame.reg_offset[R30_REGNUM], SLOT_NOT_REQUIRED));
+
+ poly_int64 offset = crtl->outgoing_args_size;
+ gcc_assert (multiple_p (offset, STACK_BOUNDARY / BITS_PER_UNIT));
+ frame.bytes_below_saved_regs = offset;
++ frame.sve_save_and_probe = INVALID_REGNUM;
+
+ /* Now assign stack slots for the registers. Start with the predicate
+ registers, since predicate LDR and STR have a relatively small
+@@ -7485,6 +7481,8 @@ aarch64_layout_frame (void)
+ for (regno = P0_REGNUM; regno <= P15_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+ {
++ if (frame.sve_save_and_probe == INVALID_REGNUM)
++ frame.sve_save_and_probe = regno;
+ frame.reg_offset[regno] = offset;
+ offset += BYTES_PER_SVE_PRED;
+ }
+@@ -7522,6 +7520,8 @@ aarch64_layout_frame (void)
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+ {
++ if (frame.sve_save_and_probe == INVALID_REGNUM)
++ frame.sve_save_and_probe = regno;
+ frame.reg_offset[regno] = offset;
+ offset += vector_save_size;
+ }
+@@ -7531,10 +7531,18 @@ aarch64_layout_frame (void)
+ frame.below_hard_fp_saved_regs_size = offset - frame.bytes_below_saved_regs;
+ bool saves_below_hard_fp_p
+ = maybe_ne (frame.below_hard_fp_saved_regs_size, 0);
++ gcc_assert (!saves_below_hard_fp_p
++ || (frame.sve_save_and_probe != INVALID_REGNUM
++ && known_eq (frame.reg_offset[frame.sve_save_and_probe],
++ frame.bytes_below_saved_regs)));
++
+ frame.bytes_below_hard_fp = offset;
++ frame.hard_fp_save_and_probe = INVALID_REGNUM;
+
+ auto allocate_gpr_slot = [&](unsigned int regno)
+ {
++ if (frame.hard_fp_save_and_probe == INVALID_REGNUM)
++ frame.hard_fp_save_and_probe = regno;
+ frame.reg_offset[regno] = offset;
+ if (frame.wb_candidate1 == INVALID_REGNUM)
+ frame.wb_candidate1 = regno;
+@@ -7568,6 +7576,8 @@ aarch64_layout_frame (void)
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+ {
++ if (frame.hard_fp_save_and_probe == INVALID_REGNUM)
++ frame.hard_fp_save_and_probe = regno;
+ /* If there is an alignment gap between integer and fp callee-saves,
+ allocate the last fp register to it if possible. */
+ if (regno == last_fp_reg
+@@ -7591,6 +7601,17 @@ aarch64_layout_frame (void)
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+
+ frame.saved_regs_size = offset - frame.bytes_below_saved_regs;
++ gcc_assert (known_eq (frame.saved_regs_size,
++ frame.below_hard_fp_saved_regs_size)
++ || (frame.hard_fp_save_and_probe != INVALID_REGNUM
++ && known_eq (frame.reg_offset[frame.hard_fp_save_and_probe],
++ frame.bytes_below_hard_fp)));
++
++ /* With stack-clash, a register must be saved in non-leaf functions.
++ The saving of the bottommost register counts as an implicit probe,
++ which allows us to maintain the invariant described in the comment
++ at expand_prologue. */
++ gcc_assert (crtl->is_leaf || maybe_ne (frame.saved_regs_size, 0));
+
+ offset += get_frame_size ();
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -7690,6 +7711,25 @@ aarch64_layout_frame (void)
+ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+
++ /* The frame is allocated in pieces, with each non-final piece
++ including a register save at offset 0 that acts as a probe for
++ the following piece. In addition, the save of the bottommost register
++ acts as a probe for callees and allocas. Roll back any probes that
++ aren't needed.
++
++ A probe isn't needed if it is associated with the final allocation
++ (including callees and allocas) that happens before the epilogue is
++ executed. */
++ if (crtl->is_leaf
++ && !cfun->calls_alloca
++ && known_eq (frame.final_adjust, 0))
++ {
++ if (maybe_ne (frame.sve_callee_adjust, 0))
++ frame.sve_save_and_probe = INVALID_REGNUM;
++ else
++ frame.hard_fp_save_and_probe = INVALID_REGNUM;
++ }
++
+ /* Make sure the individual adjustments add up to the full frame size. */
+ gcc_assert (known_eq (frame.initial_adjust
+ + frame.callee_adjust
+@@ -8267,13 +8307,6 @@ aarch64_get_separate_components (void)
+
+ poly_int64 offset = frame.reg_offset[regno];
+
+- /* If the register is saved in the first SVE save slot, we use
+- it as a stack probe for -fstack-clash-protection. */
+- if (flag_stack_clash_protection
+- && maybe_ne (frame.below_hard_fp_saved_regs_size, 0)
+- && known_eq (offset, frame.bytes_below_saved_regs))
+- continue;
+-
+ /* Get the offset relative to the register we'll use. */
+ if (frame_pointer_needed)
+ offset -= frame.bytes_below_hard_fp;
+@@ -8308,6 +8341,13 @@ aarch64_get_separate_components (void)
+
+ bitmap_clear_bit (components, LR_REGNUM);
+ bitmap_clear_bit (components, SP_REGNUM);
++ if (flag_stack_clash_protection)
++ {
++ if (frame.sve_save_and_probe != INVALID_REGNUM)
++ bitmap_clear_bit (components, frame.sve_save_and_probe);
++ if (frame.hard_fp_save_and_probe != INVALID_REGNUM)
++ bitmap_clear_bit (components, frame.hard_fp_save_and_probe);
++ }
+
+ return components;
+ }
+@@ -8844,8 +8884,8 @@ aarch64_epilogue_uses (int regno)
+ When probing is needed, we emit a probe at the start of the prologue
+ and every PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE bytes thereafter.
+
+- We have to track how much space has been allocated and the only stores
+- to the stack we track as implicit probes are the FP/LR stores.
++ We can also use register saves as probes. These are stored in
++ sve_save_and_probe and hard_fp_save_and_probe.
+
+ For outgoing arguments we probe if the size is larger than 1KB, such that
+ the ABI specified buffer is maintained for the next callee.
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index c8ec3d58495..97173e48598 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -911,6 +911,14 @@ struct GTY (()) aarch64_frame
+ This is the register they should use. */
+ unsigned spare_pred_reg;
+
++ /* An SVE register that is saved below the hard frame pointer and that acts
++ as a probe for later allocations, or INVALID_REGNUM if none. */
++ unsigned sve_save_and_probe;
++
++ /* A register that is saved at the hard frame pointer and that acts
++ as a probe for later allocations, or INVALID_REGNUM if none. */
++ unsigned hard_fp_save_and_probe;
++
+ bool laid_out;
+ };
+
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_3.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_3.c
+index 3e01ec36c3a..3530a0d504b 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_3.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_3.c
+@@ -11,11 +11,10 @@
+ ** mov x11, sp
+ ** ...
+ ** sub sp, sp, x13
+-** str p4, \[sp\]
+ ** cbz w0, [^\n]*
++** str p4, \[sp\]
+ ** ...
+ ** ptrue p0\.b, all
+-** ldr p4, \[sp\]
+ ** addvl sp, sp, #1
+ ** ldr x24, \[sp\], 32
+ ** ret
+@@ -39,13 +38,12 @@ test_1 (int n)
+ ** mov x11, sp
+ ** ...
+ ** sub sp, sp, x13
+-** str p4, \[sp\]
+ ** cbz w0, [^\n]*
++** str p4, \[sp\]
+ ** str p5, \[sp, #1, mul vl\]
+ ** str p6, \[sp, #2, mul vl\]
+ ** ...
+ ** ptrue p0\.b, all
+-** ldr p4, \[sp\]
+ ** addvl sp, sp, #1
+ ** ldr x24, \[sp\], 32
+ ** ret
+--
+2.34.1
+
+
+From 4bbf7b6cdd02b0d547ddd6a630f2065680bf2f6b Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:52 +0100
+Subject: [PATCH 18/19] aarch64: Remove below_hard_fp_saved_regs_size
+
+After previous patches, it's no longer necessary to store
+saved_regs_size and below_hard_fp_saved_regs_size in the frame info.
+All measurements instead use the top or bottom of the frame as
+reference points.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::saved_regs_size)
+ (aarch64_frame::below_hard_fp_saved_regs_size): Delete.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Update accordingly.
+---
+ gcc/config/aarch64/aarch64.c | 45 +++++++++++++++++-------------------
+ gcc/config/aarch64/aarch64.h | 7 ------
+ 2 files changed, 21 insertions(+), 31 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index ef4b3b671ba..385718a475b 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7528,9 +7528,8 @@ aarch64_layout_frame (void)
+
+ /* OFFSET is now the offset of the hard frame pointer from the bottom
+ of the callee save area. */
+- frame.below_hard_fp_saved_regs_size = offset - frame.bytes_below_saved_regs;
+- bool saves_below_hard_fp_p
+- = maybe_ne (frame.below_hard_fp_saved_regs_size, 0);
++ auto below_hard_fp_saved_regs_size = offset - frame.bytes_below_saved_regs;
++ bool saves_below_hard_fp_p = maybe_ne (below_hard_fp_saved_regs_size, 0);
+ gcc_assert (!saves_below_hard_fp_p
+ || (frame.sve_save_and_probe != INVALID_REGNUM
+ && known_eq (frame.reg_offset[frame.sve_save_and_probe],
+@@ -7600,9 +7599,8 @@ aarch64_layout_frame (void)
+
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+
+- frame.saved_regs_size = offset - frame.bytes_below_saved_regs;
+- gcc_assert (known_eq (frame.saved_regs_size,
+- frame.below_hard_fp_saved_regs_size)
++ auto saved_regs_size = offset - frame.bytes_below_saved_regs;
++ gcc_assert (known_eq (saved_regs_size, below_hard_fp_saved_regs_size)
+ || (frame.hard_fp_save_and_probe != INVALID_REGNUM
+ && known_eq (frame.reg_offset[frame.hard_fp_save_and_probe],
+ frame.bytes_below_hard_fp)));
+@@ -7611,7 +7609,7 @@ aarch64_layout_frame (void)
+ The saving of the bottommost register counts as an implicit probe,
+ which allows us to maintain the invariant described in the comment
+ at expand_prologue. */
+- gcc_assert (crtl->is_leaf || maybe_ne (frame.saved_regs_size, 0));
++ gcc_assert (crtl->is_leaf || maybe_ne (saved_regs_size, 0));
+
+ offset += get_frame_size ();
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -7637,7 +7635,7 @@ aarch64_layout_frame (void)
+
+ HOST_WIDE_INT const_size, const_below_saved_regs, const_above_fp;
+ HOST_WIDE_INT const_saved_regs_size;
+- if (known_eq (frame.saved_regs_size, 0))
++ if (known_eq (saved_regs_size, 0))
+ frame.initial_adjust = frame.frame_size;
+ else if (frame.frame_size.is_constant (&const_size)
+ && const_size < max_push_offset
+@@ -7650,7 +7648,7 @@ aarch64_layout_frame (void)
+ frame.callee_adjust = const_size;
+ }
+ else if (frame.bytes_below_saved_regs.is_constant (&const_below_saved_regs)
+- && frame.saved_regs_size.is_constant (&const_saved_regs_size)
++ && saved_regs_size.is_constant (&const_saved_regs_size)
+ && const_below_saved_regs + const_saved_regs_size < 512
+ /* We could handle this case even with data below the saved
+ registers, provided that that data left us with valid offsets
+@@ -7669,8 +7667,7 @@ aarch64_layout_frame (void)
+ frame.initial_adjust = frame.frame_size;
+ }
+ else if (saves_below_hard_fp_p
+- && known_eq (frame.saved_regs_size,
+- frame.below_hard_fp_saved_regs_size))
++ && known_eq (saved_regs_size, below_hard_fp_saved_regs_size))
+ {
+ /* Frame in which all saves are SVE saves:
+
+@@ -7692,7 +7689,7 @@ aarch64_layout_frame (void)
+ [save SVE registers relative to SP]
+ sub sp, sp, bytes_below_saved_regs */
+ frame.callee_adjust = const_above_fp;
+- frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
++ frame.sve_callee_adjust = below_hard_fp_saved_regs_size;
+ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+ else
+@@ -7707,7 +7704,7 @@ aarch64_layout_frame (void)
+ [save SVE registers relative to SP]
+ sub sp, sp, bytes_below_saved_regs */
+ frame.initial_adjust = frame.bytes_above_hard_fp;
+- frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
++ frame.sve_callee_adjust = below_hard_fp_saved_regs_size;
+ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+
+@@ -8849,17 +8846,17 @@ aarch64_epilogue_uses (int regno)
+ | local variables | <-- frame_pointer_rtx
+ | |
+ +-------------------------------+
+- | padding | \
+- +-------------------------------+ |
+- | callee-saved registers | | frame.saved_regs_size
+- +-------------------------------+ |
+- | LR' | |
+- +-------------------------------+ |
+- | FP' | |
+- +-------------------------------+ |<- hard_frame_pointer_rtx (aligned)
+- | SVE vector registers | | \
+- +-------------------------------+ | | below_hard_fp_saved_regs_size
+- | SVE predicate registers | / /
++ | padding |
++ +-------------------------------+
++ | callee-saved registers |
++ +-------------------------------+
++ | LR' |
++ +-------------------------------+
++ | FP' |
++ +-------------------------------+ <-- hard_frame_pointer_rtx (aligned)
++ | SVE vector registers |
++ +-------------------------------+
++ | SVE predicate registers |
+ +-------------------------------+
+ | dynamic allocation |
+ +-------------------------------+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 97173e48598..9084b1cfb9d 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -837,18 +837,11 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ HOST_WIDE_INT saved_varargs_size;
+
+- /* The size of the callee-save registers with a slot in REG_OFFSET. */
+- poly_int64 saved_regs_size;
+-
+ /* The number of bytes between the bottom of the static frame (the bottom
+ of the outgoing arguments) and the bottom of the register save area.
+ This value is always a multiple of STACK_BOUNDARY. */
+ poly_int64 bytes_below_saved_regs;
+
+- /* The size of the callee-save registers with a slot in REG_OFFSET that
+- are saved below the hard frame pointer. */
+- poly_int64 below_hard_fp_saved_regs_size;
+-
+ /* The number of bytes between the bottom of the static frame (the bottom
+ of the outgoing arguments) and the hard frame pointer. This value is
+ always a multiple of STACK_BOUNDARY. */
+--
+2.34.1
+
+
+From bea0985749c12fcc264710586addb7838cc61e6d Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:52 +0100
+Subject: [PATCH 19/19] aarch64: Make stack smash canary protect saved
+ registers
+
+AArch64 normally puts the saved registers near the bottom of the frame,
+immediately above any dynamic allocations. But this means that a
+stack-smash attack on those dynamic allocations could overwrite the
+saved registers without needing to reach as far as the stack smash
+canary.
+
+The same thing could also happen for variable-sized arguments that are
+passed by value, since those are allocated before a call and popped on
+return.
+
+This patch avoids that by putting the locals (and thus the canary) below
+the saved registers when stack smash protection is active.
+
+The patch fixes CVE-2023-4039.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_save_regs_above_locals_p):
+ New function.
+ (aarch64_layout_frame): Use it to decide whether locals should
+ go above or below the saved registers.
+ (aarch64_expand_prologue): Update stack layout comment.
+ Emit a stack tie after the final adjustment.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-protector-8.c: New test.
+ * gcc.target/aarch64/stack-protector-9.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 46 +++++++--
+ .../gcc.target/aarch64/stack-protector-8.c | 95 +++++++++++++++++++
+ .../gcc.target/aarch64/stack-protector-9.c | 33 +++++++
+ 3 files changed, 168 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 385718a475b..3ccfd3c30fc 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -7392,6 +7392,20 @@ aarch64_needs_frame_chain (void)
+ return aarch64_use_frame_pointer;
+ }
+
++/* Return true if the current function should save registers above
++ the locals area, rather than below it. */
++
++static bool
++aarch64_save_regs_above_locals_p ()
++{
++ /* When using stack smash protection, make sure that the canary slot
++ comes between the locals and the saved registers. Otherwise,
++ it would be possible for a carefully sized smash attack to change
++ the saved registers (particularly LR and FP) without reaching the
++ canary. */
++ return crtl->stack_protect_guard;
++}
++
+ /* Mark the registers that need to be saved by the callee and calculate
+ the size of the callee-saved registers area and frame record (both FP
+ and LR may be omitted). */
+@@ -7403,6 +7417,7 @@ aarch64_layout_frame (void)
+ poly_int64 vector_save_size = GET_MODE_SIZE (vector_save_mode);
+ bool frame_related_fp_reg_p = false;
+ aarch64_frame &frame = cfun->machine->frame;
++ poly_int64 top_of_locals = -1;
+
+ frame.emit_frame_chain = aarch64_needs_frame_chain ();
+
+@@ -7469,9 +7484,16 @@ aarch64_layout_frame (void)
+ && !crtl->abi->clobbers_full_reg_p (regno))
+ frame.reg_offset[regno] = SLOT_REQUIRED;
+
++ bool regs_at_top_p = aarch64_save_regs_above_locals_p ();
+
+ poly_int64 offset = crtl->outgoing_args_size;
+ gcc_assert (multiple_p (offset, STACK_BOUNDARY / BITS_PER_UNIT));
++ if (regs_at_top_p)
++ {
++ offset += get_frame_size ();
++ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
++ top_of_locals = offset;
++ }
+ frame.bytes_below_saved_regs = offset;
+ frame.sve_save_and_probe = INVALID_REGNUM;
+
+@@ -7611,15 +7633,18 @@ aarch64_layout_frame (void)
+ at expand_prologue. */
+ gcc_assert (crtl->is_leaf || maybe_ne (saved_regs_size, 0));
+
+- offset += get_frame_size ();
+- offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+- auto top_of_locals = offset;
+-
++ if (!regs_at_top_p)
++ {
++ offset += get_frame_size ();
++ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
++ top_of_locals = offset;
++ }
+ offset += frame.saved_varargs_size;
+ gcc_assert (multiple_p (offset, STACK_BOUNDARY / BITS_PER_UNIT));
+ frame.frame_size = offset;
+
+ frame.bytes_above_hard_fp = frame.frame_size - frame.bytes_below_hard_fp;
++ gcc_assert (known_ge (top_of_locals, 0));
+ frame.bytes_above_locals = frame.frame_size - top_of_locals;
+
+ frame.initial_adjust = 0;
+@@ -8843,10 +8868,10 @@ aarch64_epilogue_uses (int regno)
+ | for register varargs |
+ | |
+ +-------------------------------+
+- | local variables | <-- frame_pointer_rtx
++ | local variables (1) | <-- frame_pointer_rtx
+ | |
+ +-------------------------------+
+- | padding |
++ | padding (1) |
+ +-------------------------------+
+ | callee-saved registers |
+ +-------------------------------+
+@@ -8858,6 +8883,10 @@ aarch64_epilogue_uses (int regno)
+ +-------------------------------+
+ | SVE predicate registers |
+ +-------------------------------+
++ | local variables (2) |
++ +-------------------------------+
++ | padding (2) |
++ +-------------------------------+
+ | dynamic allocation |
+ +-------------------------------+
+ | padding |
+@@ -8867,6 +8896,9 @@ aarch64_epilogue_uses (int regno)
+ +-------------------------------+
+ | | <-- stack_pointer_rtx (aligned)
+
++ The regions marked (1) and (2) are mutually exclusive. (2) is used
++ when aarch64_save_regs_above_locals_p is true.
++
+ Dynamic stack allocations via alloca() decrease stack_pointer_rtx
+ but leave frame_pointer_rtx and hard_frame_pointer_rtx
+ unchanged.
+@@ -9058,6 +9090,8 @@ aarch64_expand_prologue (void)
+ gcc_assert (known_eq (bytes_below_sp, final_adjust));
+ aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
+ !frame_pointer_needed, true);
++ if (emit_frame_chain && maybe_ne (final_adjust, 0))
++ emit_insn (gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx));
+ }
+
+ /* Return TRUE if we can use a simple_return insn.
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+new file mode 100644
+index 00000000000..e71d820e365
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+@@ -0,0 +1,95 @@
++/* { dg-options " -O -fstack-protector-strong -mstack-protector-guard=sysreg -mstack-protector-guard-reg=tpidr2_el0 -mstack-protector-guard-offset=16" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void g(void *);
++__SVBool_t *h(void *);
++
++/*
++** test1:
++** sub sp, sp, #288
++** stp x29, x30, \[sp, #?272\]
++** add x29, sp, #?272
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?264\]
++** mov \2, #?0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** ldp x29, x30, \[sp, #?272\]
++** add sp, sp, #?288
++** ret
++** bl __stack_chk_fail
++*/
++int test1() {
++ int y[0x40];
++ g(y);
++ return 1;
++}
++
++/*
++** test2:
++** stp x29, x30, \[sp, #?-16\]!
++** mov x29, sp
++** sub sp, sp, #1040
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?1032\]
++** mov \2, #?0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** add sp, sp, #?1040
++** ldp x29, x30, \[sp\], #?16
++** ret
++** bl __stack_chk_fail
++*/
++int test2() {
++ int y[0x100];
++ g(y);
++ return 1;
++}
++
++#pragma GCC target "+sve"
++
++/*
++** test3:
++** stp x29, x30, \[sp, #?-16\]!
++** mov x29, sp
++** addvl sp, sp, #-18
++** ...
++** str p4, \[sp\]
++** ...
++** sub sp, sp, #272
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?264\]
++** mov \2, #?0
++** add x0, sp, #?8
++** bl h
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** add sp, sp, #?272
++** ...
++** ldr p4, \[sp\]
++** ...
++** addvl sp, sp, #18
++** ldp x29, x30, \[sp\], #?16
++** ret
++** bl __stack_chk_fail
++*/
++__SVBool_t test3() {
++ int y[0x40];
++ return *h(y);
++}
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+new file mode 100644
+index 00000000000..58f322aa480
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+@@ -0,0 +1,33 @@
++/* { dg-options "-O2 -mcpu=neoverse-v1 -fstack-protector-all" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++/*
++** main:
++** ...
++** stp x29, x30, \[sp, #?-[0-9]+\]!
++** ...
++** sub sp, sp, #[0-9]+
++** ...
++** str x[0-9]+, \[x29, #?-8\]
++** ...
++*/
++int f(const char *);
++void g(void *);
++int main(int argc, char* argv[])
++{
++ int a;
++ int b;
++ char c[2+f(argv[1])];
++ int d[0x100];
++ char y;
++
++ y=42; a=4; b=10;
++ c[0] = 'h'; c[1] = '\0';
++
++ c[f(argv[2])] = '\0';
++
++ __builtin_printf("%d %d\n%s\n", a, b, c);
++ g(d);
++
++ return 0;
++}
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/gcc/gcc_11.3.bb b/meta/recipes-devtools/gcc/gcc_11.4.bb
index 255fe552bd..255fe552bd 100644
--- a/meta/recipes-devtools/gcc/gcc_11.3.bb
+++ b/meta/recipes-devtools/gcc/gcc_11.4.bb
diff --git a/meta/recipes-devtools/gcc/libgcc-common.inc b/meta/recipes-devtools/gcc/libgcc-common.inc
index d48dc8b823..31f629acaa 100644
--- a/meta/recipes-devtools/gcc/libgcc-common.inc
+++ b/meta/recipes-devtools/gcc/libgcc-common.inc
@@ -45,10 +45,14 @@ do_install () {
}
do_install:append:libc-baremetal () {
- rmdir ${D}${base_libdir}
+ if [ "${base_libdir}" != "${libdir}" ]; then
+ rmdir ${D}${base_libdir}
+ fi
}
do_install:append:libc-newlib () {
- rmdir ${D}${base_libdir}
+ if [ "${base_libdir}" != "${libdir}" ]; then
+ rmdir ${D}${base_libdir}
+ fi
}
# No rpm package is actually created but -dev depends on it, avoid dnf error
diff --git a/meta/recipes-devtools/gcc/libgcc-initial_11.3.bb b/meta/recipes-devtools/gcc/libgcc-initial_11.4.bb
index a259082b47..a259082b47 100644
--- a/meta/recipes-devtools/gcc/libgcc-initial_11.3.bb
+++ b/meta/recipes-devtools/gcc/libgcc-initial_11.4.bb
diff --git a/meta/recipes-devtools/gcc/libgcc_11.3.bb b/meta/recipes-devtools/gcc/libgcc_11.4.bb
index f88963b0a4..f88963b0a4 100644
--- a/meta/recipes-devtools/gcc/libgcc_11.3.bb
+++ b/meta/recipes-devtools/gcc/libgcc_11.4.bb
diff --git a/meta/recipes-devtools/gcc/libgfortran_11.3.bb b/meta/recipes-devtools/gcc/libgfortran_11.4.bb
index 71dd8b4bdc..71dd8b4bdc 100644
--- a/meta/recipes-devtools/gcc/libgfortran_11.3.bb
+++ b/meta/recipes-devtools/gcc/libgfortran_11.4.bb
diff --git a/meta/recipes-devtools/gdb/gdb.inc b/meta/recipes-devtools/gdb/gdb.inc
index 649ee28727..6c9fe60cab 100644
--- a/meta/recipes-devtools/gdb/gdb.inc
+++ b/meta/recipes-devtools/gdb/gdb.inc
@@ -14,5 +14,8 @@ SRC_URI = "${GNU_MIRROR}/gdb/gdb-${PV}.tar.xz \
file://0008-resolve-restrict-keyword-conflict.patch \
file://0009-Fix-invalid-sigprocmask-call.patch \
file://0010-gdbserver-ctrl-c-handling.patch \
+ file://0011-CVE-2023-39128.patch \
+ file://0012-CVE-2023-39129.patch \
+ file://0013-CVE-2023-39130.patch \
"
SRC_URI[sha256sum] = "1497c36a71881b8671a9a84a0ee40faab788ca30d7ba19d8463c3cc787152e32"
diff --git a/meta/recipes-devtools/gdb/gdb/0011-CVE-2023-39128.patch b/meta/recipes-devtools/gdb/gdb/0011-CVE-2023-39128.patch
new file mode 100644
index 0000000000..53b49cb21d
--- /dev/null
+++ b/meta/recipes-devtools/gdb/gdb/0011-CVE-2023-39128.patch
@@ -0,0 +1,75 @@
+From 033bc52bb6190393c8eed80925fa78cc35b40c6d Mon Sep 17 00:00:00 2001
+From: Tom Tromey <tromey@adacore.com>
+Date: Wed, 16 Aug 2023 11:29:19 -0600
+Subject: [PATCH] Avoid buffer overflow in ada_decode
+
+A bug report pointed out a buffer overflow in ada_decode, which Keith
+helpfully analyzed. ada_decode had a logic error when the input was
+all digits. While this isn't valid -- and would probably only appear
+in fuzzer tests -- it still should be handled properly.
+
+This patch adds a missing bounds check. Tested with the self-tests in
+an asan build.
+
+Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=30639
+Reviewed-by: Keith Seitz <keiths@redhat.com>
+
+Upstream-Status: Backport from [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=033bc52bb6190393c8eed80925fa78cc35b40c6d]
+CVE: CVE-2023-39128
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ gdb/ada-lang.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/gdb/ada-lang.c b/gdb/ada-lang.c
+index 70a2b44..f682302 100644
+--- a/gdb/ada-lang.c
++++ b/gdb/ada-lang.c
+@@ -57,6 +57,7 @@
+ #include "cli/cli-utils.h"
+ #include "gdbsupport/function-view.h"
+ #include "gdbsupport/byte-vector.h"
++#include "gdbsupport/selftest.h"
+ #include <algorithm>
+ #include "ada-exp.h"
+
+@@ -1057,7 +1058,7 @@ ada_decode (const char *encoded, bool wrap)
+ i -= 1;
+ if (i > 1 && encoded[i] == '_' && encoded[i - 1] == '_')
+ len0 = i - 1;
+- else if (encoded[i] == '$')
++ else if (i >= 0 && encoded[i] == '$')
+ len0 = i;
+ }
+
+@@ -1225,6 +1226,18 @@ ada_decode (const char *encoded, bool wrap)
+ return decoded;
+ }
+
++#ifdef GDB_SELF_TEST
++
++static void
++ada_decode_tests ()
++{
++ /* This isn't valid, but used to cause a crash. PR gdb/30639. The
++ result does not really matter very much. */
++ SELF_CHECK (ada_decode ("44") == "44");
++}
++
++#endif
++
+ /* Table for keeping permanent unique copies of decoded names. Once
+ allocated, names in this table are never released. While this is a
+ storage leak, it should not be significant unless there are massive
+@@ -13497,4 +13510,8 @@ DWARF attribute."),
+ gdb::observers::new_objfile.attach (ada_new_objfile_observer, "ada-lang");
+ gdb::observers::free_objfile.attach (ada_free_objfile_observer, "ada-lang");
+ gdb::observers::inferior_exit.attach (ada_inferior_exit, "ada-lang");
++
++#ifdef GDB_SELF_TEST
++ selftests::register_test ("ada-decode", ada_decode_tests);
++#endif
+ }
+--
+2.35.7
+
diff --git a/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39129.patch b/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39129.patch
new file mode 100644
index 0000000000..63fb44d59a
--- /dev/null
+++ b/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39129.patch
@@ -0,0 +1,50 @@
+From: Keith Seitz <keiths@...>
+Date: Wed, 2 Aug 2023 15:35:11 +0000 (-0700)
+Subject: Verify COFF symbol stringtab offset
+X-Git-Tag: gdb-14-branchpoint~473
+X-Git-Url: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=58abdf887821a5da09ba184c6e400a3bc5cccd5a
+
+Verify COFF symbol stringtab offset
+
+This patch addresses an issue with malformed/fuzzed debug information that
+was recently reported in gdb/30639. That bug specifically deals with
+an ASAN issue, but the reproducer provided by the reporter causes
+another failure outside of ASAN:
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=58abdf887821a5da09ba184c6e400a3bc5cccd5a]
+
+CVE: CVE-2023-39129
+
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+
+diff --git a/gdb/coffread.c b/gdb/coffread.c
+--- a/gdb/coffread.c
++++ b/gdb/coffread.c
+@@ -159,6 +160,7 @@ static file_ptr linetab_offset;
+ static file_ptr linetab_size;
+
+ static char *stringtab = NULL;
++static long stringtab_length = 0;
+
+ extern void stabsread_clear_cache (void);
+
+@@ -1303,6 +1298,7 @@ init_stringtab (bfd *abfd, file_ptr offset, gdb::unique_xmalloc_ptr<char> *stora
+ /* This is in target format (probably not very useful, and not
+ currently used), not host format. */
+ memcpy (stringtab, lengthbuf, sizeof lengthbuf);
++ stringtab_length = length;
+ if (length == sizeof length) /* Empty table -- just the count. */
+ return 0;
+
+@@ -1322,8 +1318,9 @@ getsymname (struct internal_syment *symbol_entry)
+
+ if (symbol_entry->_n._n_n._n_zeroes == 0)
+ {
+- /* FIXME: Probably should be detecting corrupt symbol files by
+- seeing whether offset points to within the stringtab. */
++ if (symbol_entry->_n._n_n._n_offset > stringtab_length)
++ error (_("COFF Error: string table offset (%ld) outside string table (length %ld)"),
++ symbol_entry->_n._n_n._n_offset, stringtab_length);
+ result = stringtab + symbol_entry->_n._n_n._n_offset;
+ }
+ else
diff --git a/meta/recipes-devtools/gdb/gdb/0013-CVE-2023-39130.patch b/meta/recipes-devtools/gdb/gdb/0013-CVE-2023-39130.patch
new file mode 100644
index 0000000000..bfd5b18d7d
--- /dev/null
+++ b/meta/recipes-devtools/gdb/gdb/0013-CVE-2023-39130.patch
@@ -0,0 +1,326 @@
+From 2db20b97f1dc3e5dce3d6ed74a8a62f0dede8c80 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 9 Aug 2023 09:58:36 +0930
+Subject: [PATCH] gdb: warn unused result for bfd IO functions
+
+This fixes the compilation warnings introduced by my bfdio.c patch.
+
+The removed bfd_seeks in coff_symfile_read date back to 1994, commit
+7f4c859520, prior to which the file used stdio rather than bfd to read
+symbols. Since it now uses bfd to read the file there should be no
+need to synchronise to bfd's idea of the file position. I also fixed
+a potential uninitialised memory access.
+
+Approved-By: Andrew Burgess <aburgess@redhat.com>
+
+Upstream-Status: Backport from [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=2db20b97f1dc3e5dce3d6ed74a8a62f0dede8c80]
+CVE: CVE-2023-39130
+Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
+---
+ gdb/coff-pe-read.c | 114 +++++++++++++++++++++++++++++----------------
+ gdb/coffread.c | 27 ++---------
+ gdb/dbxread.c | 7 +--
+ gdb/xcoffread.c | 5 +-
+ 4 files changed, 85 insertions(+), 68 deletions(-)
+
+diff --git a/gdb/coff-pe-read.c b/gdb/coff-pe-read.c
+--- a/gdb/coff-pe-read.c
++++ b/gdb/coff-pe-read.c
+@@ -291,23 +291,31 @@ read_pe_truncate_name (char *dll_name)
+
+ /* Low-level support functions, direct from the ld module pe-dll.c. */
+ static unsigned int
+-pe_get16 (bfd *abfd, int where)
++pe_get16 (bfd *abfd, int where, bool *fail)
+ {
+ unsigned char b[2];
+
+- bfd_seek (abfd, (file_ptr) where, SEEK_SET);
+- bfd_bread (b, (bfd_size_type) 2, abfd);
++ if (bfd_seek (abfd, where, SEEK_SET) != 0
++ || bfd_bread (b, 2, abfd) != 2)
++ {
++ *fail = true;
++ return 0;
++ }
+ return b[0] + (b[1] << 8);
+ }
+
+ static unsigned int
+-pe_get32 (bfd *abfd, int where)
++pe_get32 (bfd *abfd, int where, bool *fail)
+ {
+ unsigned char b[4];
+
+- bfd_seek (abfd, (file_ptr) where, SEEK_SET);
+- bfd_bread (b, (bfd_size_type) 4, abfd);
+- return b[0] + (b[1] << 8) + (b[2] << 16) + (b[3] << 24);
++ if (bfd_seek (abfd, where, SEEK_SET) != 0
++ || bfd_bread (b, 4, abfd) != 4)
++ {
++ *fail = true;
++ return 0;
++ }
++ return b[0] + (b[1] << 8) + (b[2] << 16) + ((unsigned) b[3] << 24);
+ }
+
+ static unsigned int
+@@ -323,7 +331,7 @@ pe_as32 (void *ptr)
+ {
+ unsigned char *b = (unsigned char *) ptr;
+
+- return b[0] + (b[1] << 8) + (b[2] << 16) + (b[3] << 24);
++ return b[0] + (b[1] << 8) + (b[2] << 16) + ((unsigned) b[3] << 24);
+ }
+
+ /* Read the (non-debug) export symbol table from a portable
+@@ -376,37 +384,50 @@ read_pe_exported_syms (minimal_symbol_re
+ || strcmp (target, "pei-i386") == 0
+ || strcmp (target, "pe-arm-wince-little") == 0
+ || strcmp (target, "pei-arm-wince-little") == 0);
++
++ /* Possibly print a debug message about DLL not having a valid format. */
++ auto maybe_print_debug_msg = [&] () -> void {
++ if (debug_coff_pe_read)
++ fprintf_unfiltered (gdb_stdlog, _("%s doesn't appear to be a DLL\n"),
++ bfd_get_filename (dll));
++ };
++
+ if (!is_pe32 && !is_pe64)
+- {
+- /* This is not a recognized PE format file. Abort now, because
+- the code is untested on anything else. *FIXME* test on
+- further architectures and loosen or remove this test. */
+- return;
+- }
++ return maybe_print_debug_msg ();
+
+ /* Get pe_header, optional header and numbers of export entries. */
+- pe_header_offset = pe_get32 (dll, 0x3c);
++ bool fail = false;
++ pe_header_offset = pe_get32 (dll, 0x3c, &fail);
++ if (fail)
++ return maybe_print_debug_msg ();
+ opthdr_ofs = pe_header_offset + 4 + 20;
+ if (is_pe64)
+- num_entries = pe_get32 (dll, opthdr_ofs + 108);
++ num_entries = pe_get32 (dll, opthdr_ofs + 108, &fail);
+ else
+- num_entries = pe_get32 (dll, opthdr_ofs + 92);
++ num_entries = pe_get32 (dll, opthdr_ofs + 92, &fail);
++ if (fail)
++ return maybe_print_debug_msg ();
+
+ if (num_entries < 1) /* No exports. */
+ return;
+ if (is_pe64)
+ {
+- export_opthdrrva = pe_get32 (dll, opthdr_ofs + 112);
+- export_opthdrsize = pe_get32 (dll, opthdr_ofs + 116);
++ export_opthdrrva = pe_get32 (dll, opthdr_ofs + 112, &fail);
++ export_opthdrsize = pe_get32 (dll, opthdr_ofs + 116, &fail);
+ }
+ else
+ {
+- export_opthdrrva = pe_get32 (dll, opthdr_ofs + 96);
+- export_opthdrsize = pe_get32 (dll, opthdr_ofs + 100);
++ export_opthdrrva = pe_get32 (dll, opthdr_ofs + 96, &fail);
++ export_opthdrsize = pe_get32 (dll, opthdr_ofs + 100, &fail);
+ }
+- nsections = pe_get16 (dll, pe_header_offset + 4 + 2);
++ if (fail)
++ return maybe_print_debug_msg ();
++
++ nsections = pe_get16 (dll, pe_header_offset + 4 + 2, &fail);
+ secptr = (pe_header_offset + 4 + 20 +
+- pe_get16 (dll, pe_header_offset + 4 + 16));
++ pe_get16 (dll, pe_header_offset + 4 + 16, &fail));
++ if (fail)
++ return maybe_print_debug_msg ();
+ expptr = 0;
+ export_size = 0;
+
+@@ -415,12 +436,13 @@ read_pe_exported_syms (minimal_symbol_re
+ {
+ char sname[8];
+ unsigned long secptr1 = secptr + 40 * i;
+- unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+- unsigned long vsize = pe_get32 (dll, secptr1 + 16);
+- unsigned long fptr = pe_get32 (dll, secptr1 + 20);
+-
+- bfd_seek (dll, (file_ptr) secptr1, SEEK_SET);
+- bfd_bread (sname, (bfd_size_type) sizeof (sname), dll);
++ unsigned long vaddr = pe_get32 (dll, secptr1 + 12, &fail);
++ unsigned long vsize = pe_get32 (dll, secptr1 + 16, &fail);
++ unsigned long fptr = pe_get32 (dll, secptr1 + 20, &fail);
++
++ if (fail
++ || bfd_seek (dll, secptr1, SEEK_SET) != 0
++ || bfd_bread (sname, sizeof (sname), dll) != sizeof (sname))
+
+ if ((strcmp (sname, ".edata") == 0)
+ || (vaddr <= export_opthdrrva && export_opthdrrva < vaddr + vsize))
+@@ -461,16 +483,18 @@ read_pe_exported_syms (minimal_symbol_re
+ for (i = 0; i < nsections; i++)
+ {
+ unsigned long secptr1 = secptr + 40 * i;
+- unsigned long vsize = pe_get32 (dll, secptr1 + 8);
+- unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+- unsigned long characteristics = pe_get32 (dll, secptr1 + 36);
++ unsigned long vsize = pe_get32 (dll, secptr1 + 8, &fail);
++ unsigned long vaddr = pe_get32 (dll, secptr1 + 12, &fail);
++ unsigned long characteristics = pe_get32 (dll, secptr1 + 36, &fail);
+ char sec_name[SCNNMLEN + 1];
+ int sectix;
+ unsigned int bfd_section_index;
+ asection *section;
+
+- bfd_seek (dll, (file_ptr) secptr1 + 0, SEEK_SET);
+- bfd_bread (sec_name, (bfd_size_type) SCNNMLEN, dll);
++ if (fail
++ || bfd_seek (dll, secptr1 + 0, SEEK_SET) != 0
++ || bfd_bread (sec_name, SCNNMLEN, dll) != SCNNMLEN)
++ return maybe_print_debug_msg ();
+ sec_name[SCNNMLEN] = '\0';
+
+ sectix = read_pe_section_index (sec_name);
+@@ -509,8 +533,9 @@ read_pe_exported_syms (minimal_symbol_re
+ gdb::def_vector<unsigned char> expdata_storage (export_size);
+ expdata = expdata_storage.data ();
+
+- bfd_seek (dll, (file_ptr) expptr, SEEK_SET);
+- bfd_bread (expdata, (bfd_size_type) export_size, dll);
++ if (bfd_seek (dll, expptr, SEEK_SET) != 0
++ || bfd_bread (expdata, export_size, dll) != export_size)
++ return maybe_print_debug_msg ();
+ erva = expdata - export_rva;
+
+ nexp = pe_as32 (expdata + 24);
+@@ -658,20 +683,27 @@ pe_text_section_offset (struct bfd *abfd
+ }
+
+ /* Get pe_header, optional header and numbers of sections. */
+- pe_header_offset = pe_get32 (abfd, 0x3c);
+- nsections = pe_get16 (abfd, pe_header_offset + 4 + 2);
++ bool fail = false;
++ pe_header_offset = pe_get32 (abfd, 0x3c, &fail);
++ if (fail)
++ return DEFAULT_COFF_PE_TEXT_SECTION_OFFSET;
++ nsections = pe_get16 (abfd, pe_header_offset + 4 + 2, &fail);
+ secptr = (pe_header_offset + 4 + 20 +
+- pe_get16 (abfd, pe_header_offset + 4 + 16));
++ pe_get16 (abfd, pe_header_offset + 4 + 16, &fail));
++ if (fail)
++ return DEFAULT_COFF_PE_TEXT_SECTION_OFFSET;
+
+ /* Get the rva and size of the export section. */
+ for (i = 0; i < nsections; i++)
+ {
+ char sname[SCNNMLEN + 1];
+ unsigned long secptr1 = secptr + 40 * i;
+- unsigned long vaddr = pe_get32 (abfd, secptr1 + 12);
++ unsigned long vaddr = pe_get32 (abfd, secptr1 + 12, &fail);
+
+- bfd_seek (abfd, (file_ptr) secptr1, SEEK_SET);
+- bfd_bread (sname, (bfd_size_type) SCNNMLEN, abfd);
++ if (fail
++ || bfd_seek (abfd, secptr1, SEEK_SET) != 0
++ || bfd_bread (sname, SCNNMLEN, abfd) != SCNNMLEN)
++ return DEFAULT_COFF_PE_TEXT_SECTION_OFFSET;
+ sname[SCNNMLEN] = '\0';
+ if (strcmp (sname, ".text") == 0)
+ return vaddr;
+diff --git a/gdb/coffread.c b/gdb/coffread.c
+--- a/gdb/coffread.c
++++ b/gdb/coffread.c
+@@ -690,8 +690,6 @@ coff_symfile_read (struct objfile *objfi
+
+ /* FIXME: dubious. Why can't we use something normal like
+ bfd_get_section_contents? */
+- bfd_seek (abfd, abfd->where, 0);
+-
+ stabstrsize = bfd_section_size (info->stabstrsect);
+
+ coffstab_build_psymtabs (objfile,
+@@ -780,22 +778,6 @@ coff_symtab_read (minimal_symbol_reader
+
+ scoped_free_pendings free_pending;
+
+- /* Work around a stdio bug in SunOS4.1.1 (this makes me nervous....
+- it's hard to know I've really worked around it. The fix should
+- be harmless, anyway). The symptom of the bug is that the first
+- fread (in read_one_sym), will (in my example) actually get data
+- from file offset 268, when the fseek was to 264 (and ftell shows
+- 264). This causes all hell to break loose. I was unable to
+- reproduce this on a short test program which operated on the same
+- file, performing (I think) the same sequence of operations.
+-
+- It stopped happening when I put in this (former) rewind().
+-
+- FIXME: Find out if this has been reported to Sun, whether it has
+- been fixed in a later release, etc. */
+-
+- bfd_seek (objfile->obfd, 0, 0);
+-
+ /* Position to read the symbol table. */
+ val = bfd_seek (objfile->obfd, symtab_offset, 0);
+ if (val < 0)
+@@ -1285,12 +1267,13 @@ init_stringtab (bfd *abfd, file_ptr offs
+ if (bfd_seek (abfd, offset, 0) < 0)
+ return -1;
+
+- val = bfd_bread ((char *) lengthbuf, sizeof lengthbuf, abfd);
+- length = bfd_h_get_32 (symfile_bfd, lengthbuf);
+-
++ val = bfd_bread (lengthbuf, sizeof lengthbuf, abfd);
+ /* If no string table is needed, then the file may end immediately
+ after the symbols. Just return with `stringtab' set to null. */
+- if (val != sizeof lengthbuf || length < sizeof lengthbuf)
++ if (val != sizeof lengthbuf)
++ return 0;
++ length = bfd_h_get_32 (symfile_bfd, lengthbuf);
++ if (length < sizeof lengthbuf)
+ return 0;
+
+ storage->reset ((char *) xmalloc (length));
+diff --git a/gdb/dbxread.c b/gdb/dbxread.c
+--- a/gdb/dbxread.c
++++ b/gdb/dbxread.c
+@@ -812,7 +812,8 @@ stabs_seek (int sym_offset)
+ symbuf_left -= sym_offset;
+ }
+ else
+- bfd_seek (symfile_bfd, sym_offset, SEEK_CUR);
++ if (bfd_seek (symfile_bfd, sym_offset, SEEK_CUR) != 0)
++ perror_with_name (bfd_get_filename (symfile_bfd));
+ }
+
+ #define INTERNALIZE_SYMBOL(intern, extern, abfd) \
+@@ -2095,8 +2096,8 @@ dbx_expand_psymtab (legacy_psymtab *pst,
+ symbol_size = SYMBOL_SIZE (pst);
+
+ /* Read in this file's symbols. */
+- bfd_seek (objfile->obfd, SYMBOL_OFFSET (pst), SEEK_SET);
+- read_ofile_symtab (objfile, pst);
++ if (bfd_seek (objfile->obfd, SYMBOL_OFFSET (pst), SEEK_SET) == 0)
++ read_ofile_symtab (objfile, pst);
+ }
+
+ pst->readin = true;
+diff --git a/gdb/xcoffread.c b/gdb/xcoffread.c
+--- a/gdb/xcoffread.c
++++ b/gdb/xcoffread.c
+@@ -865,8 +865,9 @@ enter_line_range (struct subfile *subfil
+
+ while (curoffset <= limit_offset)
+ {
+- bfd_seek (abfd, curoffset, SEEK_SET);
+- bfd_bread (ext_lnno, linesz, abfd);
++ if (bfd_seek (abfd, curoffset, SEEK_SET) != 0
++ || bfd_bread (ext_lnno, linesz, abfd) != linesz)
++ return;
+ bfd_coff_swap_lineno_in (abfd, ext_lnno, &int_lnno);
+
+ /* Find the address this line represents. */
+--
+2.39.3
diff --git a/meta/recipes-devtools/git/git/CVE-2023-25652.patch b/meta/recipes-devtools/git/git/CVE-2023-25652.patch
new file mode 100644
index 0000000000..825701eaff
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2023-25652.patch
@@ -0,0 +1,94 @@
+From 9db05711c98efc14f414d4c87135a34c13586e0b Mon Sep 17 00:00:00 2001
+From: Johannes Schindelin <Johannes.Schindelin@gmx.de>
+Date: Thu Mar 9 16:02:54 2023 +0100
+Subject: [PATCH] apply --reject: overwrite existing `.rej` symlink if it
+ exists
+
+ The `git apply --reject` is expected to write out `.rej` files in case
+ one or more hunks fail to apply cleanly. Historically, the command
+ overwrites any existing `.rej` files. The idea being that
+ apply/reject/edit cycles are relatively common, and the generated `.rej`
+ files are not considered precious.
+
+ But the command does not overwrite existing `.rej` symbolic links, and
+ instead follows them. This is unsafe because the same patch could
+ potentially create such a symbolic link and point at arbitrary paths
+ outside the current worktree, and `git apply` would write the contents
+ of the `.rej` file into that location.
+
+ Therefore, let's make sure that any existing `.rej` file or symbolic
+ link is removed before writing it.
+
+ Reported-by: RyotaK <ryotak.mail@gmail.com>
+ Helped-by: Taylor Blau <me@ttaylorr.com>
+ Helped-by: Junio C Hamano <gitster@pobox.com>
+ Helped-by: Linus Torvalds <torvalds@linuxfoundation.org>
+ Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
+
+CVE: CVE-2023-25652
+Upstream-Status: Backport [https://github.com/git/git/commit/9db05711c98efc14f414d4c87135a34c13586e0b]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ apply.c | 14 ++++++++++++--
+ t/t4115-apply-symlink.sh | 15 +++++++++++++++
+ 2 files changed, 27 insertions(+), 2 deletions(-)
+
+diff --git a/apply.c b/apply.c
+index fc6f484..47f2686 100644
+--- a/apply.c
++++ b/apply.c
+@@ -4584,7 +4584,7 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
+ FILE *rej;
+ char namebuf[PATH_MAX];
+ struct fragment *frag;
+- int cnt = 0;
++ int fd, cnt = 0;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (cnt = 0, frag = patch->fragments; frag; frag = frag->next) {
+@@ -4624,7 +4624,17 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
+ memcpy(namebuf, patch->new_name, cnt);
+ memcpy(namebuf + cnt, ".rej", 5);
+
+- rej = fopen(namebuf, "w");
++ fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666);
++ if (fd < 0) {
++ if (errno != EEXIST)
++ return error_errno(_("cannot open %s"), namebuf);
++ if (unlink(namebuf))
++ return error_errno(_("cannot unlink '%s'"), namebuf);
++ fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666);
++ if (fd < 0)
++ return error_errno(_("cannot open %s"), namebuf);
++ }
++ rej = fdopen(fd, "w");
+ if (!rej)
+ return error_errno(_("cannot open %s"), namebuf);
+
+diff --git a/t/t4115-apply-symlink.sh b/t/t4115-apply-symlink.sh
+index 65ac7df..e95e6d4 100755
+--- a/t/t4115-apply-symlink.sh
++++ b/t/t4115-apply-symlink.sh
+@@ -126,4 +126,19 @@ test_expect_success SYMLINKS 'symlink escape when deleting file' '
+ test_path_is_file .git/delete-me
+ '
+
++test_expect_success SYMLINKS '--reject removes .rej symlink if it exists' '
++ test_when_finished "git reset --hard && git clean -dfx" &&
++
++ test_commit file &&
++ echo modified >file.t &&
++ git diff -- file.t >patch &&
++ echo modified-again >file.t &&
++
++ ln -s foo file.t.rej &&
++ test_must_fail git apply patch --reject 2>err &&
++ test_i18ngrep "Rejected hunk" err &&
++ test_path_is_missing foo &&
++ test_path_is_file file.t.rej
++'
++
+ test_done
+--
+2.40.0
diff --git a/meta/recipes-devtools/git/git/CVE-2023-29007.patch b/meta/recipes-devtools/git/git/CVE-2023-29007.patch
new file mode 100644
index 0000000000..472f4022b2
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2023-29007.patch
@@ -0,0 +1,162 @@
+From 057c07a7b1fae22fdeef26c243f4cfbe3afc90ce Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Fri, 14 Apr 2023 11:46:59 -0400
+Subject: [PATCH] Merge branch 'tb/config-copy-or-rename-in-file-injection'
+
+Avoids issues with renaming or deleting sections with long lines, where
+configuration values may be interpreted as sections, leading to
+configuration injection. Addresses CVE-2023-29007.
+
+* tb/config-copy-or-rename-in-file-injection:
+ config.c: disallow overly-long lines in `copy_or_rename_section_in_file()`
+ config.c: avoid integer truncation in `copy_or_rename_section_in_file()`
+ config: avoid fixed-sized buffer when renaming/deleting a section
+ t1300: demonstrate failure when renaming sections with long lines
+
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+
+Upstream-Status: Backport
+CVE: CVE-2023-29007
+
+Reference to upstream patch:
+https://github.com/git/git/commit/528290f8c61222433a8cf02fb7cfffa8438432b4
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ config.c | 36 +++++++++++++++++++++++++-----------
+ t/t1300-config.sh | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 55 insertions(+), 11 deletions(-)
+
+diff --git a/config.c b/config.c
+index 2bffa8d..6a01938 100644
+--- a/config.c
++++ b/config.c
+@@ -3192,9 +3192,10 @@ void git_config_set_multivar(const char *key, const char *value,
+ flags);
+ }
+
+-static int section_name_match (const char *buf, const char *name)
++static size_t section_name_match (const char *buf, const char *name)
+ {
+- int i = 0, j = 0, dot = 0;
++ size_t i = 0, j = 0;
++ int dot = 0;
+ if (buf[i] != '[')
+ return 0;
+ for (i = 1; buf[i] && buf[i] != ']'; i++) {
+@@ -3247,6 +3248,8 @@ static int section_name_is_ok(const char *name)
+ return 1;
+ }
+
++#define GIT_CONFIG_MAX_LINE_LEN (512 * 1024)
++
+ /* if new_name == NULL, the section is removed instead */
+ static int git_config_copy_or_rename_section_in_file(const char *config_filename,
+ const char *old_name,
+@@ -3256,11 +3259,12 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ char *filename_buf = NULL;
+ struct lock_file lock = LOCK_INIT;
+ int out_fd;
+- char buf[1024];
++ struct strbuf buf = STRBUF_INIT;
+ FILE *config_file = NULL;
+ struct stat st;
+ struct strbuf copystr = STRBUF_INIT;
+ struct config_store_data store;
++ uint32_t line_nr = 0;
+
+ memset(&store, 0, sizeof(store));
+
+@@ -3297,16 +3301,25 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ goto out;
+ }
+
+- while (fgets(buf, sizeof(buf), config_file)) {
+- unsigned i;
+- int length;
++ while (!strbuf_getwholeline(&buf, config_file, '\n')) {
++ size_t i, length;
+ int is_section = 0;
+- char *output = buf;
+- for (i = 0; buf[i] && isspace(buf[i]); i++)
++ char *output = buf.buf;
++
++ line_nr++;
++
++ if (buf.len >= GIT_CONFIG_MAX_LINE_LEN) {
++ ret = error(_("refusing to work with overly long line "
++ "in '%s' on line %"PRIuMAX),
++ config_filename, (uintmax_t)line_nr);
++ goto out;
++ }
++
++ for (i = 0; buf.buf[i] && isspace(buf.buf[i]); i++)
+ ; /* do nothing */
+- if (buf[i] == '[') {
++ if (buf.buf[i] == '[') {
+ /* it's a section */
+- int offset;
++ size_t offset;
+ is_section = 1;
+
+ /*
+@@ -3323,7 +3336,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ strbuf_reset(&copystr);
+ }
+
+- offset = section_name_match(&buf[i], old_name);
++ offset = section_name_match(&buf.buf[i], old_name);
+ if (offset > 0) {
+ ret++;
+ if (new_name == NULL) {
+@@ -3398,6 +3411,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ out_no_rollback:
+ free(filename_buf);
+ config_store_data_clear(&store);
++ strbuf_release(&buf);
+ return ret;
+ }
+
+diff --git a/t/t1300-config.sh b/t/t1300-config.sh
+index 78359f1..b07feb1 100755
+--- a/t/t1300-config.sh
++++ b/t/t1300-config.sh
+@@ -617,6 +617,36 @@ test_expect_success 'renaming to bogus section is rejected' '
+ test_must_fail git config --rename-section branch.zwei "bogus name"
+ '
+
++test_expect_success 'renaming a section with a long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %1024s [a] e = f\\n" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ git config -f y --rename-section a xyz &&
++ test_must_fail git config -f y b.e
++'
++
++test_expect_success 'renaming an embedded section with a long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %1024s [a] [foo] e = f\\n" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ git config -f y --rename-section a xyz &&
++ test_must_fail git config -f y foo.e
++'
++
++test_expect_success 'renaming a section with an overly-long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %525000s e" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ test_must_fail git config -f y --rename-section a xyz 2>err &&
++ test_i18ngrep "refusing to work with overly long line in .y. on line 2" err
++'
++
+ cat >> .git/config << EOF
+ [branch "zwei"] a = 1 [branch "vier"]
+ EOF
+--
+2.40.0
diff --git a/meta/recipes-devtools/git/git_2.35.3.bb b/meta/recipes-devtools/git/git_2.35.7.bb
index 794045c8b7..9e7b0a8cff 100644
--- a/meta/recipes-devtools/git/git_2.35.3.bb
+++ b/meta/recipes-devtools/git/git_2.35.7.bb
@@ -10,6 +10,8 @@ PROVIDES:append:class-native = " git-replacement-native"
SRC_URI = "${KERNELORG_MIRROR}/software/scm/git/git-${PV}.tar.gz;name=tarball \
file://fixsort.patch \
file://0001-config.mak.uname-do-not-force-RHEL-7-specific-build-.patch \
+ file://CVE-2023-29007.patch \
+ file://CVE-2023-25652.patch \
"
S = "${WORKDIR}/git-${PV}"
@@ -31,6 +33,12 @@ CVE_PRODUCT = "git-scm:git"
# in mirrored git repos. Most OE users wouldn't build the docs and
# we don't see this as a major issue for our general users/usecases.
CVE_CHECK_IGNORE += "CVE-2022-24975"
+# This is specific to Git-for-Windows
+CVE_CHECK_IGNORE += "CVE-2022-41953"
+# specific to Git for Windows
+CVE_CHECK_IGNORE += "CVE-2023-22743"
+# This is specific to Git-for-Windows
+CVE_CHECK_IGNORE += "CVE-2023-25815"
PACKAGECONFIG ??= "expat curl"
PACKAGECONFIG[cvsserver] = ""
@@ -165,4 +173,4 @@ EXTRA_OECONF += "ac_cv_snprintf_returns_bogus=no \
"
EXTRA_OEMAKE += "NO_GETTEXT=1"
-SRC_URI[tarball.sha256sum] = "cad708072d5c0b390c71651f5edb44143f00b357766973470bf9adebc0944c03"
+SRC_URI[tarball.sha256sum] = "fc849272a95cc7457091221a645fcd753b3b1984767ee3323fb6a0aa944bbcb4"
diff --git a/meta/recipes-devtools/go/go-1.17.10.inc b/meta/recipes-devtools/go/go-1.17.10.inc
deleted file mode 100644
index e71feb5d02..0000000000
--- a/meta/recipes-devtools/go/go-1.17.10.inc
+++ /dev/null
@@ -1,25 +0,0 @@
-require go-common.inc
-
-FILESEXTRAPATHS:prepend := "${FILE_DIRNAME}/go-1.18:"
-
-LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707"
-
-SRC_URI += "\
- file://0001-allow-CC-and-CXX-to-have-multiple-words.patch \
- file://0002-cmd-go-make-content-based-hash-generation-less-pedan.patch \
- file://0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch \
- file://0004-ld-add-soname-to-shareable-objects.patch \
- file://0005-make.bash-override-CC-when-building-dist-and-go_boot.patch \
- file://0006-cmd-dist-separate-host-and-target-builds.patch \
- file://0007-cmd-go-make-GOROOT-precious-by-default.patch \
- file://0008-use-GOBUILDMODE-to-set-buildmode.patch \
- file://0009-Revert-cmd-go-make-sure-CC-and-CXX-are-absolute.patch \
- file://0001-exec.go-do-not-write-linker-flags-into-buildids.patch \
- file://0001-src-cmd-dist-buildgo.go-do-not-hardcode-host-compile.patch \
-"
-SRC_URI[main.sha256sum] = "299e55af30f15691b015d8dcf8ecae72412412569e5b2ece20361753a456f2f9"
-
-# Upstream don't believe it is a signifiant real world issue and will only
-# fix in 1.17 onwards where we can drop this.
-# https://github.com/golang/go/issues/30999#issuecomment-910470358
-CVE_CHECK_IGNORE += "CVE-2021-29923"
diff --git a/meta/recipes-devtools/go/go-1.17.13.inc b/meta/recipes-devtools/go/go-1.17.13.inc
new file mode 100644
index 0000000000..768961de2c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.17.13.inc
@@ -0,0 +1,67 @@
+require go-common.inc
+
+FILESEXTRAPATHS:prepend := "${FILE_DIRNAME}/go-1.21:${FILE_DIRNAME}/go-1.20:${FILE_DIRNAME}/go-1.19:${FILE_DIRNAME}/go-1.18:"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707"
+
+SRC_URI += "\
+ file://0001-allow-CC-and-CXX-to-have-multiple-words.patch \
+ file://0002-cmd-go-make-content-based-hash-generation-less-pedan.patch \
+ file://0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch \
+ file://0004-ld-add-soname-to-shareable-objects.patch \
+ file://0005-make.bash-override-CC-when-building-dist-and-go_boot.patch \
+ file://0006-cmd-dist-separate-host-and-target-builds.patch \
+ file://0007-cmd-go-make-GOROOT-precious-by-default.patch \
+ file://0008-use-GOBUILDMODE-to-set-buildmode.patch \
+ file://0009-Revert-cmd-go-make-sure-CC-and-CXX-are-absolute.patch \
+ file://0001-exec.go-do-not-write-linker-flags-into-buildids.patch \
+ file://0001-src-cmd-dist-buildgo.go-do-not-hardcode-host-compile.patch \
+ file://0010-net-Fix-issue-with-DNS-not-being-updated.patch \
+ file://CVE-2022-27664.patch \
+ file://0001-net-http-httputil-avoid-query-parameter-smuggling.patch \
+ file://CVE-2022-41715.patch \
+ file://CVE-2022-41717.patch \
+ file://CVE-2022-2879.patch \
+ file://CVE-2022-41720.patch \
+ file://CVE-2022-41723.patch \
+ file://cve-2022-41724.patch \
+ file://add_godebug.patch \
+ file://cve-2022-41725.patch \
+ file://CVE-2022-41722.patch \
+ file://CVE-2023-24537.patch \
+ file://CVE-2023-24534.patch \
+ file://CVE-2023-24538_1.patch \
+ file://CVE-2023-24538_2.patch \
+ file://CVE-2023-24540.patch \
+ file://CVE-2023-24539.patch \
+ file://CVE-2023-29404.patch \
+ file://CVE-2023-29405.patch \
+ file://CVE-2023-29402.patch \
+ file://CVE-2023-29400.patch \
+ file://CVE-2023-29406-1.patch \
+ file://CVE-2023-29406-2.patch \
+ file://CVE-2023-24536_1.patch \
+ file://CVE-2023-24536_2.patch \
+ file://CVE-2023-24536_3.patch \
+ file://CVE-2023-24531_1.patch \
+ file://CVE-2023-24531_2.patch \
+ file://CVE-2023-29409.patch \
+ file://CVE-2023-39319.patch \
+ file://CVE-2023-39318.patch \
+ file://CVE-2023-39326.patch \
+ file://CVE-2023-45285.patch \
+ file://CVE-2023-45287.patch \
+ file://CVE-2023-45289.patch \
+ file://CVE-2023-45290.patch \
+ file://CVE-2024-24784.patch \
+ file://CVE-2024-24785.patch \
+"
+SRC_URI[main.sha256sum] = "a1a48b23afb206f95e7bbaa9b898d965f90826f6f1d1fc0c1d784ada0cd300fd"
+
+# Upstream don't believe it is a significant real world issue and will only
+# fix in 1.17 onwards where we can drop this.
+# https://github.com/golang/go/issues/30999#issuecomment-910470358
+CVE_CHECK_IGNORE += "CVE-2021-29923"
+
+# These are specific to Microsoft Windows
+CVE_CHECK_IGNORE += "CVE-2022-41716 CVE-2023-45283 CVE-2023-45284"
diff --git a/meta/recipes-devtools/go/go-1.18/0001-net-http-httputil-avoid-query-parameter-smuggling.patch b/meta/recipes-devtools/go/go-1.18/0001-net-http-httputil-avoid-query-parameter-smuggling.patch
new file mode 100644
index 0000000000..80fba1446e
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/0001-net-http-httputil-avoid-query-parameter-smuggling.patch
@@ -0,0 +1,178 @@
+From c8bdf59453c95528a444a85e1b206c1c09eb20f6 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 22 Sep 2022 13:32:00 -0700
+Subject: [PATCH] net/http/httputil: avoid query parameter smuggling
+
+Query parameter smuggling occurs when a proxy's interpretation
+of query parameters differs from that of a downstream server.
+Change ReverseProxy to avoid forwarding ignored query parameters.
+
+Remove unparsable query parameters from the outbound request
+
+ * if req.Form != nil after calling ReverseProxy.Director; and
+ * before calling ReverseProxy.Rewrite.
+
+This change preserves the existing behavior of forwarding the
+raw query untouched if a Director hook does not parse the query
+by calling Request.ParseForm (possibly indirectly).
+
+Fixes #55842
+For #54663
+For CVE-2022-2880
+
+Change-Id: If1621f6b0e73a49d79059dae9e6b256e0ff18ca9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/432976
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit 7c84234142149bd24a4096c6cab691d3593f3431)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/433695
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+
+CVE: CVE-2022-2880
+Upstream-Status: Backport [9d2c73a9fd69e45876509bb3bdb2af99bf77da1e]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/net/http/httputil/reverseproxy.go | 36 +++++++++++
+ src/net/http/httputil/reverseproxy_test.go | 74 ++++++++++++++++++++++
+ 2 files changed, 110 insertions(+)
+
+diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
+index 8b63368..c76eec6 100644
+--- a/src/net/http/httputil/reverseproxy.go
++++ b/src/net/http/httputil/reverseproxy.go
+@@ -249,6 +249,9 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ }
+
+ p.Director(outreq)
++ if outreq.Form != nil {
++ outreq.URL.RawQuery = cleanQueryParams(outreq.URL.RawQuery)
++ }
+ outreq.Close = false
+
+ reqUpType := upgradeType(outreq.Header)
+@@ -628,3 +631,36 @@ func (c switchProtocolCopier) copyToBackend(errc chan<- error) {
+ _, err := io.Copy(c.backend, c.user)
+ errc <- err
+ }
++
++func cleanQueryParams(s string) string {
++ reencode := func(s string) string {
++ v, _ := url.ParseQuery(s)
++ return v.Encode()
++ }
++ for i := 0; i < len(s); {
++ switch s[i] {
++ case ';':
++ return reencode(s)
++ case '%':
++ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
++ return reencode(s)
++ }
++ i += 3
++ default:
++ i++
++ }
++ }
++ return s
++}
++
++func ishex(c byte) bool {
++ switch {
++ case '0' <= c && c <= '9':
++ return true
++ case 'a' <= c && c <= 'f':
++ return true
++ case 'A' <= c && c <= 'F':
++ return true
++ }
++ return false
++}
+diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go
+index 4b6ad77..8c0a4f1 100644
+--- a/src/net/http/httputil/reverseproxy_test.go
++++ b/src/net/http/httputil/reverseproxy_test.go
+@@ -1517,3 +1517,77 @@ func TestJoinURLPath(t *testing.T) {
+ }
+ }
+ }
++
++const (
++ testWantsCleanQuery = true
++ testWantsRawQuery = false
++)
++
++func TestReverseProxyQueryParameterSmugglingDirectorDoesNotParseForm(t *testing.T) {
++ testReverseProxyQueryParameterSmuggling(t, testWantsRawQuery, func(u *url.URL) *ReverseProxy {
++ proxyHandler := NewSingleHostReverseProxy(u)
++ oldDirector := proxyHandler.Director
++ proxyHandler.Director = func(r *http.Request) {
++ oldDirector(r)
++ }
++ return proxyHandler
++ })
++}
++
++func TestReverseProxyQueryParameterSmugglingDirectorParsesForm(t *testing.T) {
++ testReverseProxyQueryParameterSmuggling(t, testWantsCleanQuery, func(u *url.URL) *ReverseProxy {
++ proxyHandler := NewSingleHostReverseProxy(u)
++ oldDirector := proxyHandler.Director
++ proxyHandler.Director = func(r *http.Request) {
++ // Parsing the form causes ReverseProxy to remove unparsable
++ // query parameters before forwarding.
++ r.FormValue("a")
++ oldDirector(r)
++ }
++ return proxyHandler
++ })
++}
++
++func testReverseProxyQueryParameterSmuggling(t *testing.T, wantCleanQuery bool, newProxy func(*url.URL) *ReverseProxy) {
++ const content = "response_content"
++ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ w.Write([]byte(r.URL.RawQuery))
++ }))
++ defer backend.Close()
++ backendURL, err := url.Parse(backend.URL)
++ if err != nil {
++ t.Fatal(err)
++ }
++ proxyHandler := newProxy(backendURL)
++ frontend := httptest.NewServer(proxyHandler)
++ defer frontend.Close()
++
++ // Don't spam output with logs of queries containing semicolons.
++ backend.Config.ErrorLog = log.New(io.Discard, "", 0)
++ frontend.Config.ErrorLog = log.New(io.Discard, "", 0)
++
++ for _, test := range []struct {
++ rawQuery string
++ cleanQuery string
++ }{{
++ rawQuery: "a=1&a=2;b=3",
++ cleanQuery: "a=1",
++ }, {
++ rawQuery: "a=1&a=%zz&b=3",
++ cleanQuery: "a=1&b=3",
++ }} {
++ res, err := frontend.Client().Get(frontend.URL + "?" + test.rawQuery)
++ if err != nil {
++ t.Fatalf("Get: %v", err)
++ }
++ defer res.Body.Close()
++ body, _ := io.ReadAll(res.Body)
++ wantQuery := test.rawQuery
++ if wantCleanQuery {
++ wantQuery = test.cleanQuery
++ }
++ if got, want := string(body), wantQuery; got != want {
++ t.Errorf("proxy forwarded raw query %q as %q, want %q", test.rawQuery, got, want)
++ }
++ }
++}
+--
+2.32.0
+
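A minimal sketch of the behaviour the CVE-2022-2880 backport above introduces, assuming a patched Go toolchain; the handlers and query string below are illustrative, not part of the patch. Once a Director hook parses the form, unparsable query parameters are stripped before the request is forwarded:

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    	"net/http/httptest"
    	"net/http/httputil"
    	"net/url"
    )

    func main() {
    	// Backend that echoes the raw query it receives.
    	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		fmt.Fprint(w, r.URL.RawQuery)
    	}))
    	defer backend.Close()

    	target, _ := url.Parse(backend.URL)
    	proxy := httputil.NewSingleHostReverseProxy(target)
    	director := proxy.Director
    	proxy.Director = func(r *http.Request) {
    		// Parsing the form marks the query as interpreted by the proxy;
    		// unparsable parameters are then removed before forwarding.
    		r.FormValue("a")
    		director(r)
    	}
    	front := httptest.NewServer(proxy)
    	defer front.Close()

    	res, _ := front.Client().Get(front.URL + "/?a=1&a=2;b=3")
    	body, _ := io.ReadAll(res.Body)
    	res.Body.Close()
    	fmt.Println(string(body)) // "a=1" with the fix; the raw query without it
    }

A Director that never calls ParseForm (directly or via FormValue) keeps the old behaviour: the raw query is forwarded untouched.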
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2022-27664.patch b/meta/recipes-devtools/go/go-1.18/CVE-2022-27664.patch
new file mode 100644
index 0000000000..fba4f054ee
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2022-27664.patch
@@ -0,0 +1,102 @@
+From 5bc9106458fc07851ac324a4157132a91b1f3479 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 22 Aug 2022 16:21:02 -0700
+Subject: [PATCH] [release-branch.go1.18] net/http: update bundled
+ golang.org/x/net/http2
+
+Disable cmd/internal/moddeps test, since this update includes PRIVATE
+track fixes.
+
+Fixes CVE-2022-27664
+Fixes #53977
+For #54658.
+
+Change-Id: I84b0b8f61e49e15ef55ef8d738730107a3cf849b
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1554415
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/428635
+Reviewed-by: Tatiana Bradley <tatiana@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport
+CVE: CVE-2022-27664
+
+Reference to upstream patch: https://github.com/golang/go/commit/5bc9106458fc07851ac324a4157132a91b1f3479
+Signed-off-by: Teoh Jay Shen <jay.shen.teoh@intel.com>
+---
+ src/cmd/internal/moddeps/moddeps_test.go | 2 ++
+ src/net/http/h2_bundle.go | 21 +++++++++++++--------
+ 2 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/src/cmd/internal/moddeps/moddeps_test.go b/src/cmd/internal/moddeps/moddeps_test.go
+index 56c3b2585c..3306e29431 100644
+--- a/src/cmd/internal/moddeps/moddeps_test.go
++++ b/src/cmd/internal/moddeps/moddeps_test.go
+@@ -34,6 +34,8 @@ import (
+ // See issues 36852, 41409, and 43687.
+ // (Also see golang.org/issue/27348.)
+ func TestAllDependencies(t *testing.T) {
++ t.Skip("TODO(#53977): 1.18.5 contains unreleased changes from vendored modules")
++
+ goBin := testenv.GoToolPath(t)
+
+ // Ensure that all packages imported within GOROOT
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index bb82f24585..1e78f6cdb9 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -3384,10 +3384,11 @@ func (s http2SettingID) String() string {
+ // name (key). See httpguts.ValidHeaderName for the base rules.
+ //
+ // Further, http2 says:
+-// "Just as in HTTP/1.x, header field names are strings of ASCII
+-// characters that are compared in a case-insensitive
+-// fashion. However, header field names MUST be converted to
+-// lowercase prior to their encoding in HTTP/2. "
++//
++// "Just as in HTTP/1.x, header field names are strings of ASCII
++// characters that are compared in a case-insensitive
++// fashion. However, header field names MUST be converted to
++// lowercase prior to their encoding in HTTP/2. "
+ func http2validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+@@ -3578,8 +3579,8 @@ func (s *http2sorter) SortStrings(ss []string) {
+ // validPseudoPath reports whether v is a valid :path pseudo-header
+ // value. It must be either:
+ //
+-// *) a non-empty string starting with '/'
+-// *) the string '*', for OPTIONS requests.
++// *) a non-empty string starting with '/'
++// *) the string '*', for OPTIONS requests.
+ //
+ // For now this is only used a quick check for deciding when to clean
+ // up Opaque URLs before sending requests from the Transport.
+@@ -5053,6 +5054,9 @@ func (sc *http2serverConn) startGracefulShutdownInternal() {
+ func (sc *http2serverConn) goAway(code http2ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
++ if sc.goAwayCode == http2ErrCodeNo {
++ sc.goAwayCode = code
++ }
+ return
+ }
+ sc.inGoAway = true
+@@ -6265,8 +6269,9 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
+ // prior to the headers being written. If the set of trailers is fixed
+ // or known before the header is written, the normal Go trailers mechanism
+ // is preferred:
+-// https://golang.org/pkg/net/http/#ResponseWriter
+-// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
++//
++// https://golang.org/pkg/net/http/#ResponseWriter
++// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+ const http2TrailerPrefix = "Trailer:"
+
+ // promoteUndeclaredTrailers permits http.Handlers to set trailers
+--
+2.36.1
+
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2022-2879.patch b/meta/recipes-devtools/go/go-1.18/CVE-2022-2879.patch
new file mode 100644
index 0000000000..0315e1a3ee
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2022-2879.patch
@@ -0,0 +1,177 @@
+From d064ed520a7cc6b480f9565e30751e695d394f4e Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 2 Sep 2022 20:45:18 -0700
+Subject: [PATCH] archive/tar: limit size of headers
+
+Set a 1MiB limit on special file blocks (PAX headers, GNU long names,
+GNU link names), to avoid reading arbitrarily large amounts of data
+into memory.
+
+Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting
+this issue.
+
+Fixes CVE-2022-2879
+Updates #54853
+Fixes #55925
+
+Change-Id: I85136d6ff1e0af101a112190e027987ab4335680
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1565555
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit 6ee768cef6b82adf7a90dcf367a1699ef694f3b2)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1590622
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/438500
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+CVE: CVE-2022-2879
+Upstream-Status: Backport [0a723816cd205576945fa57fbdde7e6532d59d08]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/archive/tar/format.go | 4 ++++
+ src/archive/tar/reader.go | 14 ++++++++++++--
+ src/archive/tar/reader_test.go | 8 +++++++-
+ src/archive/tar/writer.go | 3 +++
+ src/archive/tar/writer_test.go | 27 +++++++++++++++++++++++++++
+ 5 files changed, 53 insertions(+), 3 deletions(-)
+
+diff --git a/src/archive/tar/format.go b/src/archive/tar/format.go
+index cfe24a5..6642364 100644
+--- a/src/archive/tar/format.go
++++ b/src/archive/tar/format.go
+@@ -143,6 +143,10 @@ const (
+ blockSize = 512 // Size of each block in a tar stream
+ nameSize = 100 // Max length of the name field in USTAR format
+ prefixSize = 155 // Max length of the prefix field in USTAR format
++
++ // Max length of a special file (PAX header, GNU long name or link).
++ // This matches the limit used by libarchive.
++ maxSpecialFileSize = 1 << 20
+ )
+
+ // blockPadding computes the number of bytes needed to pad offset up to the
+diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
+index 1b1d5b4..f645af8 100644
+--- a/src/archive/tar/reader.go
++++ b/src/archive/tar/reader.go
+@@ -103,7 +103,7 @@ func (tr *Reader) next() (*Header, error) {
+ continue // This is a meta header affecting the next header
+ case TypeGNULongName, TypeGNULongLink:
+ format.mayOnlyBe(FormatGNU)
+- realname, err := io.ReadAll(tr)
++ realname, err := readSpecialFile(tr)
+ if err != nil {
+ return nil, err
+ }
+@@ -293,7 +293,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
+ // parsePAX parses PAX headers.
+ // If an extended header (type 'x') is invalid, ErrHeader is returned
+ func parsePAX(r io.Reader) (map[string]string, error) {
+- buf, err := io.ReadAll(r)
++ buf, err := readSpecialFile(r)
+ if err != nil {
+ return nil, err
+ }
+@@ -826,6 +826,16 @@ func tryReadFull(r io.Reader, b []byte) (n int, err error) {
+ return n, err
+ }
+
++// readSpecialFile is like io.ReadAll except it returns
++// ErrFieldTooLong if more than maxSpecialFileSize is read.
++func readSpecialFile(r io.Reader) ([]byte, error) {
++ buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
++ if len(buf) > maxSpecialFileSize {
++ return nil, ErrFieldTooLong
++ }
++ return buf, err
++}
++
+ // discard skips n bytes in r, reporting an error if unable to do so.
+ func discard(r io.Reader, n int64) error {
+ // If possible, Seek to the last byte before the end of the data section.
+diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go
+index 789ddc1..926dc3d 100644
+--- a/src/archive/tar/reader_test.go
++++ b/src/archive/tar/reader_test.go
+@@ -6,6 +6,7 @@ package tar
+
+ import (
+ "bytes"
++ "compress/bzip2"
+ "crypto/md5"
+ "errors"
+ "fmt"
+@@ -625,9 +626,14 @@ func TestReader(t *testing.T) {
+ }
+ defer f.Close()
+
++ var fr io.Reader = f
++ if strings.HasSuffix(v.file, ".bz2") {
++ fr = bzip2.NewReader(fr)
++ }
++
+ // Capture all headers and checksums.
+ var (
+- tr = NewReader(f)
++ tr = NewReader(fr)
+ hdrs []*Header
+ chksums []string
+ rdbuf = make([]byte, 8)
+diff --git a/src/archive/tar/writer.go b/src/archive/tar/writer.go
+index e80498d..893eac0 100644
+--- a/src/archive/tar/writer.go
++++ b/src/archive/tar/writer.go
+@@ -199,6 +199,9 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+ flag = TypeXHeader
+ }
+ data := buf.String()
++ if len(data) > maxSpecialFileSize {
++ return ErrFieldTooLong
++ }
+ if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
+ return err // Global headers return here
+ }
+diff --git a/src/archive/tar/writer_test.go b/src/archive/tar/writer_test.go
+index a00f02d..4e709e5 100644
+--- a/src/archive/tar/writer_test.go
++++ b/src/archive/tar/writer_test.go
+@@ -1006,6 +1006,33 @@ func TestIssue12594(t *testing.T) {
+ }
+ }
+
++func TestWriteLongHeader(t *testing.T) {
++ for _, test := range []struct {
++ name string
++ h *Header
++ }{{
++ name: "name too long",
++ h: &Header{Name: strings.Repeat("a", maxSpecialFileSize)},
++ }, {
++ name: "linkname too long",
++ h: &Header{Linkname: strings.Repeat("a", maxSpecialFileSize)},
++ }, {
++ name: "uname too long",
++ h: &Header{Uname: strings.Repeat("a", maxSpecialFileSize)},
++ }, {
++ name: "gname too long",
++ h: &Header{Gname: strings.Repeat("a", maxSpecialFileSize)},
++ }, {
++ name: "PAX header too long",
++ h: &Header{PAXRecords: map[string]string{"GOLANG.x": strings.Repeat("a", maxSpecialFileSize)}},
++ }} {
++ w := NewWriter(io.Discard)
++ if err := w.WriteHeader(test.h); err != ErrFieldTooLong {
++ t.Errorf("%v: w.WriteHeader() = %v, want ErrFieldTooLong", test.name, err)
++ }
++ }
++}
++
+ // testNonEmptyWriter wraps an io.Writer and ensures that
+ // Write is never called with an empty buffer.
+ type testNonEmptyWriter struct{ io.Writer }
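The 1 MiB special-file limit added by this CVE-2022-2879 backport is also visible from the tar Writer; a short illustrative check, assuming a patched toolchain (the record name is arbitrary):

    package main

    import (
    	"archive/tar"
    	"errors"
    	"fmt"
    	"io"
    	"strings"
    )

    func main() {
    	w := tar.NewWriter(io.Discard)
    	hdr := &tar.Header{
    		Name: "file.txt",
    		// A PAX record larger than the 1 MiB special-file limit.
    		PAXRecords: map[string]string{"GOLANG.x": strings.Repeat("a", 2<<20)},
    	}
    	err := w.WriteHeader(hdr)
    	fmt.Println(errors.Is(err, tar.ErrFieldTooLong)) // true once the limit is in place
    }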
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2022-41715.patch b/meta/recipes-devtools/go/go-1.18/CVE-2022-41715.patch
new file mode 100644
index 0000000000..994f37aaf3
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2022-41715.patch
@@ -0,0 +1,270 @@
+From e9017c2416ad0ef642f5e0c2eab2dbf3cba4d997 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Wed, 28 Sep 2022 11:18:51 -0400
+Subject: [PATCH] [release-branch.go1.18] regexp: limit size of parsed regexps
+
+Set a 128 MB limit on the amount of space used by []syntax.Inst
+in the compiled form corresponding to a given regexp.
+
+Also set a 128 MB limit on the rune storage in the *syntax.Regexp
+tree itself.
+
+Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting this issue.
+
+Fixes CVE-2022-41715.
+Updates #55949.
+Fixes #55950.
+
+Change-Id: Ia656baed81564436368cf950e1c5409752f28e1b
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1592136
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/438501
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/e9017c2416ad0ef642f5e0c2eab2dbf3cba4d997]
+CVE: CVE-2022-41715
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/regexp/syntax/parse.go | 145 ++++++++++++++++++++++++++++++--
+ src/regexp/syntax/parse_test.go | 13 +--
+ 2 files changed, 148 insertions(+), 10 deletions(-)
+
+diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
+index d7cf2af..3792960 100644
+--- a/src/regexp/syntax/parse.go
++++ b/src/regexp/syntax/parse.go
+@@ -90,15 +90,49 @@ const (
+ // until we've allocated at least maxHeight Regexp structures.
+ const maxHeight = 1000
+
++// maxSize is the maximum size of a compiled regexp in Insts.
++// It too is somewhat arbitrarily chosen, but the idea is to be large enough
++// to allow significant regexps while at the same time small enough that
++// the compiled form will not take up too much memory.
++// 128 MB is enough for a 3.3 million Inst structures, which roughly
++// corresponds to a 3.3 MB regexp.
++const (
++ maxSize = 128 << 20 / instSize
++ instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words
++)
++
++// maxRunes is the maximum number of runes allowed in a regexp tree
++// counting the runes in all the nodes.
++// Ignoring character classes p.numRunes is always less than the length of the regexp.
++// Character classes can make it much larger: each \pL adds 1292 runes.
++// 128 MB is enough for 32M runes, which is over 26k \pL instances.
++// Note that repetitions do not make copies of the rune slices,
++// so \pL{1000} is only one rune slice, not 1000.
++// We could keep a cache of character classes we've seen,
++// so that all the \pL we see use the same rune list,
++// but that doesn't remove the problem entirely:
++// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()].
++// And because the Rune slice is exposed directly in the Regexp,
++// there is not an opportunity to change the representation to allow
++// partial sharing between different character classes.
++// So the limit is the best we can do.
++const (
++ maxRunes = 128 << 20 / runeSize
++ runeSize = 4 // rune is int32
++)
++
+ type parser struct {
+ flags Flags // parse mode flags
+ stack []*Regexp // stack of parsed expressions
+ free *Regexp
+ numCap int // number of capturing groups seen
+ wholeRegexp string
+- tmpClass []rune // temporary char class work space
+- numRegexp int // number of regexps allocated
+- height map[*Regexp]int // regexp height for height limit check
++ tmpClass []rune // temporary char class work space
++ numRegexp int // number of regexps allocated
++ numRunes int // number of runes in char classes
++ repeats int64 // product of all repetitions seen
++ height map[*Regexp]int // regexp height, for height limit check
++ size map[*Regexp]int64 // regexp compiled size, for size limit check
+ }
+
+ func (p *parser) newRegexp(op Op) *Regexp {
+@@ -122,6 +156,104 @@ func (p *parser) reuse(re *Regexp) {
+ p.free = re
+ }
+
++func (p *parser) checkLimits(re *Regexp) {
++ if p.numRunes > maxRunes {
++ panic(ErrInternalError)
++ }
++ p.checkSize(re)
++ p.checkHeight(re)
++}
++
++func (p *parser) checkSize(re *Regexp) {
++ if p.size == nil {
++ // We haven't started tracking size yet.
++ // Do a relatively cheap check to see if we need to start.
++ // Maintain the product of all the repeats we've seen
++ // and don't track if the total number of regexp nodes
++ // we've seen times the repeat product is in budget.
++ if p.repeats == 0 {
++ p.repeats = 1
++ }
++ if re.Op == OpRepeat {
++ n := re.Max
++ if n == -1 {
++ n = re.Min
++ }
++ if n <= 0 {
++ n = 1
++ }
++ if int64(n) > maxSize/p.repeats {
++ p.repeats = maxSize
++ } else {
++ p.repeats *= int64(n)
++ }
++ }
++ if int64(p.numRegexp) < maxSize/p.repeats {
++ return
++ }
++
++ // We need to start tracking size.
++ // Make the map and belatedly populate it
++ // with info about everything we've constructed so far.
++ p.size = make(map[*Regexp]int64)
++ for _, re := range p.stack {
++ p.checkSize(re)
++ }
++ }
++
++ if p.calcSize(re, true) > maxSize {
++ panic(ErrInternalError)
++ }
++}
++
++func (p *parser) calcSize(re *Regexp, force bool) int64 {
++ if !force {
++ if size, ok := p.size[re]; ok {
++ return size
++ }
++ }
++
++ var size int64
++ switch re.Op {
++ case OpLiteral:
++ size = int64(len(re.Rune))
++ case OpCapture, OpStar:
++ // star can be 1+ or 2+; assume 2 pessimistically
++ size = 2 + p.calcSize(re.Sub[0], false)
++ case OpPlus, OpQuest:
++ size = 1 + p.calcSize(re.Sub[0], false)
++ case OpConcat:
++ for _, sub := range re.Sub {
++ size += p.calcSize(sub, false)
++ }
++ case OpAlternate:
++ for _, sub := range re.Sub {
++ size += p.calcSize(sub, false)
++ }
++ if len(re.Sub) > 1 {
++ size += int64(len(re.Sub)) - 1
++ }
++ case OpRepeat:
++ sub := p.calcSize(re.Sub[0], false)
++ if re.Max == -1 {
++ if re.Min == 0 {
++ size = 2 + sub // x*
++ } else {
++ size = 1 + int64(re.Min)*sub // xxx+
++ }
++ break
++ }
++ // x{2,5} = xx(x(x(x)?)?)?
++ size = int64(re.Max)*sub + int64(re.Max-re.Min)
++ }
++
++ if size < 1 {
++ size = 1
++ }
++ p.size[re] = size
++ return size
++}
++
+ func (p *parser) checkHeight(re *Regexp) {
+ if p.numRegexp < maxHeight {
+ return
+@@ -158,6 +290,7 @@ func (p *parser) calcHeight(re *Regexp, force bool) int {
+
+ // push pushes the regexp re onto the parse stack and returns the regexp.
+ func (p *parser) push(re *Regexp) *Regexp {
++ p.numRunes += len(re.Rune)
+ if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] {
+ // Single rune.
+ if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) {
+@@ -189,7 +322,7 @@ func (p *parser) push(re *Regexp) *Regexp {
+ }
+
+ p.stack = append(p.stack, re)
+- p.checkHeight(re)
++ p.checkLimits(re)
+ return re
+ }
+
+@@ -299,7 +432,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (
+ re.Sub = re.Sub0[:1]
+ re.Sub[0] = sub
+ p.stack[n-1] = re
+- p.checkHeight(re)
++ p.checkLimits(re)
+
+ if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
+ return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
+@@ -503,6 +636,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp {
+
+ for j := start; j < i; j++ {
+ sub[j] = p.removeLeadingString(sub[j], len(str))
++ p.checkLimits(sub[j])
+ }
+ suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+@@ -560,6 +694,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp {
+ for j := start; j < i; j++ {
+ reuse := j != start // prefix came from sub[start]
+ sub[j] = p.removeLeadingRegexp(sub[j], reuse)
++ p.checkLimits(sub[j])
+ }
+ suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+diff --git a/src/regexp/syntax/parse_test.go b/src/regexp/syntax/parse_test.go
+index 1ef6d8a..67e3c56 100644
+--- a/src/regexp/syntax/parse_test.go
++++ b/src/regexp/syntax/parse_test.go
+@@ -484,12 +484,15 @@ var invalidRegexps = []string{
+ `(?P<>a)`,
+ `[a-Z]`,
+ `(?i)[a-Z]`,
+- `a{100000}`,
+- `a{100000,}`,
+- "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})",
+- strings.Repeat("(", 1000) + strings.Repeat(")", 1000),
+- strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000),
+ `\Q\E*`,
++ `a{100000}`, // too much repetition
++ `a{100000,}`, // too much repetition
++ "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})", // too much repetition
++ strings.Repeat("(", 1000) + strings.Repeat(")", 1000), // too deep
++ strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000), // too deep
++ "(" + strings.Repeat("(xx?)", 1000) + "){1000}", // too long
++ strings.Repeat("(xx?){1000}", 1000), // too long
++ strings.Repeat(`\pL`, 27000), // too many runes
+ }
+
+ var onlyPerl = []string{
+--
+2.25.1
+
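An illustrative check of the limits introduced by this CVE-2022-41715 backport, assuming a patched toolchain; the patterns are taken from the invalidRegexps cases above:

    package main

    import (
    	"fmt"
    	"regexp"
    	"strings"
    )

    func main() {
    	// Rejected: nested repetition would expand past the 128 MB compiled-size cap.
    	_, err := regexp.Compile(strings.Repeat("(xx?){1000}", 1000))
    	fmt.Println(err != nil) // true

    	// Rejected: repeated \pL classes exceed the 128 MB rune-storage cap.
    	_, err = regexp.Compile(strings.Repeat(`\pL`, 27000))
    	fmt.Println(err != nil) // true

    	// Ordinary patterns are unaffected.
    	_, err = regexp.Compile(`(ab?){100}`)
    	fmt.Println(err == nil) // true
    }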
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2022-41717.patch b/meta/recipes-devtools/go/go-1.18/CVE-2022-41717.patch
new file mode 100644
index 0000000000..e2ab92ed00
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2022-41717.patch
@@ -0,0 +1,89 @@
+From 618120c165669c00a1606505defea6ca755cdc27 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 30 Nov 2022 16:46:33 -0500
+Subject: [PATCH] [release-branch.go1.19] net/http: update bundled
+ golang.org/x/net/http2
+
+Disable cmd/internal/moddeps test, since this update includes PRIVATE
+track fixes.
+
+For #56350.
+For #57009.
+Fixes CVE-2022-41717.
+
+Change-Id: I5c6ce546add81f361dcf0d5123fa4eaaf8f0a03b
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1663835
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/455363
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Jenny Rakoczy <jenny@golang.org>
+Reviewed-by: Michael Pratt <mpratt@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/618120c165669c00a1606505defea6ca755cdc27]
+CVE: CVE-2022-41717
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/cmd/internal/moddeps/moddeps_test.go | 1 +
+ src/net/http/h2_bundle.go | 18 +++++++++++-------
+ 2 files changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/src/cmd/internal/moddeps/moddeps_test.go b/src/cmd/internal/moddeps/moddeps_test.go
+index 3306e29..d48d43f 100644
+--- a/src/cmd/internal/moddeps/moddeps_test.go
++++ b/src/cmd/internal/moddeps/moddeps_test.go
+@@ -34,6 +34,7 @@ import (
+ // See issues 36852, 41409, and 43687.
+ // (Also see golang.org/issue/27348.)
+ func TestAllDependencies(t *testing.T) {
++ t.Skip("TODO(#57009): 1.19.4 contains unreleased changes from vendored modules")
+ t.Skip("TODO(#53977): 1.18.5 contains unreleased changes from vendored modules")
+
+ goBin := testenv.GoToolPath(t)
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index 6e2ef30..9d6abd8 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -4189,6 +4189,7 @@ type http2serverConn struct {
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
++ canonHeaderKeysSize int // canonHeader keys size in bytes
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+@@ -4368,6 +4369,13 @@ func (sc *http2serverConn) condlogf(err error, format string, args ...interface{
+ }
+ }
+
++// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
++// of the entries in the canonHeader cache.
++// This should be larger than the size of unique, uncommon header keys likely to
++// be sent by the peer, while not so high as to permit unreasonable memory usage
++// if the peer sends an unbounded number of unique header keys.
++const http2maxCachedCanonicalHeadersKeysSize = 2048
++
+ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ http2buildCommonHeaderMapsOnce()
+@@ -4383,14 +4391,10 @@ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = CanonicalHeaderKey(v)
+- // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
+- // entries in the canonHeader cache. This should be larger than the number
+- // of unique, uncommon header keys likely to be sent by the peer, while not
+- // so high as to permit unreaasonable memory usage if the peer sends an unbounded
+- // number of unique header keys.
+- const maxCachedCanonicalHeaders = 32
+- if len(sc.canonHeader) < maxCachedCanonicalHeaders {
++ size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
++ if sc.canonHeaderKeysSize+size <= http2maxCachedCanonicalHeadersKeysSize {
+ sc.canonHeader[v] = cv
++ sc.canonHeaderKeysSize += size
+ }
+ return cv
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2022-41720.patch b/meta/recipes-devtools/go/go-1.18/CVE-2022-41720.patch
new file mode 100644
index 0000000000..6c2e8804b3
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2022-41720.patch
@@ -0,0 +1,514 @@
+From f8896a97a0630b0f2f8c488310147f7f20b3ec7d Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 10 Nov 2022 12:16:27 -0800
+Subject: [PATCH] os, net/http: avoid escapes from os.DirFS and http.Dir on
+ Windows
+
+Do not permit access to Windows reserved device names (NUL, COM1, etc.)
+via os.DirFS and http.Dir filesystems.
+
+Avoid escapes from os.DirFS(`\`) on Windows. DirFS would join the
+the root to the relative path with a path separator, making
+os.DirFS(`\`).Open(`/foo/bar`) open the path `\\foo\bar`, which is
+a UNC name. Not only does this not open the intended file, but permits
+reference to any file on the system rather than only files on the
+current drive.
+
+Make os.DirFS("") invalid, with all file access failing. Previously,
+a root of "" was interpreted as "/", which is surprising and probably
+unintentional.
+
+Fixes CVE-2022-41720.
+Fixes #56694.
+
+Change-Id: I275b5fa391e6ad7404309ea98ccc97405942e0f0
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1663832
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/455360
+Reviewed-by: Michael Pratt <mpratt@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Jenny Rakoczy <jenny@golang.org>
+
+CVE: CVE-2022-41720
+Upstream-Status: Backport [7013a4f5f816af62033ad63dd06b77c30d7a62a7]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/go/build/deps_test.go | 1 +
+ src/internal/safefilepath/path.go | 21 +++++
+ src/internal/safefilepath/path_other.go | 23 ++++++
+ src/internal/safefilepath/path_test.go | 88 +++++++++++++++++++++
+ src/internal/safefilepath/path_windows.go | 95 +++++++++++++++++++++++
+ src/net/http/fs.go | 8 +-
+ src/net/http/fs_test.go | 28 +++++++
+ src/os/file.go | 36 +++++++--
+ src/os/os_test.go | 38 +++++++++
+ 9 files changed, 328 insertions(+), 10 deletions(-)
+ create mode 100644 src/internal/safefilepath/path.go
+ create mode 100644 src/internal/safefilepath/path_other.go
+ create mode 100644 src/internal/safefilepath/path_test.go
+ create mode 100644 src/internal/safefilepath/path_windows.go
+
+diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
+index 45e2f25..dc3bb8c 100644
+--- a/src/go/build/deps_test.go
++++ b/src/go/build/deps_test.go
+@@ -165,6 +165,7 @@ var depsRules = `
+ io/fs
+ < internal/testlog
+ < internal/poll
++ < internal/safefilepath
+ < os
+ < os/signal;
+
+diff --git a/src/internal/safefilepath/path.go b/src/internal/safefilepath/path.go
+new file mode 100644
+index 0000000..0f0a270
+--- /dev/null
++++ b/src/internal/safefilepath/path.go
+@@ -0,0 +1,21 @@
++// Copyright 2022 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// Package safefilepath manipulates operating-system file paths.
++package safefilepath
++
++import (
++ "errors"
++)
++
++var errInvalidPath = errors.New("invalid path")
++
++// FromFS converts a slash-separated path into an operating-system path.
++//
++// FromFS returns an error if the path cannot be represented by the operating
++// system. For example, paths containing '\' and ':' characters are rejected
++// on Windows.
++func FromFS(path string) (string, error) {
++ return fromFS(path)
++}
+diff --git a/src/internal/safefilepath/path_other.go b/src/internal/safefilepath/path_other.go
+new file mode 100644
+index 0000000..f93da18
+--- /dev/null
++++ b/src/internal/safefilepath/path_other.go
+@@ -0,0 +1,23 @@
++// Copyright 2022 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++//go:build !windows
++
++package safefilepath
++
++import "runtime"
++
++func fromFS(path string) (string, error) {
++ if runtime.GOOS == "plan9" {
++ if len(path) > 0 && path[0] == '#' {
++ return path, errInvalidPath
++ }
++ }
++ for i := range path {
++ if path[i] == 0 {
++ return "", errInvalidPath
++ }
++ }
++ return path, nil
++}
+diff --git a/src/internal/safefilepath/path_test.go b/src/internal/safefilepath/path_test.go
+new file mode 100644
+index 0000000..dc662c1
+--- /dev/null
++++ b/src/internal/safefilepath/path_test.go
+@@ -0,0 +1,88 @@
++// Copyright 2022 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package safefilepath_test
++
++import (
++ "internal/safefilepath"
++ "os"
++ "path/filepath"
++ "runtime"
++ "testing"
++)
++
++type PathTest struct {
++ path, result string
++}
++
++const invalid = ""
++
++var fspathtests = []PathTest{
++ {".", "."},
++ {"/a/b/c", "/a/b/c"},
++ {"a\x00b", invalid},
++}
++
++var winreservedpathtests = []PathTest{
++ {`a\b`, `a\b`},
++ {`a:b`, `a:b`},
++ {`a/b:c`, `a/b:c`},
++ {`NUL`, `NUL`},
++ {`./com1`, `./com1`},
++ {`a/nul/b`, `a/nul/b`},
++}
++
++// Whether a reserved name with an extension is reserved or not varies by
++// Windows version.
++var winreservedextpathtests = []PathTest{
++ {"nul.txt", "nul.txt"},
++ {"a/nul.txt/b", "a/nul.txt/b"},
++}
++
++var plan9reservedpathtests = []PathTest{
++ {`#c`, `#c`},
++}
++
++func TestFromFS(t *testing.T) {
++ switch runtime.GOOS {
++ case "windows":
++ if canWriteFile(t, "NUL") {
++ t.Errorf("can unexpectedly write a file named NUL on Windows")
++ }
++ if canWriteFile(t, "nul.txt") {
++ fspathtests = append(fspathtests, winreservedextpathtests...)
++ } else {
++ winreservedpathtests = append(winreservedpathtests, winreservedextpathtests...)
++ }
++ for i := range winreservedpathtests {
++ winreservedpathtests[i].result = invalid
++ }
++ for i := range fspathtests {
++ fspathtests[i].result = filepath.FromSlash(fspathtests[i].result)
++ }
++ case "plan9":
++ for i := range plan9reservedpathtests {
++ plan9reservedpathtests[i].result = invalid
++ }
++ }
++ tests := fspathtests
++ tests = append(tests, winreservedpathtests...)
++ tests = append(tests, plan9reservedpathtests...)
++ for _, test := range tests {
++ got, err := safefilepath.FromFS(test.path)
++ if (got == "") != (err != nil) {
++ t.Errorf(`FromFS(%q) = %q, %v; want "" only if err != nil`, test.path, got, err)
++ }
++ if got != test.result {
++ t.Errorf("FromFS(%q) = %q, %v; want %q", test.path, got, err, test.result)
++ }
++ }
++}
++
++func canWriteFile(t *testing.T, name string) bool {
++ path := filepath.Join(t.TempDir(), name)
++ os.WriteFile(path, []byte("ok"), 0666)
++ b, _ := os.ReadFile(path)
++ return string(b) == "ok"
++}
+diff --git a/src/internal/safefilepath/path_windows.go b/src/internal/safefilepath/path_windows.go
+new file mode 100644
+index 0000000..909c150
+--- /dev/null
++++ b/src/internal/safefilepath/path_windows.go
+@@ -0,0 +1,95 @@
++// Copyright 2022 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package safefilepath
++
++import (
++ "syscall"
++ "unicode/utf8"
++)
++
++func fromFS(path string) (string, error) {
++ if !utf8.ValidString(path) {
++ return "", errInvalidPath
++ }
++ for len(path) > 1 && path[0] == '/' && path[1] == '/' {
++ path = path[1:]
++ }
++ containsSlash := false
++ for p := path; p != ""; {
++ // Find the next path element.
++ i := 0
++ dot := -1
++ for i < len(p) && p[i] != '/' {
++ switch p[i] {
++ case 0, '\\', ':':
++ return "", errInvalidPath
++ case '.':
++ if dot < 0 {
++ dot = i
++ }
++ }
++ i++
++ }
++ part := p[:i]
++ if i < len(p) {
++ containsSlash = true
++ p = p[i+1:]
++ } else {
++ p = ""
++ }
++ // Trim the extension and look for a reserved name.
++ base := part
++ if dot >= 0 {
++ base = part[:dot]
++ }
++ if isReservedName(base) {
++ if dot < 0 {
++ return "", errInvalidPath
++ }
++ // The path element is a reserved name with an extension.
++ // Some Windows versions consider this a reserved name,
++ // while others do not. Use FullPath to see if the name is
++ // reserved.
++ if p, _ := syscall.FullPath(part); len(p) >= 4 && p[:4] == `\\.\` {
++ return "", errInvalidPath
++ }
++ }
++ }
++ if containsSlash {
++ // We can't depend on strings, so substitute \ for / manually.
++ buf := []byte(path)
++ for i, b := range buf {
++ if b == '/' {
++ buf[i] = '\\'
++ }
++ }
++ path = string(buf)
++ }
++ return path, nil
++}
++
++// isReservedName reports if name is a Windows reserved device name.
++// It does not detect names with an extension, which are also reserved on some Windows versions.
++//
++// For details, search for PRN in
++// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file.
++func isReservedName(name string) bool {
++ if 3 <= len(name) && len(name) <= 4 {
++ switch string([]byte{toUpper(name[0]), toUpper(name[1]), toUpper(name[2])}) {
++ case "CON", "PRN", "AUX", "NUL":
++ return len(name) == 3
++ case "COM", "LPT":
++ return len(name) == 4 && '1' <= name[3] && name[3] <= '9'
++ }
++ }
++ return false
++}
++
++func toUpper(c byte) byte {
++ if 'a' <= c && c <= 'z' {
++ return c - ('a' - 'A')
++ }
++ return c
++}
+diff --git a/src/net/http/fs.go b/src/net/http/fs.go
+index 57e731e..43ee4b5 100644
+--- a/src/net/http/fs.go
++++ b/src/net/http/fs.go
+@@ -9,6 +9,7 @@ package http
+ import (
+ "errors"
+ "fmt"
++ "internal/safefilepath"
+ "io"
+ "io/fs"
+ "mime"
+@@ -69,14 +70,15 @@ func mapDirOpenError(originalErr error, name string) error {
+ // Open implements FileSystem using os.Open, opening files for reading rooted
+ // and relative to the directory d.
+ func (d Dir) Open(name string) (File, error) {
+- if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) {
+- return nil, errors.New("http: invalid character in file path")
++ path, err := safefilepath.FromFS(path.Clean("/" + name))
++ if err != nil {
++ return nil, errors.New("http: invalid or unsafe file path")
+ }
+ dir := string(d)
+ if dir == "" {
+ dir = "."
+ }
+- fullName := filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))
++ fullName := filepath.Join(dir, path)
+ f, err := os.Open(fullName)
+ if err != nil {
+ return nil, mapDirOpenError(err, fullName)
+diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go
+index b42ade1..941448a 100644
+--- a/src/net/http/fs_test.go
++++ b/src/net/http/fs_test.go
+@@ -648,6 +648,34 @@ func TestFileServerZeroByte(t *testing.T) {
+ }
+ }
+
++func TestFileServerNamesEscape(t *testing.T) {
++ t.Run("h1", func(t *testing.T) {
++ testFileServerNamesEscape(t, h1Mode)
++ })
++ t.Run("h2", func(t *testing.T) {
++ testFileServerNamesEscape(t, h2Mode)
++ })
++}
++func testFileServerNamesEscape(t *testing.T, h2 bool) {
++ defer afterTest(t)
++ ts := newClientServerTest(t, h2, FileServer(Dir("testdata"))).ts
++ defer ts.Close()
++ for _, path := range []string{
++ "/../testdata/file",
++ "/NUL", // don't read from device files on Windows
++ } {
++ res, err := ts.Client().Get(ts.URL + path)
++ if err != nil {
++ t.Fatal(err)
++ }
++ res.Body.Close()
++ if res.StatusCode < 400 || res.StatusCode > 599 {
++ t.Errorf("Get(%q): got status %v, want 4xx or 5xx", path, res.StatusCode)
++ }
++
++ }
++}
++
+ type fakeFileInfo struct {
+ dir bool
+ basename string
+diff --git a/src/os/file.go b/src/os/file.go
+index e717f17..cb87158 100644
+--- a/src/os/file.go
++++ b/src/os/file.go
+@@ -37,12 +37,12 @@
+ // Note: The maximum number of concurrent operations on a File may be limited by
+ // the OS or the system. The number should be high, but exceeding it may degrade
+ // performance or cause other issues.
+-//
+ package os
+
+ import (
+ "errors"
+ "internal/poll"
++ "internal/safefilepath"
+ "internal/testlog"
+ "internal/unsafeheader"
+ "io"
+@@ -623,6 +623,8 @@ func isWindowsNulName(name string) bool {
+ // the /prefix tree, then using DirFS does not stop the access any more than using
+ // os.Open does. DirFS is therefore not a general substitute for a chroot-style security
+ // mechanism when the directory tree contains arbitrary content.
++//
++// The directory dir must not be "".
+ func DirFS(dir string) fs.FS {
+ return dirFS(dir)
+ }
+@@ -641,10 +643,11 @@ func containsAny(s, chars string) bool {
+ type dirFS string
+
+ func (dir dirFS) Open(name string) (fs.File, error) {
+- if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
+- return nil, &PathError{Op: "open", Path: name, Err: ErrInvalid}
++ fullname, err := dir.join(name)
++ if err != nil {
++ return nil, &PathError{Op: "stat", Path: name, Err: err}
+ }
+- f, err := Open(string(dir) + "/" + name)
++ f, err := Open(fullname)
+ if err != nil {
+ return nil, err // nil fs.File
+ }
+@@ -652,16 +655,35 @@ func (dir dirFS) Open(name string) (fs.File, error) {
+ }
+
+ func (dir dirFS) Stat(name string) (fs.FileInfo, error) {
+- if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
+- return nil, &PathError{Op: "stat", Path: name, Err: ErrInvalid}
++ fullname, err := dir.join(name)
++ if err != nil {
++ return nil, &PathError{Op: "stat", Path: name, Err: err}
+ }
+- f, err := Stat(string(dir) + "/" + name)
++ f, err := Stat(fullname)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+ }
+
++// join returns the path for name in dir.
++func (dir dirFS) join(name string) (string, error) {
++ if dir == "" {
++ return "", errors.New("os: DirFS with empty root")
++ }
++ if !fs.ValidPath(name) {
++ return "", ErrInvalid
++ }
++ name, err := safefilepath.FromFS(name)
++ if err != nil {
++ return "", ErrInvalid
++ }
++ if IsPathSeparator(dir[len(dir)-1]) {
++ return string(dir) + name, nil
++ }
++ return string(dir) + string(PathSeparator) + name, nil
++}
++
+ // ReadFile reads the named file and returns the contents.
+ // A successful call returns err == nil, not err == EOF.
+ // Because ReadFile reads the whole file, it does not treat an EOF from Read
+diff --git a/src/os/os_test.go b/src/os/os_test.go
+index 506f1fb..be269bb 100644
+--- a/src/os/os_test.go
++++ b/src/os/os_test.go
+@@ -2702,6 +2702,44 @@ func TestDirFS(t *testing.T) {
+ if err == nil {
+ t.Fatalf(`Open testdata\dirfs succeeded`)
+ }
++
++ // Test that Open does not open Windows device files.
++ _, err = d.Open(`NUL`)
++ if err == nil {
++ t.Errorf(`Open NUL succeeded`)
++ }
++}
++
++func TestDirFSRootDir(t *testing.T) {
++ cwd, err := os.Getwd()
++ if err != nil {
++ t.Fatal(err)
++ }
++ cwd = cwd[len(filepath.VolumeName(cwd)):] // trim volume prefix (C:) on Windows
++ cwd = filepath.ToSlash(cwd) // convert \ to /
++ cwd = strings.TrimPrefix(cwd, "/") // trim leading /
++
++ // Test that Open can open a path starting at /.
++ d := DirFS("/")
++ f, err := d.Open(cwd + "/testdata/dirfs/a")
++ if err != nil {
++ t.Fatal(err)
++ }
++ f.Close()
++}
++
++func TestDirFSEmptyDir(t *testing.T) {
++ d := DirFS("")
++ cwd, _ := os.Getwd()
++ for _, path := range []string{
++ "testdata/dirfs/a", // not DirFS(".")
++ filepath.ToSlash(cwd) + "/testdata/dirfs/a", // not DirFS("/")
++ } {
++ _, err := d.Open(path)
++ if err == nil {
++ t.Fatalf(`DirFS("").Open(%q) succeeded`, path)
++ }
++ }
+ }
+
+ func TestDirFSPathsValid(t *testing.T) {
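A small sketch of the os.DirFS behaviour after this CVE-2022-41720 backport, assuming a patched toolchain; the paths are illustrative only:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	// An empty root is now invalid: every Open fails instead of
    	// silently resolving names against "/".
    	if _, err := os.DirFS("").Open("etc/hostname"); err != nil {
    		fmt.Println("DirFS(\"\"):", err)
    	}

    	// On Windows, names containing '\' or ':' and reserved device names
    	// such as NUL or COM1 are rejected rather than escaping the tree or
    	// opening a device file; elsewhere this simply fails if no such file exists.
    	if _, err := os.DirFS("/tmp").Open("NUL"); err != nil {
    		fmt.Println("rejected or missing:", err)
    	}
    }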
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2022-41722.patch b/meta/recipes-devtools/go/go-1.18/CVE-2022-41722.patch
new file mode 100644
index 0000000000..426a4f925f
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2022-41722.patch
@@ -0,0 +1,103 @@
+From a826b19625caebed6dd0f3fbd9d0111f6c83737c Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 12 Dec 2022 16:43:37 -0800
+Subject: [PATCH] path/filepath: do not Clean("a/../c:/b") into c:\b on Windows
+
+Do not permit Clean to convert a relative path into one starting
+with a drive reference. This change causes Clean to insert a .
+path element at the start of a path when the original path does not
+start with a volume name, and the first path element would contain
+a colon.
+
+This may introduce a spurious but harmless . path element under
+some circumstances. For example, Clean("a/../b:/../c") becomes `.\c`.
+
+This reverts CL 401595, since the change here supersedes the one
+in that CL.
+
+Thanks to RyotaK (https://twitter.com/ryotkak) for reporting this issue.
+
+Updates #57274
+Fixes #57276
+Fixes CVE-2022-41722
+
+Change-Id: I837446285a03aa74c79d7642720e01f354c2ca17
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1675249
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+(cherry picked from commit 8ca37f4813ef2f64600c92b83f17c9f3ca6c03a5)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728944
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468119
+Reviewed-by: Than McIntosh <thanm@google.com>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+
+CVE: CVE-2022-41722
+Upstream-Status: Backport from https://github.com/golang/go/commit/bdf07c2e168baf736e4c057279ca12a4d674f18
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/path/filepath/path.go | 27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go
+index 8300a32..94621a0 100644
+--- a/src/path/filepath/path.go
++++ b/src/path/filepath/path.go
+@@ -15,6 +15,7 @@ import (
+ "errors"
+ "io/fs"
+ "os"
++ "runtime"
+ "sort"
+ "strings"
+ )
+@@ -117,21 +118,9 @@ func Clean(path string) string {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+- case path[r] == '.' && r+1 == n:
++ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // . element
+ r++
+- case path[r] == '.' && os.IsPathSeparator(path[r+1]):
+- // ./ element
+- r++
+-
+- for r < len(path) && os.IsPathSeparator(path[r]) {
+- r++
+- }
+- if out.w == 0 && volumeNameLen(path[r:]) > 0 {
+- // When joining prefix "." and an absolute path on Windows,
+- // the prefix should not be removed.
+- out.append('.')
+- }
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+@@ -157,6 +146,18 @@ func Clean(path string) string {
+ if rooted && out.w != 1 || !rooted && out.w != 0 {
+ out.append(Separator)
+ }
++ // If a ':' appears in the path element at the start of a Windows path,
++ // insert a .\ at the beginning to avoid converting relative paths
++ // like a/../c: into c:.
++ if runtime.GOOS == "windows" && out.w == 0 && out.volLen == 0 && r != 0 {
++ for i := r; i < n && !os.IsPathSeparator(path[i]); i++ {
++ if path[i] == ':' {
++ out.append('.')
++ out.append(Separator)
++ break
++ }
++ }
++ }
+ // copy element
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ out.append(path[r])
+--
+2.7.4
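For reference, the Clean behaviour this CVE-2022-41722 backport is after, as a short sketch assuming a patched toolchain:

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    func main() {
    	// On Windows this used to collapse to `c:\b`, turning a relative
    	// path into a drive reference; with the fix it is `.\c:\b`.
    	// On other systems ':' is an ordinary character and the result is "c:/b".
    	fmt.Println(filepath.Clean("a/../c:/b"))

    	// The spurious-but-harmless '.' element mentioned in the commit
    	// message: on Windows, Clean("a/../b:/../c") becomes `.\c`.
    	fmt.Println(filepath.Clean("a/../b:/../c"))
    }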
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2022-41723.patch b/meta/recipes-devtools/go/go-1.18/CVE-2022-41723.patch
new file mode 100644
index 0000000000..a93fa31dcd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2022-41723.patch
@@ -0,0 +1,156 @@
+From 451766789f646617157c725e20c955d4a9a70d4e Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Mon, 6 Feb 2023 10:03:44 -0800
+Subject: [PATCH] net/http: update bundled golang.org/x/net/http2
+
+Disable cmd/internal/moddeps test, since this update includes PRIVATE
+track fixes.
+
+Fixes CVE-2022-41723
+Fixes #58355
+Updates #57855
+
+Change-Id: Ie870562a6f6e44e4e8f57db6a0dde1a41a2b090c
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728939
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468118
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+Reviewed-by: Than McIntosh <thanm@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5c3e11bd0b5c0a86e5beffcd4339b86a902b21c3]
+CVE: CVE-2022-41723
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/vendor/golang.org/x/net/http2/hpack/hpack.go | 79 +++++++++++++++---------
+ 1 file changed, 49 insertions(+), 30 deletions(-)
+
+diff --git a/src/vendor/golang.org/x/net/http2/hpack/hpack.go b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+index 85f18a2..02e80e3 100644
+--- a/src/vendor/golang.org/x/net/http2/hpack/hpack.go
++++ b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
++ var undecodedName undecodedString
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+@@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ }
+ hf.Name = ihf.Name
+ } else {
+- hf.Name, buf, err = d.readString(buf, wantStr)
++ undecodedName, buf, err = d.readString(buf)
+ if err != nil {
+ return err
+ }
+ }
+- hf.Value, buf, err = d.readString(buf, wantStr)
++ undecodedValue, buf, err := d.readString(buf)
+ if err != nil {
+ return err
+ }
++ if wantStr {
++ if nameIdx <= 0 {
++ hf.Name, err = d.decodeString(undecodedName)
++ if err != nil {
++ return err
++ }
++ }
++ hf.Value, err = d.decodeString(undecodedValue)
++ if err != nil {
++ return err
++ }
++ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+@@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ return 0, origP, errNeedMore
+ }
+
+-// readString decodes an hpack string from p.
++// readString reads an hpack string from p.
+ //
+-// wantStr is whether s will be used. If false, decompression and
+-// []byte->string garbage are skipped if s will be ignored
+-// anyway. This does mean that huffman decoding errors for non-indexed
+-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+-// is returning an error anyway, and because they're not indexed, the error
+-// won't affect the decoding state.
+-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
++// It returns a reference to the encoded string data to permit deferring decode costs
++// until after the caller verifies all data is present.
++func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
+ if len(p) == 0 {
+- return "", p, errNeedMore
++ return u, p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+- return "", p, err
++ return u, p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+- return "", nil, ErrStringLength
++ // Returning an error here means Huffman decoding errors
++ // for non-indexed strings past the maximum string length
++ // are ignored, but the server is returning an error anyway
++ // and because the string is not indexed the error will not
++ // affect the decoding state.
++ return u, nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+- return "", p, errNeedMore
+- }
+- if !isHuff {
+- if wantStr {
+- s = string(p[:strLen])
+- }
+- return s, p[strLen:], nil
++ return u, p, errNeedMore
+ }
++ u.isHuff = isHuff
++ u.b = p[:strLen]
++ return u, p[strLen:], nil
++}
+
+- if wantStr {
+- buf := bufPool.Get().(*bytes.Buffer)
+- buf.Reset() // don't trust others
+- defer bufPool.Put(buf)
+- if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+- buf.Reset()
+- return "", nil, err
+- }
++type undecodedString struct {
++ isHuff bool
++ b []byte
++}
++
++func (d *Decoder) decodeString(u undecodedString) (string, error) {
++ if !u.isHuff {
++ return string(u.b), nil
++ }
++ buf := bufPool.Get().(*bytes.Buffer)
++ buf.Reset() // don't trust others
++ var s string
++ err := huffmanDecode(buf, d.maxStrLen, u.b)
++ if err == nil {
+ s = buf.String()
+- buf.Reset() // be nice to GC
+ }
+- return s, p[strLen:], nil
++ buf.Reset() // be nice to GC
++ bufPool.Put(buf)
++ return s, err
+ }
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-24534.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-24534.patch
new file mode 100644
index 0000000000..c65c7852d5
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-24534.patch
@@ -0,0 +1,200 @@
+From d6759e7a059f4208f07aa781402841d7ddaaef96 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 10 Mar 2023 14:21:05 -0800
+Subject: [PATCH] [release-branch.go1.19] net/textproto: avoid overpredicting
+ the number of MIME header keys
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802452
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+(cherry picked from commit f739f080a72fd5b06d35c8e244165159645e2ed6)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802393
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: I675451438d619a9130360c56daf529559004903f
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481982
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/d6759e7a059f4208f07aa781402841d7ddaaef96]
+CVE: CVE-2023-24534
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+---
+ src/bytes/bytes.go | 14 ++++++++
+ src/net/textproto/reader.go | 30 ++++++++++------
+ src/net/textproto/reader_test.go | 59 ++++++++++++++++++++++++++++++++
+ 3 files changed, 92 insertions(+), 11 deletions(-)
+
+diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
+index ce52649..95ff31c 100644
+--- a/src/bytes/bytes.go
++++ b/src/bytes/bytes.go
+@@ -1174,3 +1174,17 @@ func Index(s, sep []byte) int {
+ }
+ return -1
+ }
++
++// Cut slices s around the first instance of sep,
++// returning the text before and after sep.
++// The found result reports whether sep appears in s.
++// If sep does not appear in s, cut returns s, nil, false.
++//
++// Cut returns slices of the original slice s, not copies.
++func Cut(s, sep []byte) (before, after []byte, found bool) {
++ if i := Index(s, sep); i >= 0 {
++ return s[:i], s[i+len(sep):], true
++ }
++ return s, nil, false
++}
++
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index 6a680f4..fcbede8 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -493,8 +493,11 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+ var strs []string
+- hint := r.upcomingHeaderNewlines()
++ hint := r.upcomingHeaderKeys()
+ if hint > 0 {
++ if hint > 1000 {
++ hint = 1000 // set a cap to avoid overallocation
++ }
+ strs = make([]string, hint)
+ }
+
+@@ -589,9 +592,11 @@ func mustHaveFieldNameColon(line []byte) error {
+ return nil
+ }
+
+-// upcomingHeaderNewlines returns an approximation of the number of newlines
++var nl = []byte("\n")
++
++// upcomingHeaderKeys returns an approximation of the number of keys
+ // that will be in this header. If it gets confused, it returns 0.
+-func (r *Reader) upcomingHeaderNewlines() (n int) {
++func (r *Reader) upcomingHeaderKeys() (n int) {
+ // Try to determine the 'hint' size.
+ r.R.Peek(1) // force a buffer load if empty
+ s := r.R.Buffered()
+@@ -599,17 +604,20 @@ func (r *Reader) upcomingHeaderNewlines() (n int) {
+ return
+ }
+ peek, _ := r.R.Peek(s)
+- for len(peek) > 0 {
+- i := bytes.IndexByte(peek, '\n')
+- if i < 3 {
+- // Not present (-1) or found within the next few bytes,
+- // implying we're at the end ("\r\n\r\n" or "\n\n")
+- return
++ for len(peek) > 0 && n < 1000 {
++ var line []byte
++ line, peek, _ = bytes.Cut(peek, nl)
++ if len(line) == 0 || (len(line) == 1 && line[0] == '\r') {
++ // Blank line separating headers from the body.
++ break
++ }
++ if line[0] == ' ' || line[0] == '\t' {
++ // Folded continuation of the previous line.
++ continue
+ }
+ n++
+- peek = peek[i+1:]
+ }
+- return
++ return n
+ }
+
+ // CanonicalMIMEHeaderKey returns the canonical format of the
+diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
+index 3124d43..3ae0de1 100644
+--- a/src/net/textproto/reader_test.go
++++ b/src/net/textproto/reader_test.go
+@@ -9,6 +9,7 @@ import (
+ "bytes"
+ "io"
+ "reflect"
++ "runtime"
+ "strings"
+ "testing"
+ )
+@@ -127,6 +128,42 @@ func TestReadMIMEHeaderSingle(t *testing.T) {
+ }
+ }
+
++// TestReaderUpcomingHeaderKeys is testing an internal function, but it's very
++// difficult to test well via the external API.
++func TestReaderUpcomingHeaderKeys(t *testing.T) {
++ for _, test := range []struct {
++ input string
++ want int
++ }{{
++ input: "",
++ want: 0,
++ }, {
++ input: "A: v",
++ want: 1,
++ }, {
++ input: "A: v\r\nB: v\r\n",
++ want: 2,
++ }, {
++ input: "A: v\nB: v\n",
++ want: 2,
++ }, {
++ input: "A: v\r\n continued\r\n still continued\r\nB: v\r\n\r\n",
++ want: 2,
++ }, {
++ input: "A: v\r\n\r\nB: v\r\nC: v\r\n",
++ want: 1,
++ }, {
++ input: "A: v" + strings.Repeat("\n", 1000),
++ want: 1,
++ }} {
++ r := reader(test.input)
++ got := r.upcomingHeaderKeys()
++ if test.want != got {
++ t.Fatalf("upcomingHeaderKeys(%q): %v; want %v", test.input, got, test.want)
++ }
++ }
++}
++
+ func TestReadMIMEHeaderNoKey(t *testing.T) {
+ r := reader(": bar\ntest-1: 1\n\n")
+ m, err := r.ReadMIMEHeader()
+@@ -223,6 +260,28 @@ func TestReadMIMEHeaderTrimContinued(t *testing.T) {
+ }
+ }
+
++// Test that reading a header doesn't overallocate. Issue 58975.
++func TestReadMIMEHeaderAllocations(t *testing.T) {
++ var totalAlloc uint64
++ const count = 200
++ for i := 0; i < count; i++ {
++ r := reader("A: b\r\n\r\n" + strings.Repeat("\n", 4096))
++ var m1, m2 runtime.MemStats
++ runtime.ReadMemStats(&m1)
++ _, err := r.ReadMIMEHeader()
++ if err != nil {
++ t.Fatalf("ReadMIMEHeader: %v", err)
++ }
++ runtime.ReadMemStats(&m2)
++ totalAlloc += m2.TotalAlloc - m1.TotalAlloc
++ }
++ // 32k is large and we actually allocate substantially less,
++ // but prior to the fix for #58975 we allocated ~400k in this case.
++ if got, want := totalAlloc/count, uint64(32768); got > want {
++ t.Fatalf("ReadMIMEHeader allocated %v bytes, want < %v", got, want)
++ }
++}
++
+ type readResponseTest struct {
+ in string
+ inCode int
+--
+2.25.1
+
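For illustration only (not part of the patch): the new upcomingHeaderKeys walks the buffered bytes with bytes.Cut, skipping folded continuation lines and stopping at the blank line, so it counts header keys rather than raw newlines. A minimal sketch of that loop outside the package:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        peek := []byte("Host: example.com\r\nAccept: */*\r\n folded part\r\n\r\nbody")
        nl := []byte("\n")
        n := 0
        for len(peek) > 0 && n < 1000 { // same 1000-key cap as the patch
            var line []byte
            line, peek, _ = bytes.Cut(peek, nl)
            if len(line) == 0 || (len(line) == 1 && line[0] == '\r') {
                break // blank line: end of the header block
            }
            if line[0] == ' ' || line[0] == '\t' {
                continue // folded continuation of the previous key
            }
            n++
        }
        fmt.Println(n) // 2
    }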
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-24537.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-24537.patch
new file mode 100644
index 0000000000..4521f159ea
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-24537.patch
@@ -0,0 +1,75 @@
+From bf8c7c575c8a552d9d79deb29e80854dc88528d0 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 20 Mar 2023 10:43:19 -0700
+Subject: [PATCH] [release-branch.go1.20] mime/multipart: limit parsed mime
+ message sizes
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802456
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802611
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: Ifdfa192d54f722d781a4d8c5f35b5fb72d122168
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481986
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/126a1d02da82f93ede7ce0bd8d3c51ef627f2104]
+CVE: CVE-2023-24537
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/go/parser/parser_test.go | 16 ++++++++++++++++
+ src/go/scanner/scanner.go | 5 ++++-
+ 2 files changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
+index 1a46c87..993df63 100644
+--- a/src/go/parser/parser_test.go
++++ b/src/go/parser/parser_test.go
+@@ -746,3 +746,19 @@ func TestScopeDepthLimit(t *testing.T) {
+ }
+ }
+ }
++
++// TestIssue59180 tests that line number overflow doesn't cause an infinite loop.
++func TestIssue59180(t *testing.T) {
++ testcases := []string{
++ "package p\n//line :9223372036854775806\n\n//",
++ "package p\n//line :1:9223372036854775806\n\n//",
++ "package p\n//line file:9223372036854775806\n\n//",
++ }
++
++ for _, src := range testcases {
++ _, err := ParseFile(token.NewFileSet(), "", src, ParseComments)
++ if err == nil {
++ t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
++ }
++ }
++}
+diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go
+index f08e28c..ff847b5 100644
+--- a/src/go/scanner/scanner.go
++++ b/src/go/scanner/scanner.go
+@@ -251,13 +251,16 @@ func (s *Scanner) updateLineInfo(next, offs int, text []byte) {
+ return
+ }
+
++ // Put a cap on the maximum size of line and column numbers.
++ // 30 bits allows for some additional space before wrapping an int32.
++ const maxLineCol = 1<<30 - 1
+ var line, col int
+ i2, n2, ok2 := trailingDigits(text[:i-1])
+ if ok2 {
+ //line filename:line:col
+ i, i2 = i2, i
+ line, col = n2, n
+- if col == 0 {
++ if col == 0 || col > maxLineCol {
+ s.error(offs+i2, "invalid column number: "+string(text[i2:]))
+ return
+ }
+--
+2.25.1
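To see the behaviour the added TestIssue59180 cases exercise (illustrative sketch, assuming a toolchain carrying this fix): a //line directive with an absurd column is now rejected by the scanner instead of causing misbehaviour.

    package main

    import (
        "fmt"
        "go/parser"
        "go/token"
    )

    func main() {
        // One of the inputs from TestIssue59180 above.
        src := "package p\n//line :1:9223372036854775806\n\n//"
        _, err := parser.ParseFile(token.NewFileSet(), "", src, parser.ParseComments)
        fmt.Println(err) // expected: an "invalid column number" error rather than a hang
    }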
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-24538_1.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-24538_1.patch
new file mode 100644
index 0000000000..bb0a416f46
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-24538_1.patch
@@ -0,0 +1,597 @@
+From b1e4e8ec7e946ff2d3bb37ac99c5468ceb49c362 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Thu, 20 May 2021 12:46:33 -0400
+Subject: [PATCH 1/2] html/template, text/template: implement break and
+ continue for range loops
+
+Break and continue for range loops was accepted as a proposal in June 2017.
+It was implemented in CL 66410 (Oct 2017)
+but then rolled back in CL 92155 (Feb 2018)
+because html/template changes had not been implemented.
+
+This CL reimplements break and continue in text/template
+and then adds support for them in html/template as well.
+
+Fixes #20531.
+
+Change-Id: I05330482a976f1c078b4b49c2287bd9031bb7616
+Reviewed-on: https://go-review.googlesource.com/c/go/+/321491
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Rob Pike <r@golang.org>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/d0dd26a88c019d54f22463daae81e785f5867565
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/context.go | 4 ++
+ src/html/template/escape.go | 71 ++++++++++++++++++++++++++++++++++-
+ src/html/template/escape_test.go | 24 ++++++++++++
+ src/html/template/exec_test.go | 2 +
+ src/text/template/doc.go | 8 ++++
+ src/text/template/exec.go | 24 +++++++++++-
+ src/text/template/exec_test.go | 2 +
+ src/text/template/parse/lex.go | 13 ++++++-
+ src/text/template/parse/lex_test.go | 2 +
+ src/text/template/parse/node.go | 36 ++++++++++++++++++
+ src/text/template/parse/parse.go | 42 ++++++++++++++++++++-
+ src/text/template/parse/parse_test.go | 8 ++++
+ 12 files changed, 232 insertions(+), 4 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index f7d4849..aaa7d08 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -6,6 +6,7 @@ package template
+
+ import (
+ "fmt"
++ "text/template/parse"
+ )
+
+ // context describes the state an HTML parser must be in when it reaches the
+@@ -22,6 +23,7 @@ type context struct {
+ jsCtx jsCtx
+ attr attr
+ element element
++ n parse.Node // for range break/continue
+ err *Error
+ }
+
+@@ -141,6 +143,8 @@ const (
+ // stateError is an infectious error state outside any valid
+ // HTML/CSS/JS construct.
+ stateError
++ // stateDead marks unreachable code after a {{break}} or {{continue}}.
++ stateDead
+ )
+
+ // isComment is true for any state that contains content meant for template
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 8739735..6dea79c 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -97,6 +97,15 @@ type escaper struct {
+ actionNodeEdits map[*parse.ActionNode][]string
+ templateNodeEdits map[*parse.TemplateNode]string
+ textNodeEdits map[*parse.TextNode][]byte
++ // rangeContext holds context about the current range loop.
++ rangeContext *rangeContext
++}
++
++// rangeContext holds information about the current range loop.
++type rangeContext struct {
++ outer *rangeContext // outer loop
++ breaks []context // context at each break action
++ continues []context // context at each continue action
+ }
+
+ // makeEscaper creates a blank escaper for the given set.
+@@ -109,6 +118,7 @@ func makeEscaper(n *nameSpace) escaper {
+ map[*parse.ActionNode][]string{},
+ map[*parse.TemplateNode]string{},
+ map[*parse.TextNode][]byte{},
++ nil,
+ }
+ }
+
+@@ -124,8 +134,16 @@ func (e *escaper) escape(c context, n parse.Node) context {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ return e.escapeAction(c, n)
++ case *parse.BreakNode:
++ c.n = n
++ e.rangeContext.breaks = append(e.rangeContext.breaks, c)
++ return context{state: stateDead}
+ case *parse.CommentNode:
+ return c
++ case *parse.ContinueNode:
++ c.n = n
++ e.rangeContext.continues = append(e.rangeContext.continues, c)
++ return context{state: stateDead}
+ case *parse.IfNode:
+ return e.escapeBranch(c, &n.BranchNode, "if")
+ case *parse.ListNode:
+@@ -427,6 +445,12 @@ func join(a, b context, node parse.Node, nodeName string) context {
+ if b.state == stateError {
+ return b
+ }
++ if a.state == stateDead {
++ return b
++ }
++ if b.state == stateDead {
++ return a
++ }
+ if a.eq(b) {
+ return a
+ }
+@@ -466,14 +490,27 @@ func join(a, b context, node parse.Node, nodeName string) context {
+
+ // escapeBranch escapes a branch template node: "if", "range" and "with".
+ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
++ if nodeName == "range" {
++ e.rangeContext = &rangeContext{outer: e.rangeContext}
++ }
+ c0 := e.escapeList(c, n.List)
+- if nodeName == "range" && c0.state != stateError {
++ if nodeName == "range" {
++ if c0.state != stateError {
++ c0 = joinRange(c0, e.rangeContext)
++ }
++ e.rangeContext = e.rangeContext.outer
++ if c0.state == stateError {
++ return c0
++ }
++
+ // The "true" branch of a "range" node can execute multiple times.
+ // We check that executing n.List once results in the same context
+ // as executing n.List twice.
++ e.rangeContext = &rangeContext{outer: e.rangeContext}
+ c1, _ := e.escapeListConditionally(c0, n.List, nil)
+ c0 = join(c0, c1, n, nodeName)
+ if c0.state == stateError {
++ e.rangeContext = e.rangeContext.outer
+ // Make clear that this is a problem on loop re-entry
+ // since developers tend to overlook that branch when
+ // debugging templates.
+@@ -481,11 +518,39 @@ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string)
+ c0.err.Description = "on range loop re-entry: " + c0.err.Description
+ return c0
+ }
++ c0 = joinRange(c0, e.rangeContext)
++ e.rangeContext = e.rangeContext.outer
++ if c0.state == stateError {
++ return c0
++ }
+ }
+ c1 := e.escapeList(c, n.ElseList)
+ return join(c0, c1, n, nodeName)
+ }
+
++func joinRange(c0 context, rc *rangeContext) context {
++ // Merge contexts at break and continue statements into overall body context.
++ // In theory we could treat breaks differently from continues, but for now it is
++ // enough to treat them both as going back to the start of the loop (which may then stop).
++ for _, c := range rc.breaks {
++ c0 = join(c0, c, c.n, "range")
++ if c0.state == stateError {
++ c0.err.Line = c.n.(*parse.BreakNode).Line
++ c0.err.Description = "at range loop break: " + c0.err.Description
++ return c0
++ }
++ }
++ for _, c := range rc.continues {
++ c0 = join(c0, c, c.n, "range")
++ if c0.state == stateError {
++ c0.err.Line = c.n.(*parse.ContinueNode).Line
++ c0.err.Description = "at range loop continue: " + c0.err.Description
++ return c0
++ }
++ }
++ return c0
++}
++
+ // escapeList escapes a list template node.
+ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ if n == nil {
+@@ -493,6 +558,9 @@ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ }
+ for _, m := range n.Nodes {
+ c = e.escape(c, m)
++ if c.state == stateDead {
++ break
++ }
+ }
+ return c
+ }
+@@ -503,6 +571,7 @@ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ // which is the same as whether e was updated.
+ func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
+ e1 := makeEscaper(e.ns)
++ e1.rangeContext = e.rangeContext
+ // Make type inferences available to f.
+ for k, v := range e.output {
+ e1.output[k] = v
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index fbc84a7..3b0aa8c 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -920,6 +920,22 @@ func TestErrors(t *testing.T) {
+ "<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
+ "",
+ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{continue}}{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{break}}{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
++ "",
++ },
+ // Error cases.
+ {
+ "{{if .Cond}}<a{{end}}",
+@@ -956,6 +972,14 @@ func TestErrors(t *testing.T) {
+ "z:2:8: on range loop re-entry: {{range}} branches",
+ },
+ {
++ "{{range .Items}}<a{{if .X}}{{break}}{{end}}>{{end}}",
++ "z:1:29: at range loop break: {{range}} branches end in different contexts",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{continue}}{{end}}>{{end}}",
++ "z:1:29: at range loop continue: {{range}} branches end in different contexts",
++ },
++ {
+ "<a b=1 c={{.H}}",
+ "z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
+ },
+diff --git a/src/html/template/exec_test.go b/src/html/template/exec_test.go
+index 8885873..523340b 100644
+--- a/src/html/template/exec_test.go
++++ b/src/html/template/exec_test.go
+@@ -567,6 +567,8 @@ var execTests = []execTest{
+ {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
++ {"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
++ {"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+ {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+ {"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+diff --git a/src/text/template/doc.go b/src/text/template/doc.go
+index 7b30294..0228b15 100644
+--- a/src/text/template/doc.go
++++ b/src/text/template/doc.go
+@@ -112,6 +112,14 @@ data, defined in detail in the corresponding sections that follow.
+ T0 is executed; otherwise, dot is set to the successive elements
+ of the array, slice, or map and T1 is executed.
+
++ {{break}}
++ The innermost {{range pipeline}} loop is ended early, stopping the
++ current iteration and bypassing all remaining iterations.
++
++ {{continue}}
++ The current iteration of the innermost {{range pipeline}} loop is
++ stopped, and the loop starts the next iteration.
++
+ {{template "name"}}
+ The template with the specified name is executed with nil data.
+
+diff --git a/src/text/template/exec.go b/src/text/template/exec.go
+index 5ad3b4e..92fa9d9 100644
+--- a/src/text/template/exec.go
++++ b/src/text/template/exec.go
+@@ -5,6 +5,7 @@
+ package template
+
+ import (
++ "errors"
+ "fmt"
+ "internal/fmtsort"
+ "io"
+@@ -243,6 +244,12 @@ func (t *Template) DefinedTemplates() string {
+ return b.String()
+ }
+
++// Sentinel errors for use with panic to signal early exits from range loops.
++var (
++ walkBreak = errors.New("break")
++ walkContinue = errors.New("continue")
++)
++
+ // Walk functions step through the major pieces of the template structure,
+ // generating output as they go.
+ func (s *state) walk(dot reflect.Value, node parse.Node) {
+@@ -255,7 +262,11 @@ func (s *state) walk(dot reflect.Value, node parse.Node) {
+ if len(node.Pipe.Decl) == 0 {
+ s.printValue(node, val)
+ }
++ case *parse.BreakNode:
++ panic(walkBreak)
+ case *parse.CommentNode:
++ case *parse.ContinueNode:
++ panic(walkContinue)
+ case *parse.IfNode:
+ s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
+ case *parse.ListNode:
+@@ -334,6 +345,11 @@ func isTrue(val reflect.Value) (truth, ok bool) {
+
+ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ s.at(r)
++ defer func() {
++ if r := recover(); r != nil && r != walkBreak {
++ panic(r)
++ }
++ }()
+ defer s.pop(s.mark())
+ val, _ := indirect(s.evalPipeline(dot, r.Pipe))
+ // mark top of stack before any variables in the body are pushed.
+@@ -347,8 +363,14 @@ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ if len(r.Pipe.Decl) > 1 {
+ s.setTopVar(2, index)
+ }
++ defer s.pop(mark)
++ defer func() {
++ // Consume panic(walkContinue)
++ if r := recover(); r != nil && r != walkContinue {
++ panic(r)
++ }
++ }()
+ s.walk(elem, r.List)
+- s.pop(mark)
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
+index ef52164..586af55 100644
+--- a/src/text/template/exec_test.go
++++ b/src/text/template/exec_test.go
+@@ -564,6 +564,8 @@ var execTests = []execTest{
+ {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
++ {"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
++ {"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+ {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+ {"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
+index 6784071..95e3377 100644
+--- a/src/text/template/parse/lex.go
++++ b/src/text/template/parse/lex.go
+@@ -62,6 +62,8 @@ const (
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemBlock // block keyword
++ itemBreak // break keyword
++ itemContinue // continue keyword
+ itemDot // the cursor, spelled '.'
+ itemDefine // define keyword
+ itemElse // else keyword
+@@ -76,6 +78,8 @@ const (
+ var key = map[string]itemType{
+ ".": itemDot,
+ "block": itemBlock,
++ "break": itemBreak,
++ "continue": itemContinue,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+@@ -119,6 +123,8 @@ type lexer struct {
+ parenDepth int // nesting depth of ( ) exprs
+ line int // 1+number of newlines seen
+ startLine int // start line of this item
++ breakOK bool // break keyword allowed
++ continueOK bool // continue keyword allowed
+ }
+
+ // next returns the next rune in the input.
+@@ -461,7 +467,12 @@ Loop:
+ }
+ switch {
+ case key[word] > itemKeyword:
+- l.emit(key[word])
++ item := key[word]
++ if item == itemBreak && !l.breakOK || item == itemContinue && !l.continueOK {
++ l.emit(itemIdentifier)
++ } else {
++ l.emit(item)
++ }
+ case word[0] == '.':
+ l.emit(itemField)
+ case word == "true", word == "false":
+diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
+index 6510eed..df6aabf 100644
+--- a/src/text/template/parse/lex_test.go
++++ b/src/text/template/parse/lex_test.go
+@@ -35,6 +35,8 @@ var itemName = map[itemType]string{
+ // keywords
+ itemDot: ".",
+ itemBlock: "block",
++ itemBreak: "break",
++ itemContinue: "continue",
+ itemDefine: "define",
+ itemElse: "else",
+ itemIf: "if",
+diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
+index 177482f..4726822 100644
+--- a/src/text/template/parse/node.go
++++ b/src/text/template/parse/node.go
+@@ -71,6 +71,8 @@ const (
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
+ NodeComment // A comment.
++ NodeBreak // A break action.
++ NodeContinue // A continue action.
+ )
+
+ // Nodes.
+@@ -907,6 +909,40 @@ func (i *IfNode) Copy() Node {
+ return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
+ }
+
++// BreakNode represents a {{break}} action.
++type BreakNode struct {
++ tr *Tree
++ NodeType
++ Pos
++ Line int
++}
++
++func (t *Tree) newBreak(pos Pos, line int) *BreakNode {
++ return &BreakNode{tr: t, NodeType: NodeBreak, Pos: pos, Line: line}
++}
++
++func (b *BreakNode) Copy() Node { return b.tr.newBreak(b.Pos, b.Line) }
++func (b *BreakNode) String() string { return "{{break}}" }
++func (b *BreakNode) tree() *Tree { return b.tr }
++func (b *BreakNode) writeTo(sb *strings.Builder) { sb.WriteString("{{break}}") }
++
++// ContinueNode represents a {{continue}} action.
++type ContinueNode struct {
++ tr *Tree
++ NodeType
++ Pos
++ Line int
++}
++
++func (t *Tree) newContinue(pos Pos, line int) *ContinueNode {
++ return &ContinueNode{tr: t, NodeType: NodeContinue, Pos: pos, Line: line}
++}
++
++func (c *ContinueNode) Copy() Node { return c.tr.newContinue(c.Pos, c.Line) }
++func (c *ContinueNode) String() string { return "{{continue}}" }
++func (c *ContinueNode) tree() *Tree { return c.tr }
++func (c *ContinueNode) writeTo(sb *strings.Builder) { sb.WriteString("{{continue}}") }
++
+ // RangeNode represents a {{range}} action and its commands.
+ type RangeNode struct {
+ BranchNode
+diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
+index 1a63961..d92bed5 100644
+--- a/src/text/template/parse/parse.go
++++ b/src/text/template/parse/parse.go
+@@ -31,6 +31,7 @@ type Tree struct {
+ vars []string // variables defined at the moment.
+ treeSet map[string]*Tree
+ actionLine int // line of left delim starting action
++ rangeDepth int
+ mode Mode
+ }
+
+@@ -224,6 +225,8 @@ func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer, treeSet ma
+ t.vars = []string{"$"}
+ t.funcs = funcs
+ t.treeSet = treeSet
++ lex.breakOK = !t.hasFunction("break")
++ lex.continueOK = !t.hasFunction("continue")
+ }
+
+ // stopParse terminates parsing.
+@@ -386,6 +389,10 @@ func (t *Tree) action() (n Node) {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemBlock:
+ return t.blockControl()
++ case itemBreak:
++ return t.breakControl(token.pos, token.line)
++ case itemContinue:
++ return t.continueControl(token.pos, token.line)
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+@@ -405,6 +412,32 @@ func (t *Tree) action() (n Node) {
+ return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
+ }
+
++// Break:
++// {{break}}
++// Break keyword is past.
++func (t *Tree) breakControl(pos Pos, line int) Node {
++ if token := t.next(); token.typ != itemRightDelim {
++ t.unexpected(token, "in {{break}}")
++ }
++ if t.rangeDepth == 0 {
++ t.errorf("{{break}} outside {{range}}")
++ }
++ return t.newBreak(pos, line)
++}
++
++// Continue:
++// {{continue}}
++// Continue keyword is past.
++func (t *Tree) continueControl(pos Pos, line int) Node {
++ if token := t.next(); token.typ != itemRightDelim {
++ t.unexpected(token, "in {{continue}}")
++ }
++ if t.rangeDepth == 0 {
++ t.errorf("{{continue}} outside {{range}}")
++ }
++ return t.newContinue(pos, line)
++}
++
+ // Pipeline:
+ // declarations? command ('|' command)*
+ func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
+@@ -480,8 +513,14 @@ func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
+ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+ defer t.popVars(len(t.vars))
+ pipe = t.pipeline(context, itemRightDelim)
++ if context == "range" {
++ t.rangeDepth++
++ }
+ var next Node
+ list, next = t.itemList()
++ if context == "range" {
++ t.rangeDepth--
++ }
+ switch next.Type() {
+ case nodeEnd: //done
+ case nodeElse:
+@@ -523,7 +562,8 @@ func (t *Tree) ifControl() Node {
+ // {{range pipeline}} itemList {{else}} itemList {{end}}
+ // Range keyword is past.
+ func (t *Tree) rangeControl() Node {
+- return t.newRange(t.parseControl(false, "range"))
++ r := t.newRange(t.parseControl(false, "range"))
++ return r
+ }
+
+ // With:
+diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
+index 9b1be27..c3679a0 100644
+--- a/src/text/template/parse/parse_test.go
++++ b/src/text/template/parse/parse_test.go
+@@ -230,6 +230,10 @@ var parseTests = []parseTest{
+ `{{range $x := .SI}}{{.}}{{end}}`},
+ {"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
+ `{{range $x, $y := .SI}}{{.}}{{end}}`},
++ {"range with break", "{{range .SI}}{{.}}{{break}}{{end}}", noError,
++ `{{range .SI}}{{.}}{{break}}{{end}}`},
++ {"range with continue", "{{range .SI}}{{.}}{{continue}}{{end}}", noError,
++ `{{range .SI}}{{.}}{{continue}}{{end}}`},
+ {"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
+ `{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
+ {"template", "{{template `x`}}", noError,
+@@ -279,6 +283,10 @@ var parseTests = []parseTest{
+ {"adjacent args", "{{printf 3`x`}}", hasError, ""},
+ {"adjacent args with .", "{{printf `x`.}}", hasError, ""},
+ {"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
++ {"break outside range", "{{range .}}{{end}} {{break}}", hasError, ""},
++ {"continue outside range", "{{range .}}{{end}} {{continue}}", hasError, ""},
++ {"break in range else", "{{range .}}{{else}}{{break}}{{end}}", hasError, ""},
++ {"continue in range else", "{{range .}}{{else}}{{continue}}{{end}}", hasError, ""},
+ // Other kinds of assignments and operators aren't available yet.
+ {"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
+ {"bug0b", "{{$x += 1}}{{$x}}", hasError, ""},
+--
+2.7.4
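For reference (not part of the patch): with this backport applied, text/template accepts {{break}} and {{continue}} inside {{range}}, as the added exec_test cases show. A small sketch assuming such a toolchain:

    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        const src = "{{range .}}{{if gt . 3}}{{break}}{{end}}{{.}} {{end}}\n"
        t := template.Must(template.New("demo").Parse(src))
        if err := t.Execute(os.Stdout, []int{1, 2, 3, 4, 5}); err != nil {
            panic(err)
        }
        // prints "1 2 3 "
    }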
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-24538_2.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-24538_2.patch
new file mode 100644
index 0000000000..f94f0f55c7
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-24538_2.patch
@@ -0,0 +1,371 @@
+From 07cc3b8711a8efbb5885f56dd90d854049ad2f7d Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Mon, 20 Mar 2023 11:01:13 -0700
+Subject: [PATCH 2/2] html/template: disallow actions in JS template literals
+
+ECMAScript 6 introduced template literals[0][1] which are delimited with
+backticks. These need to be escaped in a similar fashion to the
+delimiters for other string literals. Additionally template literals can
+contain special syntax for string interpolation.
+
+There is no clear way to allow safe insertion of actions within JS
+template literals, as handling (JS) string interpolation inside of these
+literals is rather complex. As such we've chosen to simply disallow
+template actions within these template literals.
+
+A new error code is added for this parsing failure case, errJsTmplLit,
+but it is unexported as it is not backwards compatible with other minor
+release versions to introduce an API change in a minor release. We will
+export this code in the next major release.
+
+The previous behavior (with the caveat that backticks are now escaped
+properly) can be re-enabled with GODEBUG=jstmpllitinterp=1.
+
+This change subsumes CL471455.
+
+Thanks to Sohom Datta, Manipal Institute of Technology, for reporting
+this issue.
+
+Fixes CVE-2023-24538
+For #59234
+Fixes #59271
+
+[0] https://tc39.es/ecma262/multipage/ecmascript-language-expressions.html#sec-template-literals
+[1] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802457
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802612
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: Ic7f10595615f2b2740d9c85ad7ef40dc0e78c04c
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481987
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/b1e3ecfa06b67014429a197ec5e134ce4303ad9b
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/context.go | 2 ++
+ src/html/template/error.go | 13 ++++++++
+ src/html/template/escape.go | 11 +++++++
+ src/html/template/escape_test.go | 66 ++++++++++++++++++++++-----------------
+ src/html/template/js.go | 2 ++
+ src/html/template/js_test.go | 2 +-
+ src/html/template/jsctx_string.go | 9 ++++++
+ src/html/template/state_string.go | 37 ++++++++++++++++++++--
+ src/html/template/transition.go | 7 ++++-
+ 9 files changed, 116 insertions(+), 33 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index f7d4849..0b65313 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -116,6 +116,8 @@ const (
+ stateJSDqStr
+ // stateJSSqStr occurs inside a JavaScript single quoted string.
+ stateJSSqStr
++ // stateJSBqStr occurs inside a JavaScript back quoted string.
++ stateJSBqStr
+ // stateJSRegexp occurs inside a JavaScript regexp literal.
+ stateJSRegexp
+ // stateJSBlockCmt occurs inside a JavaScript /* block comment */.
+diff --git a/src/html/template/error.go b/src/html/template/error.go
+index 0e52706..fd26b64 100644
+--- a/src/html/template/error.go
++++ b/src/html/template/error.go
+@@ -211,6 +211,19 @@ const (
+ // pipeline occurs in an unquoted attribute value context, "html" is
+ // disallowed. Avoid using "html" and "urlquery" entirely in new templates.
+ ErrPredefinedEscaper
++
++ // errJSTmplLit: "... appears in a JS template literal"
++ // Example:
++ // <script>var tmpl = `{{.Interp}`</script>
++ // Discussion:
++ // Package html/template does not support actions inside of JS template
++ // literals.
++ //
++ // TODO(rolandshoemaker): we cannot add this as an exported error in a minor
++ // release, since it is backwards incompatible with the other minor
++ // releases. As such we need to leave it unexported, and then we'll add it
++ // in the next major release.
++ errJSTmplLit
+ )
+
+ func (e *Error) Error() string {
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 8739735..ca078f4 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -8,6 +8,7 @@ import (
+ "bytes"
+ "fmt"
+ "html"
++ "internal/godebug"
+ "io"
+ "text/template"
+ "text/template/parse"
+@@ -205,6 +206,16 @@ func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
+ c.jsCtx = jsCtxDivOp
+ case stateJSDqStr, stateJSSqStr:
+ s = append(s, "_html_template_jsstrescaper")
++ case stateJSBqStr:
++ debugAllowActionJSTmpl := godebug.Get("jstmpllitinterp")
++ if debugAllowActionJSTmpl == "1" {
++ s = append(s, "_html_template_jsstrescaper")
++ } else {
++ return context{
++ state: stateError,
++ err: errorf(errJSTmplLit, n, n.Line, "%s appears in a JS template literal", n),
++ }
++ }
+ case stateJSRegexp:
+ s = append(s, "_html_template_jsregexpescaper")
+ case stateCSS:
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index 3b0aa8c..a695b17 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -681,35 +681,31 @@ func TestEscape(t *testing.T) {
+ }
+
+ for _, test := range tests {
+- tmpl := New(test.name)
+- tmpl = Must(tmpl.Parse(test.input))
+- // Check for bug 6459: Tree field was not set in Parse.
+- if tmpl.Tree != tmpl.text.Tree {
+- t.Errorf("%s: tree not set properly", test.name)
+- continue
+- }
+- b := new(bytes.Buffer)
+- if err := tmpl.Execute(b, data); err != nil {
+- t.Errorf("%s: template execution failed: %s", test.name, err)
+- continue
+- }
+- if w, g := test.output, b.String(); w != g {
+- t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
+- continue
+- }
+- b.Reset()
+- if err := tmpl.Execute(b, pdata); err != nil {
+- t.Errorf("%s: template execution failed for pointer: %s", test.name, err)
+- continue
+- }
+- if w, g := test.output, b.String(); w != g {
+- t.Errorf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
+- continue
+- }
+- if tmpl.Tree != tmpl.text.Tree {
+- t.Errorf("%s: tree mismatch", test.name)
+- continue
+- }
++ t.Run(test.name, func(t *testing.T) {
++ tmpl := New(test.name)
++ tmpl = Must(tmpl.Parse(test.input))
++ // Check for bug 6459: Tree field was not set in Parse.
++ if tmpl.Tree != tmpl.text.Tree {
++ t.Fatalf("%s: tree not set properly", test.name)
++ }
++ b := new(strings.Builder)
++ if err := tmpl.Execute(b, data); err != nil {
++ t.Fatalf("%s: template execution failed: %s", test.name, err)
++ }
++ if w, g := test.output, b.String(); w != g {
++ t.Fatalf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
++ }
++ b.Reset()
++ if err := tmpl.Execute(b, pdata); err != nil {
++ t.Fatalf("%s: template execution failed for pointer: %s", test.name, err)
++ }
++ if w, g := test.output, b.String(); w != g {
++ t.Fatalf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
++ }
++ if tmpl.Tree != tmpl.text.Tree {
++ t.Fatalf("%s: tree mismatch", test.name)
++ }
++ })
+ }
+ }
+
+@@ -936,6 +932,10 @@ func TestErrors(t *testing.T) {
+ "{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
+ "",
+ },
++ {
++ "<script>var a = `${a+b}`</script>`",
++ "",
++ },
+ // Error cases.
+ {
+ "{{if .Cond}}<a{{end}}",
+@@ -1082,6 +1082,10 @@ func TestErrors(t *testing.T) {
+ // html is allowed since it is the last command in the pipeline, but urlquery is not.
+ `predefined escaper "urlquery" disallowed in template`,
+ },
++ {
++ "<script>var tmpl = `asd {{.}}`;</script>",
++ `{{.}} appears in a JS template literal`,
++ },
+ }
+ for _, test := range tests {
+ buf := new(bytes.Buffer)
+@@ -1304,6 +1308,10 @@ func TestEscapeText(t *testing.T) {
+ context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+ },
+ {
++ "<a onclick=\"`foo",
++ context{state: stateJSBqStr, delim: delimDoubleQuote, attr: attrScript},
++ },
++ {
+ `<A ONCLICK="'`,
+ context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+ },
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index ea9c183..b888eaf 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -308,6 +308,7 @@ var jsStrReplacementTable = []string{
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+ '"': `\u0022`,
++ '`': `\u0060`,
+ '&': `\u0026`,
+ '\'': `\u0027`,
+ '+': `\u002b`,
+@@ -331,6 +332,7 @@ var jsStrNormReplacementTable = []string{
+ '"': `\u0022`,
+ '&': `\u0026`,
+ '\'': `\u0027`,
++ '`': `\u0060`,
+ '+': `\u002b`,
+ '/': `\/`,
+ '<': `\u003c`,
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index d7ee47b..7d963ae 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -292,7 +292,7 @@ func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+ `0123456789:;\u003c=\u003e?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\\]^_` +
+- "`abcdefghijklmno" +
++ "\\u0060abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\u007f" +
+ "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+ },
+diff --git a/src/html/template/jsctx_string.go b/src/html/template/jsctx_string.go
+index dd1d87e..2394893 100644
+--- a/src/html/template/jsctx_string.go
++++ b/src/html/template/jsctx_string.go
+@@ -4,6 +4,15 @@ package template
+
+ import "strconv"
+
++func _() {
++ // An "invalid array index" compiler error signifies that the constant values have changed.
++ // Re-run the stringer command to generate them again.
++ var x [1]struct{}
++ _ = x[jsCtxRegexp-0]
++ _ = x[jsCtxDivOp-1]
++ _ = x[jsCtxUnknown-2]
++}
++
+ const _jsCtx_name = "jsCtxRegexpjsCtxDivOpjsCtxUnknown"
+
+ var _jsCtx_index = [...]uint8{0, 11, 21, 33}
+diff --git a/src/html/template/state_string.go b/src/html/template/state_string.go
+index 05104be..6fb1a6e 100644
+--- a/src/html/template/state_string.go
++++ b/src/html/template/state_string.go
+@@ -4,9 +4,42 @@ package template
+
+ import "strconv"
+
+-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateError"
++func _() {
++ // An "invalid array index" compiler error signifies that the constant values have changed.
++ // Re-run the stringer command to generate them again.
++ var x [1]struct{}
++ _ = x[stateText-0]
++ _ = x[stateTag-1]
++ _ = x[stateAttrName-2]
++ _ = x[stateAfterName-3]
++ _ = x[stateBeforeValue-4]
++ _ = x[stateHTMLCmt-5]
++ _ = x[stateRCDATA-6]
++ _ = x[stateAttr-7]
++ _ = x[stateURL-8]
++ _ = x[stateSrcset-9]
++ _ = x[stateJS-10]
++ _ = x[stateJSDqStr-11]
++ _ = x[stateJSSqStr-12]
++ _ = x[stateJSBqStr-13]
++ _ = x[stateJSRegexp-14]
++ _ = x[stateJSBlockCmt-15]
++ _ = x[stateJSLineCmt-16]
++ _ = x[stateCSS-17]
++ _ = x[stateCSSDqStr-18]
++ _ = x[stateCSSSqStr-19]
++ _ = x[stateCSSDqURL-20]
++ _ = x[stateCSSSqURL-21]
++ _ = x[stateCSSURL-22]
++ _ = x[stateCSSBlockCmt-23]
++ _ = x[stateCSSLineCmt-24]
++ _ = x[stateError-25]
++ _ = x[stateDead-26]
++}
++
++const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
+
+-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 155, 170, 184, 192, 205, 218, 231, 244, 255, 271, 286, 296}
++var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 204, 217, 230, 243, 256, 267, 283, 298, 308, 317}
+
+ func (i state) String() string {
+ if i >= state(len(_state_index)-1) {
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 06df679..92eb351 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -27,6 +27,7 @@ var transitionFunc = [...]func(context, []byte) (context, int){
+ stateJS: tJS,
+ stateJSDqStr: tJSDelimited,
+ stateJSSqStr: tJSDelimited,
++ stateJSBqStr: tJSDelimited,
+ stateJSRegexp: tJSDelimited,
+ stateJSBlockCmt: tBlockCmt,
+ stateJSLineCmt: tLineCmt,
+@@ -262,7 +263,7 @@ func tURL(c context, s []byte) (context, int) {
+
+ // tJS is the context transition function for the JS state.
+ func tJS(c context, s []byte) (context, int) {
+- i := bytes.IndexAny(s, `"'/`)
++ i := bytes.IndexAny(s, "\"`'/")
+ if i == -1 {
+ // Entire input is non string, comment, regexp tokens.
+ c.jsCtx = nextJSCtx(s, c.jsCtx)
+@@ -274,6 +275,8 @@ func tJS(c context, s []byte) (context, int) {
+ c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
+ case '\'':
+ c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
++ case '`':
++ c.state, c.jsCtx = stateJSBqStr, jsCtxRegexp
+ case '/':
+ switch {
+ case i+1 < len(s) && s[i+1] == '/':
+@@ -303,6 +306,8 @@ func tJSDelimited(c context, s []byte) (context, int) {
+ switch c.state {
+ case stateJSSqStr:
+ specials = `\'`
++ case stateJSBqStr:
++ specials = "`\\"
+ case stateJSRegexp:
+ specials = `\/[]`
+ }
+--
+2.7.4
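Illustration only (assuming the patched html/template): actions inside JavaScript template literals are now rejected at escaping time, matching the error string added to escape_test above, unless GODEBUG=jstmpllitinterp=1 restores the old behaviour.

    package main

    import (
        "fmt"
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("t").Parse("<script>var msg = `hello {{.}}`;</script>"))
        err := t.Execute(os.Stdout, "world")
        fmt.Println(err)
        // expected with the patch: "... {{.}} appears in a JS template literal"
    }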
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-24539.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-24539.patch
new file mode 100644
index 0000000000..fa19e18264
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-24539.patch
@@ -0,0 +1,53 @@
+From e49282327b05192e46086bf25fd3ac691205fe80 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 13 Apr 2023 15:40:44 -0700
+Subject: [PATCH] [release-branch.go1.19] html/template: disallow angle
+ brackets in CSS values
+
+Change-Id: Iccc659c9a18415992b0c05c178792228e3a7bae4
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1826636
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1851496
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491335
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/e49282327b05192e46086bf25fd3ac691205fe80]
+CVE: CVE-2023-24539
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/html/template/css.go | 2 +-
+ src/html/template/css_test.go | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/html/template/css.go b/src/html/template/css.go
+index 890a0c6b227fe..f650d8b3e843a 100644
+--- a/src/html/template/css.go
++++ b/src/html/template/css.go
+@@ -238,7 +238,7 @@ func cssValueFilter(args ...any) string {
+ // inside a string that might embed JavaScript source.
+ for i, c := range b {
+ switch c {
+- case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
++ case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}', '<', '>':
+ return filterFailsafe
+ case '-':
+ // Disallow <!-- or -->.
+diff --git a/src/html/template/css_test.go b/src/html/template/css_test.go
+index a735638b0314f..2b76256a766e9 100644
+--- a/src/html/template/css_test.go
++++ b/src/html/template/css_test.go
+@@ -231,6 +231,8 @@ func TestCSSValueFilter(t *testing.T) {
+ {`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
+ {`-expre\0000073sion`, "-expre\x073sion"},
+ {`@import url evil.css`, "ZgotmplZ"},
++ {"<", "ZgotmplZ"},
++ {">", "ZgotmplZ"},
+ }
+ for _, test := range tests {
+ got := cssValueFilter(test.css)
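Illustration only (assuming the patched html/template): values interpolated into a CSS context that contain '<' or '>' now come out as the ZgotmplZ failsafe, as the two test rows added above expect.

    package main

    import (
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("t").Parse(`<p style="width: {{.}}">x</p>`))
        if err := t.Execute(os.Stdout, "10px>"); err != nil { // value containing '>' is filtered
            panic(err)
        }
        // expected with the patch: <p style="width: ZgotmplZ">x</p>
    }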
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-29400.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-29400.patch
new file mode 100644
index 0000000000..04bd1f5fec
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-29400.patch
@@ -0,0 +1,99 @@
+From 9db0e74f606b8afb28cc71d4b1c8b4ed24cabbf5 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 13 Apr 2023 14:01:50 -0700
+Subject: [PATCH] [release-branch.go1.19] html/template: emit filterFailsafe
+ for empty unquoted attr value
+
+An unquoted action used as an attribute value can result in unsafe
+behavior if it is empty, as HTML normalization will result in unexpected
+attributes, and may allow attribute injection. If executing a template
+results in an empty unquoted attribute value, emit filterFailsafe
+instead.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+For #59722
+Fixes #59815
+Fixes CVE-2023-29400
+
+Change-Id: Ia38d1b536ae2b4af5323a6c6d861e3c057c2570a
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1826631
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1851498
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491357
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/9db0e74f606b8afb28cc71d4b1c8b4ed24cabbf5]
+CVE: CVE-2023-29400
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/html/template/escape.go | 5 ++---
+ src/html/template/escape_test.go | 15 +++++++++++++++
+ src/html/template/html.go | 3 +++
+ 3 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index ca078f4..bdccc65 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -362,9 +362,8 @@ func normalizeEscFn(e string) string {
+ // for all x.
+ var redundantFuncs = map[string]map[string]bool{
+ "_html_template_commentescaper": {
+- "_html_template_attrescaper": true,
+- "_html_template_nospaceescaper": true,
+- "_html_template_htmlescaper": true,
++ "_html_template_attrescaper": true,
++ "_html_template_htmlescaper": true,
+ },
+ "_html_template_cssescaper": {
+ "_html_template_attrescaper": true,
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index fbc84a7..4f48afe 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -678,6 +678,21 @@ func TestEscape(t *testing.T) {
+ `<img srcset={{",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"}}>`,
+ `<img srcset=,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,>`,
+ },
++ {
++ "unquoted empty attribute value (plaintext)",
++ "<p name={{.U}}>",
++ "<p name=ZgotmplZ>",
++ },
++ {
++ "unquoted empty attribute value (url)",
++ "<p href={{.U}}>",
++ "<p href=ZgotmplZ>",
++ },
++ {
++ "quoted empty attribute value",
++ "<p name=\"{{.U}}\">",
++ "<p name=\"\">",
++ },
+ }
+
+ for _, test := range tests {
+diff --git a/src/html/template/html.go b/src/html/template/html.go
+index 356b829..636bc21 100644
+--- a/src/html/template/html.go
++++ b/src/html/template/html.go
+@@ -14,6 +14,9 @@ import (
+ // htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
+ func htmlNospaceEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
++ if s == "" {
++ return filterFailsafe
++ }
+ if t == contentTypeHTML {
+ return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
+ }
+--
+2.25.1
+
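Illustration only (assuming the patched html/template): an action that evaluates to an empty string in an unquoted attribute position now yields ZgotmplZ rather than an empty value that browsers may re-parse unexpectedly, mirroring the test cases added above.

    package main

    import (
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("t").Parse("<p name={{.}}>"))
        if err := t.Execute(os.Stdout, ""); err != nil {
            panic(err)
        }
        // expected with the patch: <p name=ZgotmplZ>
        // the quoted form, <p name="{{.}}">, still renders as <p name="">
    }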
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-29406-1.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-29406-1.patch
new file mode 100644
index 0000000000..a326cda5c4
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-29406-1.patch
@@ -0,0 +1,210 @@
+From 5fa6923b1ea891400153d04ddf1545e23b40041b Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 28 Jun 2023 13:20:08 -0700
+Subject: [PATCH] [release-branch.go1.19] net/http: validate Host header before
+ sending
+
+Verify that the Host header we send is valid.
+Avoids surprising behavior such as a Host of "go.dev\r\nX-Evil:oops"
+adding an X-Evil header to HTTP/1 requests.
+
+Add a test, skip the test for HTTP/2. HTTP/2 is not vulnerable to
+header injection in the way HTTP/1 is, but x/net/http2 doesn't validate
+the header and will go into a retry loop when the server rejects it.
+CL 506995 adds the necessary validation to x/net/http2.
+
+Updates #60374
+Fixes #61075
+For CVE-2023-29406
+
+Change-Id: I05cb6866a9bead043101954dfded199258c6dd04
+Reviewed-on: https://go-review.googlesource.com/c/go/+/506996
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit 499458f7ca04087958987a33c2703c3ef03e27e2)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/507358
+Run-TryBot: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5fa6923b1ea891400153d04ddf1545e23b40041b]
+CVE: CVE-2023-29406
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/net/http/http_test.go | 29 ----------------------
+ src/net/http/request.go | 45 ++++++++--------------------------
+ src/net/http/request_test.go | 11 ++-------
+ src/net/http/transport_test.go | 18 ++++++++++++++
+ 4 files changed, 30 insertions(+), 73 deletions(-)
+
+diff --git a/src/net/http/http_test.go b/src/net/http/http_test.go
+index 0d92fe5..f03272a 100644
+--- a/src/net/http/http_test.go
++++ b/src/net/http/http_test.go
+@@ -48,35 +48,6 @@ func TestForeachHeaderElement(t *testing.T) {
+ }
+ }
+
+-func TestCleanHost(t *testing.T) {
+- tests := []struct {
+- in, want string
+- }{
+- {"www.google.com", "www.google.com"},
+- {"www.google.com foo", "www.google.com"},
+- {"www.google.com/foo", "www.google.com"},
+- {" first character is a space", ""},
+- {"[1::6]:8080", "[1::6]:8080"},
+-
+- // Punycode:
+- {"гофер.рф/foo", "xn--c1ae0ajs.xn--p1ai"},
+- {"bücher.de", "xn--bcher-kva.de"},
+- {"bücher.de:8080", "xn--bcher-kva.de:8080"},
+- // Verify we convert to lowercase before punycode:
+- {"BÜCHER.de", "xn--bcher-kva.de"},
+- {"BÜCHER.de:8080", "xn--bcher-kva.de:8080"},
+- // Verify we normalize to NFC before punycode:
+- {"gophér.nfc", "xn--gophr-esa.nfc"}, // NFC input; no work needed
+- {"goph\u0065\u0301r.nfd", "xn--gophr-esa.nfd"}, // NFD input
+- }
+- for _, tt := range tests {
+- got := cleanHost(tt.in)
+- if tt.want != got {
+- t.Errorf("cleanHost(%q) = %q, want %q", tt.in, got, tt.want)
+- }
+- }
+-}
+-
+ // Test that cmd/go doesn't link in the HTTP server.
+ //
+ // This catches accidental dependencies between the HTTP transport and
+diff --git a/src/net/http/request.go b/src/net/http/request.go
+index 09cb0c7..2f4e740 100644
+--- a/src/net/http/request.go
++++ b/src/net/http/request.go
+@@ -17,7 +17,6 @@ import (
+ "io"
+ "mime"
+ "mime/multipart"
+- "net"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+@@ -27,6 +26,7 @@ import (
+ "strings"
+ "sync"
+
++ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/idna"
+ )
+
+@@ -568,12 +568,19 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
+ // is not given, use the host from the request URL.
+ //
+ // Clean the host, in case it arrives with unexpected stuff in it.
+- host := cleanHost(r.Host)
++ host := r.Host
+ if host == "" {
+ if r.URL == nil {
+ return errMissingHost
+ }
+- host = cleanHost(r.URL.Host)
++ host = r.URL.Host
++ }
++ host, err = httpguts.PunycodeHostPort(host)
++ if err != nil {
++ return err
++ }
++ if !httpguts.ValidHostHeader(host) {
++ return errors.New("http: invalid Host header")
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+@@ -730,38 +737,6 @@ func idnaASCII(v string) (string, error) {
+ return idna.Lookup.ToASCII(v)
+ }
+
+-// cleanHost cleans up the host sent in request's Host header.
+-//
+-// It both strips anything after '/' or ' ', and puts the value
+-// into Punycode form, if necessary.
+-//
+-// Ideally we'd clean the Host header according to the spec:
+-// https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ]")
+-// https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
+-// https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
+-// But practically, what we are trying to avoid is the situation in
+-// issue 11206, where a malformed Host header used in the proxy context
+-// would create a bad request. So it is enough to just truncate at the
+-// first offending character.
+-func cleanHost(in string) string {
+- if i := strings.IndexAny(in, " /"); i != -1 {
+- in = in[:i]
+- }
+- host, port, err := net.SplitHostPort(in)
+- if err != nil { // input was just a host
+- a, err := idnaASCII(in)
+- if err != nil {
+- return in // garbage in, garbage out
+- }
+- return a
+- }
+- a, err := idnaASCII(host)
+- if err != nil {
+- return in // garbage in, garbage out
+- }
+- return net.JoinHostPort(a, port)
+-}
+-
+ // removeZone removes IPv6 zone identifier from host.
+ // E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+ func removeZone(host string) string {
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index fac12b7..368e87a 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -776,15 +776,8 @@ func TestRequestBadHost(t *testing.T) {
+ }
+ req.Host = "foo.com with spaces"
+ req.URL.Host = "foo.com with spaces"
+- req.Write(logWrites{t, &got})
+- want := []string{
+- "GET /after HTTP/1.1\r\n",
+- "Host: foo.com\r\n",
+- "User-Agent: " + DefaultUserAgent + "\r\n",
+- "\r\n",
+- }
+- if !reflect.DeepEqual(got, want) {
+- t.Errorf("Writes = %q\n Want = %q", got, want)
++ if err := req.Write(logWrites{t, &got}); err == nil {
++ t.Errorf("Writing request with invalid Host: succeded, want error")
+ }
+ }
+
+diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
+index eeaa492..58f12af 100644
+--- a/src/net/http/transport_test.go
++++ b/src/net/http/transport_test.go
+@@ -6512,3 +6512,21 @@ func TestCancelRequestWhenSharingConnection(t *testing.T) {
+ close(r2c)
+ wg.Wait()
+ }
++
++func TestRequestSanitization(t *testing.T) {
++ setParallel(t)
++ defer afterTest(t)
++
++ ts := newClientServerTest(t, h1Mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
++ if h, ok := req.Header["X-Evil"]; ok {
++ t.Errorf("request has X-Evil header: %q", h)
++ }
++ })).ts
++ defer ts.Close()
++ req, _ := NewRequest("GET", ts.URL, nil)
++ req.Host = "go.dev\r\nX-Evil:evil"
++ resp, _ := ts.Client().Do(req)
++ if resp != nil {
++ resp.Body.Close()
++ }
++}
+--
+2.25.1
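Illustration only, mirroring the TestRequestSanitization case above: a Host containing CR/LF can no longer smuggle extra header lines. With this patch the client refuses to send the request; with the -2 follow-up below it sends an empty Host instead (except through a proxy, where it still errors).

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // With the -2 follow-up the request arrives here with an empty Host and
            // no X-Evil header; with only this patch it is never sent at all.
        }))
        defer ts.Close()

        req, _ := http.NewRequest("GET", ts.URL, nil)
        req.Host = "go.dev\r\nX-Evil: evil" // header-injection attempt
        resp, err := ts.Client().Do(req)
        if resp != nil {
            resp.Body.Close()
        }
        fmt.Println(err) // expected with this patch: ... http: invalid Host header
    }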
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2023-29406-2.patch b/meta/recipes-devtools/go/go-1.18/CVE-2023-29406-2.patch
new file mode 100644
index 0000000000..637f46a537
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2023-29406-2.patch
@@ -0,0 +1,114 @@
+From c08a5fa413a34111c9a37fd9e545de27ab0978b1 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 19 Jul 2023 10:30:46 -0700
+Subject: [PATCH] [release-branch.go1.19] net/http: permit requests with
+ invalid Host headers
+
+Historically, the Transport has silently truncated invalid
+Host headers at the first '/' or ' ' character. CL 506996 changed
+this behavior to reject invalid Host headers entirely.
+Unfortunately, Docker appears to rely on the previous behavior.
+
+When sending an HTTP/1 request with an invalid Host, send an empty
+Host header. This is safer than truncation: If you care about the
+Host, then you should get the one you set; if you don't care,
+then an empty Host should be fine.
+
+Continue to fully validate Host headers sent to a proxy,
+since proxies generally can't productively forward requests
+without a Host.
+
+For #60374
+Fixes #61431
+Fixes #61825
+
+Change-Id: If170c7dd860aa20eb58fe32990fc93af832742b6
+Reviewed-on: https://go-review.googlesource.com/c/go/+/511155
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit b9153f6ef338baee5fe02a867c8fbc83a8b29dd1)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/518855
+Auto-Submit: Dmitri Shuralyov <dmitshur@google.com>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+Reviewed-by: Russ Cox <rsc@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c08a5fa413a34111c9a37fd9e545de27ab0978b1]
+CVE: CVE-2023-29406
+Signed-off-by: Ming Liu <liu.ming50@gmail.com>
+---
+ src/net/http/request.go | 23 ++++++++++++++++++++++-
+ src/net/http/request_test.go | 17 ++++++++++++-----
+ 2 files changed, 34 insertions(+), 6 deletions(-)
+
+diff --git a/src/net/http/request.go b/src/net/http/request.go
+index 3100037386..91cb8a66b9 100644
+--- a/src/net/http/request.go
++++ b/src/net/http/request.go
+@@ -582,8 +582,29 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
+ if err != nil {
+ return err
+ }
++ // Validate that the Host header is a valid header in general,
++ // but don't validate the host itself. This is sufficient to avoid
++ // header or request smuggling via the Host field.
++ // The server can (and will, if it's a net/http server) reject
++ // the request if it doesn't consider the host valid.
+ if !httpguts.ValidHostHeader(host) {
+- return errors.New("http: invalid Host header")
++ // Historically, we would truncate the Host header after '/' or ' '.
++ // Some users have relied on this truncation to convert a network
++ // address such as Unix domain socket path into a valid, ignored
++ // Host header (see https://go.dev/issue/61431).
++ //
++ // We don't preserve the truncation, because sending an altered
++ // header field opens a smuggling vector. Instead, zero out the
++ // Host header entirely if it isn't valid. (An empty Host is valid;
++ // see RFC 9112 Section 3.2.)
++ //
++ // Return an error if we're sending to a proxy, since the proxy
++ // probably can't do anything useful with an empty Host header.
++ if !usingProxy {
++ host = ""
++ } else {
++ return errors.New("http: invalid Host header")
++ }
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index fddc85d6a9..dd1e2dc2a1 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -770,16 +770,23 @@ func TestRequestWriteBufferedWriter(t *testing.T) {
+ }
+ }
+
+-func TestRequestBadHost(t *testing.T) {
++func TestRequestBadHostHeader(t *testing.T) {
+ got := []string{}
+ req, err := NewRequest("GET", "http://foo/after", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+- req.Host = "foo.com with spaces"
+- req.URL.Host = "foo.com with spaces"
+- if err := req.Write(logWrites{t, &got}); err == nil {
+- t.Errorf("Writing request with invalid Host: succeded, want error")
++ req.Host = "foo.com\nnewline"
++ req.URL.Host = "foo.com\nnewline"
++ req.Write(logWrites{t, &got})
++ want := []string{
++ "GET /after HTTP/1.1\r\n",
++ "Host: \r\n",
++ "User-Agent: " + DefaultUserAgent + "\r\n",
++ "\r\n",
++ }
++ if !reflect.DeepEqual(got, want) {
++ t.Errorf("Writes = %q\n Want = %q", got, want)
+ }
+ }
+
+--
+2.34.1
+
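
The net effect of this second patch is visible with Request.Write alone: an invalid Host on a direct request no longer produces an error, it is written out as an empty Host header, while proxied requests (written via WriteProxy) still fail. A small sketch mirroring the updated test; the output comments assume the patched behaviour.

    package main

    import (
        "fmt"
        "net/http"
        "os"
    )

    func main() {
        req, err := http.NewRequest("GET", "http://foo.com/after", nil)
        if err != nil {
            panic(err)
        }
        req.Host = "foo.com\nnewline" // invalid: contains a control character

        // Direct request: with this patch the Host header is zeroed out
        // rather than the write failing, so the wire format contains
        // "Host: \r\n".
        if err := req.Write(os.Stdout); err != nil {
            fmt.Println("write failed:", err) // behaviour with only the first patch
        }

        // Proxied request: still rejected, since a proxy cannot do much
        // with an empty Host.
        if err := req.WriteProxy(os.Stdout); err != nil {
            fmt.Println("proxy write failed:", err)
        }
    }
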
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2024-24784.patch b/meta/recipes-devtools/go/go-1.18/CVE-2024-24784.patch
new file mode 100644
index 0000000000..d3fc6b0313
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2024-24784.patch
@@ -0,0 +1,207 @@
+From 5330cd225ba54c7dc78c1b46dcdf61a4671a632c Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 10 Jan 2024 11:02:14 -0800
+Subject: [PATCH] [release-branch.go1.22] net/mail: properly handle special
+ characters in phrase and obs-phrase
+
+Fixes a couple of misalignments with RFC 5322 which introduce
+significant diffs between (mostly) conformant parsers.
+
+This change reverts the changes made in CL50911, which allowed certain
+special RFC 5322 characters to appear unquoted in the "phrase" syntax.
+It is unclear why this change was made in the first place, and it created
+a divergence from conformant parsers. In particular, this resulted in
+treating comments in display names incorrectly.
+
+Additionally properly handle trailing malformed comments in the group
+syntax.
+
+For #65083
+Fixes #65849
+
+Change-Id: I00dddc044c6ae3381154e43236632604c390f672
+Reviewed-on: https://go-review.googlesource.com/c/go/+/555596
+Reviewed-by: Damien Neil <dneil@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/566215
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5330cd225ba54c7dc78c1b46dcdf61a4671a632c]
+CVE: CVE-2024-24784
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/net/mail/message.go | 30 +++++++++++++++------------
+ src/net/mail/message_test.go | 40 ++++++++++++++++++++++++++----------
+ 2 files changed, 46 insertions(+), 24 deletions(-)
+
+diff --git a/src/net/mail/message.go b/src/net/mail/message.go
+index 47bbf6c..84f48f0 100644
+--- a/src/net/mail/message.go
++++ b/src/net/mail/message.go
+@@ -231,7 +231,7 @@ func (a *Address) String() string {
+ // Add quotes if needed
+ quoteLocal := false
+ for i, r := range local {
+- if isAtext(r, false, false) {
++ if isAtext(r, false) {
+ continue
+ }
+ if r == '.' {
+@@ -395,7 +395,7 @@ func (p *addrParser) parseAddress(handleGroup bool) ([]*Address, error) {
+ if !p.consume('<') {
+ atext := true
+ for _, r := range displayName {
+- if !isAtext(r, true, false) {
++ if !isAtext(r, true) {
+ atext = false
+ break
+ }
+@@ -430,7 +430,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) {
+ // handle empty group.
+ p.skipSpace()
+ if p.consume(';') {
+- p.skipCFWS()
++ if !p.skipCFWS() {
++ return nil, errors.New("mail: misformatted parenthetical comment")
++ }
+ return group, nil
+ }
+
+@@ -447,7 +449,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) {
+ return nil, errors.New("mail: misformatted parenthetical comment")
+ }
+ if p.consume(';') {
+- p.skipCFWS()
++ if !p.skipCFWS() {
++ return nil, errors.New("mail: misformatted parenthetical comment")
++ }
+ break
+ }
+ if !p.consume(',') {
+@@ -517,6 +521,12 @@ func (p *addrParser) consumePhrase() (phrase string, err error) {
+ var words []string
+ var isPrevEncoded bool
+ for {
++ // obs-phrase allows CFWS after one word
++ if len(words) > 0 {
++ if !p.skipCFWS() {
++ return "", errors.New("mail: misformatted parenthetical comment")
++ }
++ }
+ // word = atom / quoted-string
+ var word string
+ p.skipSpace()
+@@ -612,7 +622,6 @@ Loop:
+ // If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
+ // If permissive is true, consumeAtom will not fail on:
+ // - leading/trailing/double dots in the atom (see golang.org/issue/4938)
+-// - special characters (RFC 5322 3.2.3) except '<', '>', ':' and '"' (see golang.org/issue/21018)
+ func (p *addrParser) consumeAtom(dot bool, permissive bool) (atom string, err error) {
+ i := 0
+
+@@ -623,7 +632,7 @@ Loop:
+ case size == 1 && r == utf8.RuneError:
+ return "", fmt.Errorf("mail: invalid utf-8 in address: %q", p.s)
+
+- case size == 0 || !isAtext(r, dot, permissive):
++ case size == 0 || !isAtext(r, dot):
+ break Loop
+
+ default:
+@@ -777,18 +786,13 @@ func (e charsetError) Error() string {
+
+ // isAtext reports whether r is an RFC 5322 atext character.
+ // If dot is true, period is included.
+-// If permissive is true, RFC 5322 3.2.3 specials is included,
+-// except '<', '>', ':' and '"'.
+-func isAtext(r rune, dot, permissive bool) bool {
++func isAtext(r rune, dot bool) bool {
+ switch r {
+ case '.':
+ return dot
+
+ // RFC 5322 3.2.3. specials
+- case '(', ')', '[', ']', ';', '@', '\\', ',':
+- return permissive
+-
+- case '<', '>', '"', ':':
++ case '(', ')', '<', '>', '[', ']', ':', ';', '@', '\\', ',', '"': // RFC 5322 3.2.3. specials
+ return false
+ }
+ return isVchar(r)
+diff --git a/src/net/mail/message_test.go b/src/net/mail/message_test.go
+index 80a17b2..00bc93e 100644
+--- a/src/net/mail/message_test.go
++++ b/src/net/mail/message_test.go
+@@ -334,8 +334,11 @@ func TestAddressParsingError(t *testing.T) {
+ 13: {"group not closed: null@example.com", "expected comma"},
+ 14: {"group: first@example.com, second@example.com;", "group with multiple addresses"},
+ 15: {"john.doe", "missing '@' or angle-addr"},
+- 16: {"john.doe@", "no angle-addr"},
++ 16: {"john.doe@", "missing '@' or angle-addr"},
+ 17: {"John Doe@foo.bar", "no angle-addr"},
++ 18: {" group: null@example.com; (asd", "misformatted parenthetical comment"},
++ 19: {" group: ; (asd", "misformatted parenthetical comment"},
++ 20: {`(John) Doe <jdoe@machine.example>`, "missing word in phrase:"},
+ }
+
+ for i, tc := range mustErrTestCases {
+@@ -374,24 +377,19 @@ func TestAddressParsing(t *testing.T) {
+ Address: "john.q.public@example.com",
+ }},
+ },
+- {
+- `"John (middle) Doe" <jdoe@machine.example>`,
+- []*Address{{
+- Name: "John (middle) Doe",
+- Address: "jdoe@machine.example",
+- }},
+- },
++ // Comment in display name
+ {
+ `John (middle) Doe <jdoe@machine.example>`,
+ []*Address{{
+- Name: "John (middle) Doe",
++ Name: "John Doe",
+ Address: "jdoe@machine.example",
+ }},
+ },
++ // Display name is quoted string, so comment is not a comment
+ {
+- `John !@M@! Doe <jdoe@machine.example>`,
++ `"John (middle) Doe" <jdoe@machine.example>`,
+ []*Address{{
+- Name: "John !@M@! Doe",
++ Name: "John (middle) Doe",
+ Address: "jdoe@machine.example",
+ }},
+ },
+@@ -726,6 +724,26 @@ func TestAddressParsing(t *testing.T) {
+ },
+ },
+ },
++ // Comment in group display name
++ {
++ `group (comment:): a@example.com, b@example.com;`,
++ []*Address{
++ {
++ Address: "a@example.com",
++ },
++ {
++ Address: "b@example.com",
++ },
++ },
++ },
++ {
++ `x(:"):"@a.example;("@b.example;`,
++ []*Address{
++ {
++ Address: `@a.example;(@b.example`,
++ },
++ },
++ },
+ }
+ for _, test := range tests {
+ if len(test.exp) == 1 {
+--
+2.39.3
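
The user-visible effect of the net/mail change is easiest to see through ParseAddress: an unquoted RFC 5322 comment in a display name is now dropped rather than kept as literal text, while a quoted display name keeps its parentheses. A small sketch, not part of the patch; the expected outputs in the comments are taken from the test cases above and assume the patched behaviour.

    package main

    import (
        "fmt"
        "net/mail"
    )

    func main() {
        // Unquoted comment: "(middle)" is CFWS and is no longer folded
        // into the display name by a conformant parser.
        a, err := mail.ParseAddress(`John (middle) Doe <jdoe@machine.example>`)
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(a.Name, a.Address) // expected: John Doe jdoe@machine.example

        // Quoted display name: the parentheses are literal text, not a comment.
        b, _ := mail.ParseAddress(`"John (middle) Doe" <jdoe@machine.example>`)
        fmt.Println(b.Name) // expected: John (middle) Doe
    }
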
diff --git a/meta/recipes-devtools/go/go-1.18/CVE-2024-24785.patch b/meta/recipes-devtools/go/go-1.18/CVE-2024-24785.patch
new file mode 100644
index 0000000000..5c8244e89a
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.18/CVE-2024-24785.patch
@@ -0,0 +1,196 @@
+From 056b0edcb8c152152021eebf4cf42adbfbe77992 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Wed, 14 Feb 2024 17:18:36 -0800
+Subject: [PATCH] [release-branch.go1.22] html/template: escape additional
+ tokens in MarshalJSON errors
+
+Escape "</script" and "<!--" in errors returned from MarshalJSON errors
+when attempting to marshal types in script blocks. This prevents any
+user controlled content from prematurely terminating the script block.
+
+Updates #65697
+Fixes #65969
+
+Change-Id: Icf0e26c54ea7d9c1deed0bff11b6506c99ddef1b
+Reviewed-on: https://go-review.googlesource.com/c/go/+/564196
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit ccbc725f2d678255df1bd326fa511a492aa3a0aa)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/567535
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/056b0edcb8c152152021eebf4cf42adbfbe77992]
+CVE: CVE-2024-24785
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/html/template/js.go | 22 ++++++++-
+ src/html/template/js_test.go | 96 ++++++++++++++++++++----------------
+ 2 files changed, 74 insertions(+), 44 deletions(-)
+
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index 35994f0..4d3b25d 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -171,13 +171,31 @@ func jsValEscaper(args ...interface{}) string {
+ // cyclic data. This may be an unacceptable DoS risk.
+ b, err := json.Marshal(a)
+ if err != nil {
+- // Put a space before comment so that if it is flush against
++ // While the standard JSON marshaller does not include user controlled
++ // information in the error message, if a type has a MarshalJSON method,
++ // the content of the error message is not guaranteed. Since we insert
++ // the error into the template, as part of a comment, we attempt to
++ // prevent the error from either terminating the comment, or the script
++ // block itself.
++ //
++ // In particular we:
++ // * replace "*/" comment end tokens with "* /", which does not
++ // terminate the comment
++ // * replace "</script" with "\x3C/script", and "<!--" with
++ // "\x3C!--", which prevents confusing script block termination
++ // semantics
++ //
++ // We also put a space before the comment so that if it is flush against
+ // a division operator it is not turned into a line comment:
+ // x/{{y}}
+ // turning into
+ // x//* error marshaling y:
+ // second line of error message */null
+- return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /"))
++ errStr := err.Error()
++ errStr = strings.ReplaceAll(errStr, "*/", "* /")
++ errStr = strings.ReplaceAll(errStr, "</script", `\x3C/script`)
++ errStr = strings.ReplaceAll(errStr, "<!--", `\x3C!--`)
++ return fmt.Sprintf(" /* %s */null ", errStr)
+ }
+
+ // TODO: maybe post-process output to prevent it from containing
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index de9ef28..0eaec11 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -5,6 +5,7 @@
+ package template
+
+ import (
++ "errors"
+ "bytes"
+ "math"
+ "strings"
+@@ -104,61 +105,72 @@ func TestNextJsCtx(t *testing.T) {
+ }
+ }
+
++type jsonErrType struct{}
++
++func (e *jsonErrType) MarshalJSON() ([]byte, error) {
++ return nil, errors.New("beep */ boop </script blip <!--")
++}
++
+ func TestJSValEscaper(t *testing.T) {
+ tests := []struct {
+- x interface{}
+- js string
++ x any
++ js string
++ skipNest bool
+ }{
+- {int(42), " 42 "},
+- {uint(42), " 42 "},
+- {int16(42), " 42 "},
+- {uint16(42), " 42 "},
+- {int32(-42), " -42 "},
+- {uint32(42), " 42 "},
+- {int16(-42), " -42 "},
+- {uint16(42), " 42 "},
+- {int64(-42), " -42 "},
+- {uint64(42), " 42 "},
+- {uint64(1) << 53, " 9007199254740992 "},
++ {int(42), " 42 ", false},
++ {uint(42), " 42 ", false},
++ {int16(42), " 42 ", false},
++ {uint16(42), " 42 ", false},
++ {int32(-42), " -42 ", false},
++ {uint32(42), " 42 ", false},
++ {int16(-42), " -42 ", false},
++ {uint16(42), " 42 ", false},
++ {int64(-42), " -42 ", false},
++ {uint64(42), " 42 ", false},
++ {uint64(1) << 53, " 9007199254740992 ", false},
+ // ulp(1 << 53) > 1 so this loses precision in JS
+ // but it is still a representable integer literal.
+- {uint64(1)<<53 + 1, " 9007199254740993 "},
+- {float32(1.0), " 1 "},
+- {float32(-1.0), " -1 "},
+- {float32(0.5), " 0.5 "},
+- {float32(-0.5), " -0.5 "},
+- {float32(1.0) / float32(256), " 0.00390625 "},
+- {float32(0), " 0 "},
+- {math.Copysign(0, -1), " -0 "},
+- {float64(1.0), " 1 "},
+- {float64(-1.0), " -1 "},
+- {float64(0.5), " 0.5 "},
+- {float64(-0.5), " -0.5 "},
+- {float64(0), " 0 "},
+- {math.Copysign(0, -1), " -0 "},
+- {"", `""`},
+- {"foo", `"foo"`},
++ {uint64(1)<<53 + 1, " 9007199254740993 ", false},
++ {float32(1.0), " 1 ", false},
++ {float32(-1.0), " -1 ", false},
++ {float32(0.5), " 0.5 ", false},
++ {float32(-0.5), " -0.5 ", false},
++ {float32(1.0) / float32(256), " 0.00390625 ", false},
++ {float32(0), " 0 ", false},
++ {math.Copysign(0, -1), " -0 ", false},
++ {float64(1.0), " 1 ", false},
++ {float64(-1.0), " -1 ", false},
++ {float64(0.5), " 0.5 ", false},
++ {float64(-0.5), " -0.5 ", false},
++ {float64(0), " 0 ", false},
++ {math.Copysign(0, -1), " -0 ", false},
++ {"", `""`, false},
++ {"foo", `"foo"`, false},
+ // Newlines.
+- {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
++ {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`, false},
+ // "\v" == "v" on IE 6 so use "\u000b" instead.
+- {"\t\x0b", `"\t\u000b"`},
+- {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+- {[]interface{}{}, "[]"},
+- {[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
+- {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
+- {"<!--", `"\u003c!--"`},
+- {"-->", `"--\u003e"`},
+- {"<![CDATA[", `"\u003c![CDATA["`},
+- {"]]>", `"]]\u003e"`},
+- {"</script", `"\u003c/script"`},
+- {"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
+- {nil, " null "},
++ {"\t\x0b", `"\t\u000b"`, false},
++ {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`, false},
++ {[]any{}, "[]", false},
++ {[]any{42, "foo", nil}, `[42,"foo",null]`, false},
++ {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`, false},
++ {"<!--", `"\u003c!--"`, false},
++ {"-->", `"--\u003e"`, false},
++ {"<![CDATA[", `"\u003c![CDATA["`, false},
++ {"]]>", `"]]\u003e"`, false},
++ {"</script", `"\u003c/script"`, false},
++ {"\U0001D11E", "\"\U0001D11E\"", false}, // or "\uD834\uDD1E"
++ {nil, " null ", false},
++ {&jsonErrType{}, " /* json: error calling MarshalJSON for type *template.jsonErrType: beep * / boop \\x3C/script blip \\x3C!-- */null ", true},
+ }
+
+ for _, test := range tests {
+ if js := jsValEscaper(test.x); js != test.js {
+ t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
+ }
++ if test.skipNest {
++ continue
++ }
+ // Make sure that escaping corner cases are not broken
+ // by nesting.
+ a := []interface{}{test.x}
+--
+2.39.3
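
The core of this fix is a plain string sanitisation of the MarshalJSON error text before it is interpolated into a /* ... */ comment in script context. A standalone sketch of the same three replacements; it is a simplified mirror of the patched jsValEscaper path, not an exported API.

    package main

    import (
        "fmt"
        "strings"
    )

    // sanitizeMarshalErr mirrors the replacements the patch applies so the
    // error text can neither close the comment nor the enclosing <script>.
    func sanitizeMarshalErr(msg string) string {
        msg = strings.ReplaceAll(msg, "*/", "* /")
        msg = strings.ReplaceAll(msg, "</script", `\x3C/script`)
        msg = strings.ReplaceAll(msg, "<!--", `\x3C!--`)
        return fmt.Sprintf(" /* %s */null ", msg)
    }

    func main() {
        fmt.Println(sanitizeMarshalErr("beep */ boop </script blip <!--"))
        // expected: /* beep * / boop \x3C/script blip \x3C!-- */null
    }
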
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_1.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_1.patch
new file mode 100644
index 0000000000..ff9ba18ec5
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_1.patch
@@ -0,0 +1,137 @@
+From f8d691d335c6ac14bcbae6886b5bf8ca8bf1e6a5 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 16 Mar 2023 14:18:04 -0700
+Subject: [PATCH 1/3] mime/multipart: avoid excessive copy buffer allocations
+ in ReadForm
+
+When copying form data to disk with io.Copy,
+allocate only one copy buffer and reuse it rather than
+creating two buffers per file (one from io.multiReader.WriteTo,
+and a second one from os.File.ReadFrom).
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802453
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802395
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: Ie405470c92abffed3356913b37d813e982c96c8b
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481983
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+CVE: CVE-2023-24536
+Upstream-Status: Backport [ef41a4e2face45e580c5836eaebd51629fc23f15]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/mime/multipart/formdata.go | 15 +++++++--
+ src/mime/multipart/formdata_test.go | 49 +++++++++++++++++++++++++++++
+ 2 files changed, 61 insertions(+), 3 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index a7d4ca9..975dcb6 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -84,6 +84,7 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ maxMemoryBytes = math.MaxInt64
+ }
+ }
++ var copyBuf []byte
+ for {
+ p, err := r.nextPart(false, maxMemoryBytes)
+ if err == io.EOF {
+@@ -147,14 +148,22 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ }
+ numDiskFiles++
+- size, err := io.Copy(file, io.MultiReader(&b, p))
++ if _, err := file.Write(b.Bytes()); err != nil {
++ return nil, err
++ }
++ if copyBuf == nil {
++ copyBuf = make([]byte, 32*1024) // same buffer size as io.Copy uses
++ }
++ // os.File.ReadFrom will allocate its own copy buffer if we let io.Copy use it.
++ type writerOnly struct{ io.Writer }
++ remainingSize, err := io.CopyBuffer(writerOnly{file}, p, copyBuf)
+ if err != nil {
+ return nil, err
+ }
+ fh.tmpfile = file.Name()
+- fh.Size = size
++ fh.Size = int64(b.Len()) + remainingSize
+ fh.tmpoff = fileOff
+- fileOff += size
++ fileOff += fh.Size
+ if !combineFiles {
+ if err := file.Close(); err != nil {
+ return nil, err
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 5cded71..f5b5608 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -368,3 +368,52 @@ func testReadFormManyFiles(t *testing.T, distinct bool) {
+ t.Fatalf("temp dir contains %v files; want 0", len(names))
+ }
+ }
++
++func BenchmarkReadForm(b *testing.B) {
++ for _, test := range []struct {
++ name string
++ form func(fw *Writer, count int)
++ }{{
++ name: "fields",
++ form: func(fw *Writer, count int) {
++ for i := 0; i < count; i++ {
++ w, _ := fw.CreateFormField(fmt.Sprintf("field%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ },
++ }, {
++ name: "files",
++ form: func(fw *Writer, count int) {
++ for i := 0; i < count; i++ {
++ w, _ := fw.CreateFormFile(fmt.Sprintf("field%v", i), fmt.Sprintf("file%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ },
++ }} {
++ b.Run(test.name, func(b *testing.B) {
++ for _, maxMemory := range []int64{
++ 0,
++ 1 << 20,
++ } {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ test.form(fw, 10)
++ if err := fw.Close(); err != nil {
++ b.Fatal(err)
++ }
++ b.Run(fmt.Sprintf("maxMemory=%v", maxMemory), func(b *testing.B) {
++ b.ReportAllocs()
++ for i := 0; i < b.N; i++ {
++ fr := NewReader(bytes.NewReader(buf.Bytes()), fw.Boundary())
++ form, err := fr.ReadForm(maxMemory)
++ if err != nil {
++ b.Fatal(err)
++ }
++ form.RemoveAll()
++ }
++
++ })
++ }
++ })
++ }
++}
+--
+2.35.5
+
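
The interesting detail in this first patch is the writerOnly wrapper: os.File implements io.ReaderFrom, so io.Copy and io.CopyBuffer would normally delegate to file.ReadFrom and allocate their own buffer, ignoring the one ReadForm wants to reuse. Hiding ReadFrom behind a struct that only embeds io.Writer forces the copy through the supplied buffer. A minimal sketch of the same pattern, with an illustrative temp file and sizes.

    package main

    import (
        "io"
        "log"
        "os"
        "strings"
    )

    // writerOnly hides any ReadFrom method on the underlying writer, so
    // io.CopyBuffer cannot delegate to it and must use the supplied buffer.
    type writerOnly struct{ io.Writer }

    func main() {
        f, err := os.CreateTemp("", "copybuf-*")
        if err != nil {
            log.Fatal(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()

        // strings.Reader implements WriterTo, which would also bypass the
        // buffer, so wrap it with LimitReader to keep the copy honest.
        src := io.LimitReader(strings.NewReader(strings.Repeat("x", 1<<20)), 1<<20)

        buf := make([]byte, 32*1024) // one buffer, reusable across many files
        if _, err := io.CopyBuffer(writerOnly{f}, src, buf); err != nil {
            log.Fatal(err)
        }
    }
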
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_2.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_2.patch
new file mode 100644
index 0000000000..704a1fb567
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_2.patch
@@ -0,0 +1,187 @@
+From 4174a87b600c58e8cc00d9d18d0c507c67ca5d41 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 16 Mar 2023 16:56:12 -0700
+Subject: [PATCH 2/3] net/textproto, mime/multipart: improve accounting of
+ non-file data
+
+For requests containing large numbers of small parts,
+memory consumption of a parsed form could be about 250%
+over the estimated size.
+
+When considering the size of parsed forms, account for the size of
+FileHeader structs and increase the estimate of memory consumed by
+map entries.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802454
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802396
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: I31bc50e9346b4eee6fbe51a18c3c57230cc066db
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481984
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+
+CVE: CVE-2023-24536
+Upstream-Status: Backport [7a359a651c7ebdb29e0a1c03102fce793e9f58f0]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/mime/multipart/formdata.go | 9 +++--
+ src/mime/multipart/formdata_test.go | 55 ++++++++++++-----------------
+ src/net/textproto/reader.go | 8 ++++-
+ 3 files changed, 37 insertions(+), 35 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 975dcb6..3f6ff69 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -103,8 +103,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ // Multiple values for the same key (one map entry, longer slice) are cheaper
+ // than the same number of values for different keys (many map entries), but
+ // using a consistent per-value cost for overhead is simpler.
++ const mapEntryOverhead = 200
+ maxMemoryBytes -= int64(len(name))
+- maxMemoryBytes -= 100 // map overhead
++ maxMemoryBytes -= mapEntryOverhead
+ if maxMemoryBytes < 0 {
+ // We can't actually take this path, since nextPart would already have
+ // rejected the MIME headers for being too large. Check anyway.
+@@ -128,7 +129,10 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ // file, store in memory or on disk
++ const fileHeaderSize = 100
+ maxMemoryBytes -= mimeHeaderSize(p.Header)
++ maxMemoryBytes -= mapEntryOverhead
++ maxMemoryBytes -= fileHeaderSize
+ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
+@@ -183,9 +187,10 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ func mimeHeaderSize(h textproto.MIMEHeader) (size int64) {
++ size = 400
+ for k, vs := range h {
+ size += int64(len(k))
+- size += 100 // map entry overhead
++ size += 200 // map entry overhead
+ for _, v := range vs {
+ size += int64(len(v))
+ }
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index f5b5608..8ed26e0 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -192,10 +192,10 @@ func (r *failOnReadAfterErrorReader) Read(p []byte) (n int, err error) {
+ // TestReadForm_NonFileMaxMemory asserts that the ReadForm maxMemory limit is applied
+ // while processing non-file form data as well as file form data.
+ func TestReadForm_NonFileMaxMemory(t *testing.T) {
+- n := 10<<20 + 25
+ if testing.Short() {
+- n = 10<<10 + 25
++ t.Skip("skipping in -short mode")
+ }
++ n := 10 << 20
+ largeTextValue := strings.Repeat("1", n)
+ message := `--MyBoundary
+ Content-Disposition: form-data; name="largetext"
+@@ -203,38 +203,29 @@ Content-Disposition: form-data; name="largetext"
+ ` + largeTextValue + `
+ --MyBoundary--
+ `
+-
+ testBody := strings.ReplaceAll(message, "\n", "\r\n")
+- testCases := []struct {
+- name string
+- maxMemory int64
+- err error
+- }{
+- {"smaller", 50 + int64(len("largetext")) + 100, nil},
+- {"exact-fit", 25 + int64(len("largetext")) + 100, nil},
+- {"too-large", 0, ErrMessageTooLarge},
+- }
+- for _, tc := range testCases {
+- t.Run(tc.name, func(t *testing.T) {
+- if tc.maxMemory == 0 && testing.Short() {
+- t.Skip("skipping in -short mode")
+- }
+- b := strings.NewReader(testBody)
+- r := NewReader(b, boundary)
+- f, err := r.ReadForm(tc.maxMemory)
+- if err == nil {
+- defer f.RemoveAll()
+- }
+- if tc.err != err {
+- t.Fatalf("ReadForm error - got: %v; expected: %v", err, tc.err)
+- }
+- if err == nil {
+- if g := f.Value["largetext"][0]; g != largeTextValue {
+- t.Errorf("largetext mismatch: got size: %v, expected size: %v", len(g), len(largeTextValue))
+- }
+- }
+- })
++ // Try parsing the form with increasing maxMemory values.
++ // Changes in how we account for non-file form data may cause the exact point
++ // where we change from rejecting the form as too large to accepting it to vary,
++ // but we should see both successes and failures.
++ const failWhenMaxMemoryLessThan = 128
++ for maxMemory := int64(0); maxMemory < failWhenMaxMemoryLessThan*2; maxMemory += 16 {
++ b := strings.NewReader(testBody)
++ r := NewReader(b, boundary)
++ f, err := r.ReadForm(maxMemory)
++ if err != nil {
++ continue
++ }
++ if g := f.Value["largetext"][0]; g != largeTextValue {
++ t.Errorf("largetext mismatch: got size: %v, expected size: %v", len(g), len(largeTextValue))
++ }
++ f.RemoveAll()
++ if maxMemory < failWhenMaxMemoryLessThan {
++ t.Errorf("ReadForm(%v): no error, expect to hit memory limit when maxMemory < %v", maxMemory, failWhenMaxMemoryLessThan)
++ }
++ return
+ }
++ t.Errorf("ReadForm(x) failed for x < 1024, expect success")
+ }
+
+ // TestReadForm_MetadataTooLarge verifies that we account for the size of field names,
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index fcbede8..9af4c49 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -503,6 +503,12 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+
+ m := make(MIMEHeader, hint)
+
++ // Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry.
++ // Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large
++ // MIMEHeaders average about 200 bytes per entry.
++ lim -= 400
++ const mapEntryOverhead = 200
++
+ // The first line cannot start with a leading space.
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
+ line, err := r.readLineSlice()
+@@ -552,7 +558,7 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ vv := m[key]
+ if vv == nil {
+ lim -= int64(len(key))
+- lim -= 100 // map entry overhead
++ lim -= mapEntryOverhead
+ }
+ lim -= int64(len(value))
+ if lim < 0 {
+--
+2.35.5
+
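
With the revised accounting, each distinct form key is charged its own length plus a flat 200-byte map-entry overhead, and each file part additionally pays for its MIME header (400 bytes base plus 200 per header entry) and a 100-byte FileHeader. A rough back-of-the-envelope sketch of what non-file fields cost against maxMemory; the constants are copied from the patch, but the helper itself is illustrative.

    package main

    import "fmt"

    const mapEntryOverhead = 200 // per distinct key, as in the patch

    // nonFileCharge approximates what one new non-file field costs
    // against ReadForm's maxMemory budget under the patched accounting.
    func nonFileCharge(name, value string) int64 {
        return int64(len(name)) + mapEntryOverhead + int64(len(value))
    }

    func main() {
        // 1000 small fields: the values are ~10 bytes each, but the
        // accounted cost is dominated by the per-entry overhead.
        var total int64
        for i := 0; i < 1000; i++ {
            total += nonFileCharge(fmt.Sprintf("field%d", i), "0123456789")
        }
        fmt.Println(total) // roughly 200 KB charged, not ~10 KB of raw data
    }
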
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_3.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_3.patch
new file mode 100644
index 0000000000..6de04e9a61
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-24536_3.patch
@@ -0,0 +1,349 @@
+From ec763bc936f76cec0fe71a791c6bb7d4ac5f3e46 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 20 Mar 2023 10:43:19 -0700
+Subject: [PATCH 3/3] mime/multipart: limit parsed mime message sizes
+
+The parsed forms of MIME headers and multipart forms can consume
+substantially more memory than the size of the input data.
+A malicious input containing a very large number of headers or
+form parts can cause excessively large memory allocations.
+
+Set limits on the size of MIME data:
+
+Reader.NextPart and Reader.NextRawPart limit the number
+of headers in a part to 10000.
+
+Reader.ReadForm limits the total number of headers in all
+FileHeaders to 10000.
+
+Both of these limits may be set with
+GODEBUG=multipartmaxheaders=<value>.
+
+Reader.ReadForm limits the number of parts in a form to 1000.
+This limit may be set with GODEBUG=multipartmaxparts=<value>.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802455
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1801087
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: If134890d75f0d95c681d67234daf191ba08e6424
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481985
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+CVE: CVE-2023-24536
+Upstream-Status: Backport [7917b5f31204528ea72e0629f0b7d52b35b27538]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/mime/multipart/formdata.go | 19 ++++++++-
+ src/mime/multipart/formdata_test.go | 61 ++++++++++++++++++++++++++++
+ src/mime/multipart/multipart.go | 31 ++++++++++----
+ src/mime/multipart/readmimeheader.go | 2 +-
+ src/net/textproto/reader.go | 19 +++++----
+ 5 files changed, 115 insertions(+), 17 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 3f6ff69..4f26aab 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -12,6 +12,7 @@ import (
+ "math"
+ "net/textproto"
+ "os"
++ "strconv"
+ )
+
+ // ErrMessageTooLarge is returned by ReadForm if the message form
+@@ -41,6 +42,15 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ numDiskFiles := 0
+ multipartFiles := godebug.Get("multipartfiles")
+ combineFiles := multipartFiles != "distinct"
++ maxParts := 1000
++ multipartMaxParts := godebug.Get("multipartmaxparts")
++ if multipartMaxParts != "" {
++ if v, err := strconv.Atoi(multipartMaxParts); err == nil && v >= 0 {
++ maxParts = v
++ }
++ }
++ maxHeaders := maxMIMEHeaders()
++
+ defer func() {
+ if file != nil {
+ if cerr := file.Close(); err == nil {
+@@ -86,13 +96,17 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ var copyBuf []byte
+ for {
+- p, err := r.nextPart(false, maxMemoryBytes)
++ p, err := r.nextPart(false, maxMemoryBytes, maxHeaders)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
++ if maxParts <= 0 {
++ return nil, ErrMessageTooLarge
++ }
++ maxParts--
+
+ name := p.FormName()
+ if name == "" {
+@@ -136,6 +150,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
++ for _, v := range p.Header {
++ maxHeaders -= int64(len(v))
++ }
+ fh := &FileHeader{
+ Filename: filename,
+ Header: p.Header,
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 8ed26e0..c78eeb7 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -360,6 +360,67 @@ func testReadFormManyFiles(t *testing.T, distinct bool) {
+ }
+ }
+
++func TestReadFormLimits(t *testing.T) {
++ for _, test := range []struct {
++ values int
++ files int
++ extraKeysPerFile int
++ wantErr error
++ godebug string
++ }{
++ {values: 1000},
++ {values: 1001, wantErr: ErrMessageTooLarge},
++ {values: 500, files: 500},
++ {values: 501, files: 500, wantErr: ErrMessageTooLarge},
++ {files: 1000},
++ {files: 1001, wantErr: ErrMessageTooLarge},
++ {files: 1, extraKeysPerFile: 9998}, // plus Content-Disposition and Content-Type
++ {files: 1, extraKeysPerFile: 10000, wantErr: ErrMessageTooLarge},
++ {godebug: "multipartmaxparts=100", values: 100},
++ {godebug: "multipartmaxparts=100", values: 101, wantErr: ErrMessageTooLarge},
++ {godebug: "multipartmaxheaders=100", files: 2, extraKeysPerFile: 48},
++ {godebug: "multipartmaxheaders=100", files: 2, extraKeysPerFile: 50, wantErr: ErrMessageTooLarge},
++ } {
++ name := fmt.Sprintf("values=%v/files=%v/extraKeysPerFile=%v", test.values, test.files, test.extraKeysPerFile)
++ if test.godebug != "" {
++ name += fmt.Sprintf("/godebug=%v", test.godebug)
++ }
++ t.Run(name, func(t *testing.T) {
++ if test.godebug != "" {
++ t.Setenv("GODEBUG", test.godebug)
++ }
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ for i := 0; i < test.values; i++ {
++ w, _ := fw.CreateFormField(fmt.Sprintf("field%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ for i := 0; i < test.files; i++ {
++ h := make(textproto.MIMEHeader)
++ h.Set("Content-Disposition",
++ fmt.Sprintf(`form-data; name="file%v"; filename="file%v"`, i, i))
++ h.Set("Content-Type", "application/octet-stream")
++ for j := 0; j < test.extraKeysPerFile; j++ {
++ h.Set(fmt.Sprintf("k%v", j), "v")
++ }
++ w, _ := fw.CreatePart(h)
++ fmt.Fprintf(w, "value %v", i)
++ }
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(bytes.NewReader(buf.Bytes()), fw.Boundary())
++ form, err := fr.ReadForm(1 << 10)
++ if err == nil {
++ defer form.RemoveAll()
++ }
++ if err != test.wantErr {
++ t.Errorf("ReadForm = %v, want %v", err, test.wantErr)
++ }
++ })
++ }
++}
++
+ func BenchmarkReadForm(b *testing.B) {
+ for _, test := range []struct {
+ name string
+diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go
+index 19fe0ea..80acabc 100644
+--- a/src/mime/multipart/multipart.go
++++ b/src/mime/multipart/multipart.go
+@@ -16,11 +16,13 @@ import (
+ "bufio"
+ "bytes"
+ "fmt"
++ "internal/godebug"
+ "io"
+ "mime"
+ "mime/quotedprintable"
+ "net/textproto"
+ "path/filepath"
++ "strconv"
+ "strings"
+ )
+
+@@ -128,12 +130,12 @@ func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
+ return n, r.err
+ }
+
+-func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
++func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ }
+- if err := bp.populateHeaders(maxMIMEHeaderSize); err != nil {
++ if err := bp.populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders); err != nil {
+ return nil, err
+ }
+ bp.r = partReader{bp}
+@@ -149,9 +151,9 @@ func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ return bp, nil
+ }
+
+-func (bp *Part) populateHeaders(maxMIMEHeaderSize int64) error {
++func (bp *Part) populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders int64) error {
+ r := textproto.NewReader(bp.mr.bufReader)
+- header, err := readMIMEHeader(r, maxMIMEHeaderSize)
++ header, err := readMIMEHeader(r, maxMIMEHeaderSize, maxMIMEHeaders)
+ if err == nil {
+ bp.Header = header
+ }
+@@ -313,6 +315,19 @@ type Reader struct {
+ // including header keys, values, and map overhead.
+ const maxMIMEHeaderSize = 10 << 20
+
++func maxMIMEHeaders() int64 {
++ // multipartMaxHeaders is the maximum number of header entries NextPart will return,
++ // as well as the maximum combined total of header entries Reader.ReadForm will return
++ // in FileHeaders.
++ multipartMaxHeaders := godebug.Get("multipartmaxheaders")
++ if multipartMaxHeaders != "" {
++ if v, err := strconv.ParseInt(multipartMaxHeaders, 10, 64); err == nil && v >= 0 {
++ return v
++ }
++ }
++ return 10000
++}
++
+ // NextPart returns the next part in the multipart or an error.
+ // When there are no more parts, the error io.EOF is returned.
+ //
+@@ -320,7 +335,7 @@ const maxMIMEHeaderSize = 10 << 20
+ // has a value of "quoted-printable", that header is instead
+ // hidden and the body is transparently decoded during Read calls.
+ func (r *Reader) NextPart() (*Part, error) {
+- return r.nextPart(false, maxMIMEHeaderSize)
++ return r.nextPart(false, maxMIMEHeaderSize, maxMIMEHeaders())
+ }
+
+ // NextRawPart returns the next part in the multipart or an error.
+@@ -329,10 +344,10 @@ func (r *Reader) NextPart() (*Part, error) {
+ // Unlike NextPart, it does not have special handling for
+ // "Content-Transfer-Encoding: quoted-printable".
+ func (r *Reader) NextRawPart() (*Part, error) {
+- return r.nextPart(true, maxMIMEHeaderSize)
++ return r.nextPart(true, maxMIMEHeaderSize, maxMIMEHeaders())
+ }
+
+-func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
++func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
+ }
+@@ -357,7 +372,7 @@ func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error)
+
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+- bp, err := newPart(r, rawPart, maxMIMEHeaderSize)
++ bp, err := newPart(r, rawPart, maxMIMEHeaderSize, maxMIMEHeaders)
+ if err != nil {
+ return nil, err
+ }
+diff --git a/src/mime/multipart/readmimeheader.go b/src/mime/multipart/readmimeheader.go
+index 6836928..25aa6e2 100644
+--- a/src/mime/multipart/readmimeheader.go
++++ b/src/mime/multipart/readmimeheader.go
+@@ -11,4 +11,4 @@ import (
+ // readMIMEHeader is defined in package net/textproto.
+ //
+ //go:linkname readMIMEHeader net/textproto.readMIMEHeader
+-func readMIMEHeader(r *textproto.Reader, lim int64) (textproto.MIMEHeader, error)
++func readMIMEHeader(r *textproto.Reader, maxMemory, maxHeaders int64) (textproto.MIMEHeader, error)
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index 9af4c49..c6569c8 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -483,12 +483,12 @@ func (r *Reader) ReadDotLines() ([]string, error) {
+ // }
+ //
+ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+- return readMIMEHeader(r, math.MaxInt64)
++ return readMIMEHeader(r, math.MaxInt64, math.MaxInt64)
+ }
+
+ // readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size.
+ // It is called by the mime/multipart package.
+-func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
++func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) {
+ // Avoid lots of small slice allocations later by allocating one
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+@@ -506,7 +506,7 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ // Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry.
+ // Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large
+ // MIMEHeaders average about 200 bytes per entry.
+- lim -= 400
++ maxMemory -= 400
+ const mapEntryOverhead = 200
+
+ // The first line cannot start with a leading space.
+@@ -538,6 +538,11 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ continue
+ }
+
++ maxHeaders--
++ if maxHeaders < 0 {
++ return nil, errors.New("message too large")
++ }
++
+ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
+ //
+ // value is computed as
+@@ -557,11 +562,11 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+
+ vv := m[key]
+ if vv == nil {
+- lim -= int64(len(key))
+- lim -= mapEntryOverhead
++ maxMemory -= int64(len(key))
++ maxMemory -= mapEntryOverhead
+ }
+- lim -= int64(len(value))
+- if lim < 0 {
++ maxMemory -= int64(len(value))
++ if maxMemory < 0 {
+ // TODO: This should be a distinguishable error (ErrMessageTooLarge)
+ // to allow mime/multipart to detect it.
+ return m, errors.New("message too large")
+--
+2.35.5
+
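
With this third patch in place, ReadForm refuses forms with more than 1000 parts (or more than 10000 header entries across all FileHeaders) by default, returning ErrMessageTooLarge; the test above raises and lowers those limits through GODEBUG. A small sketch of hitting the parts limit directly; boundary handling and field names are arbitrary, and the printed result assumes a Go toolchain that carries this backport.

    package main

    import (
        "bytes"
        "errors"
        "fmt"
        "mime/multipart"
    )

    func main() {
        var buf bytes.Buffer
        w := multipart.NewWriter(&buf)
        for i := 0; i < 1001; i++ { // one part over the default limit
            fw, _ := w.CreateFormField(fmt.Sprintf("field%d", i))
            fmt.Fprintf(fw, "v%d", i)
        }
        if err := w.Close(); err != nil {
            panic(err)
        }

        r := multipart.NewReader(&buf, w.Boundary())
        _, err := r.ReadForm(10 << 20)
        fmt.Println(errors.Is(err, multipart.ErrMessageTooLarge)) // expected: true
    }
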
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-24540.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-24540.patch
new file mode 100644
index 0000000000..7e6e871e38
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-24540.patch
@@ -0,0 +1,93 @@
+From 2305cdb2aa5ac8e9960bd64e548a119c7dd87530 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Tue, 11 Apr 2023 16:27:43 +0100
+Subject: [PATCH] html/template: handle all JS whitespace characters
+
+Rather than just a small set: the character class as defined by \s [0].
+
+Thanks to Juho Nurminen of Mattermost for reporting this.
+
+For #59721
+Fixes #59813
+Fixes CVE-2023-24540
+
+[0] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions/Character_Classes
+
+Change-Id: I56d4fa1ef08125b417106ee7dbfb5b0923b901ba
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1821459
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1851497
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491355
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+TryBot-Bypass: Carlos Amedee <carlos@golang.org>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+
+CVE: CVE-2023-24540
+Upstream-Status: Backport [https://github.com/golang/go/commit/ce7bd33345416e6d8cac901792060591cafc2797]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/html/template/js.go | 8 +++++++-
+ src/html/template/js_test.go | 11 +++++++----
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index b888eaf..35994f0 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -13,6 +13,11 @@ import (
+ "unicode/utf8"
+ )
+
++// jsWhitespace contains all of the JS whitespace characters, as defined
++// by the \s character class.
++// See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_expressions/Character_classes.
++const jsWhitespace = "\f\n\r\t\v\u0020\u00a0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000\ufeff"
++
+ // nextJSCtx returns the context that determines whether a slash after the
+ // given run of tokens starts a regular expression instead of a division
+ // operator: / or /=.
+@@ -26,7 +31,8 @@ import (
+ // JavaScript 2.0 lexical grammar and requires one token of lookbehind:
+ // https://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
+ func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
+- s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
++ // Trim all JS whitespace characters
++ s = bytes.TrimRight(s, jsWhitespace)
+ if len(s) == 0 {
+ return preceding
+ }
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index d7ee47b..8f5d76d 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -81,14 +81,17 @@ func TestNextJsCtx(t *testing.T) {
+ {jsCtxDivOp, "0"},
+ // Dots that are part of a number are div preceders.
+ {jsCtxDivOp, "0."},
++ // Some JS interpreters treat NBSP as a normal space, so
++ // we must too in order to properly escape things.
++ {jsCtxRegexp, "=\u00A0"},
+ }
+
+ for _, test := range tests {
+- if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
+- t.Errorf("want %s got %q", test.jsCtx, test.s)
++ if ctx := nextJSCtx([]byte(test.s), jsCtxRegexp); ctx != test.jsCtx {
++ t.Errorf("%q: want %s got %s", test.s, test.jsCtx, ctx)
+ }
+- if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
+- t.Errorf("want %s got %q", test.jsCtx, test.s)
++ if ctx := nextJSCtx([]byte(test.s), jsCtxDivOp); ctx != test.jsCtx {
++ t.Errorf("%q: want %s got %s", test.s, test.jsCtx, ctx)
+ }
+ }
+
+--
+2.40.0
+
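
The fix widens the set of characters stripped when deciding whether the trailing token before a "/" makes it a regular expression or a division operator; previously a non-breaking space (U+00A0) or other exotic JS whitespace hid the real last token. A standalone sketch of the trimming itself: the jsWhitespace constant is copied from the patch, while the harness and the context decision described in the comment are a simplification of nextJSCtx.

    package main

    import (
        "bytes"
        "fmt"
    )

    // jsWhitespace is the \s character class, as in the patch.
    const jsWhitespace = "\f\n\r\t\v\u0020\u00a0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000\ufeff"

    func main() {
        before := bytes.TrimRight([]byte("=\u00a0"), "\t\n\f\r \u2028\u2029")
        after := bytes.TrimRight([]byte("=\u00a0"), jsWhitespace)
        // Before the fix the NBSP survives and "=" is never seen as the
        // last token; after it, the trailing "=" correctly signals that a
        // following "/" starts a regular expression, not a division.
        fmt.Printf("old cutset: %q, new cutset: %q\n", before, after)
    }
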
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-29402.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-29402.patch
new file mode 100644
index 0000000000..bf1fbbe0d6
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-29402.patch
@@ -0,0 +1,194 @@
+From 4dae3bbe0e6a5700037bb996ae84d6f457c4f58a Mon Sep 17 00:00:00 2001
+From: Bryan C. Mills <bcmills@google.com>
+Date: Fri, 12 May 2023 14:15:16 -0400
+Subject: [PATCH] cmd/go: disallow package directories containing newlines
+
+Directory or file paths containing newlines may cause tools (such as
+cmd/cgo) that emit "//line" or "#line" directives to write part of
+the path into non-comment lines in generated source code. If those
+lines contain valid Go code, it may be injected into the resulting
+binary.
+
+(Note that Go import paths and file paths within module zip files
+already could not contain newlines.)
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #60167.
+Fixes CVE-2023-29402.
+
+Change-Id: I64572e9f454bce7b685d00e2e6a1c96cd33d53df
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1882606
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Russ Cox <rsc@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501226
+Run-TryBot: David Chase <drchase@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/4dae3bbe0e6a5700037bb996ae84d6f457c4f58a]
+CVE: CVE-2023-29402
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ src/cmd/go/internal/load/pkg.go | 4 +
+ src/cmd/go/internal/work/exec.go | 6 ++
+ src/cmd/go/script_test.go | 1 +
+ .../go/testdata/script/build_cwd_newline.txt | 100 ++++++++++++++++++
+ 4 files changed, 111 insertions(+)
+ create mode 100644 src/cmd/go/testdata/script/build_cwd_newline.txt
+
+diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
+index a83cc9a..d4da86d 100644
+--- a/src/cmd/go/internal/load/pkg.go
++++ b/src/cmd/go/internal/load/pkg.go
+@@ -1897,6 +1897,10 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *
+ setError(fmt.Errorf("invalid input directory name %q", name))
+ return
+ }
++ if strings.ContainsAny(p.Dir, "\r\n") {
++ setError(fmt.Errorf("invalid package directory %q", p.Dir))
++ return
++ }
+
+ // Build list of imported packages and full dependency list.
+ imports := make([]*Package, 0, len(p.Imports))
+diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
+index b35caa4..b1bf347 100644
+--- a/src/cmd/go/internal/work/exec.go
++++ b/src/cmd/go/internal/work/exec.go
+@@ -505,6 +505,12 @@ func (b *Builder) build(ctx context.Context, a *Action) (err error) {
+ b.Print(a.Package.ImportPath + "\n")
+ }
+
++ if p.Error != nil {
++ // Don't try to build anything for packages with errors. There may be a
++ // problem with the inputs that makes the package unsafe to build.
++ return p.Error
++ }
++
+ if a.Package.BinaryOnly {
+ p.Stale = true
+ p.StaleReason = "binary-only packages are no longer supported"
+diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
+index c0156d0..ce4ff37 100644
+--- a/src/cmd/go/script_test.go
++++ b/src/cmd/go/script_test.go
+@@ -182,6 +182,7 @@ func (ts *testScript) setup() {
+ "devnull=" + os.DevNull,
+ "goversion=" + goVersion(ts),
+ ":=" + string(os.PathListSeparator),
++ "newline=\n",
+ }
+ if !testenv.HasExternalNetwork() {
+ ts.env = append(ts.env, "TESTGONETWORK=panic", "TESTGOVCS=panic")
+diff --git a/src/cmd/go/testdata/script/build_cwd_newline.txt b/src/cmd/go/testdata/script/build_cwd_newline.txt
+new file mode 100644
+index 0000000..61c6966
+--- /dev/null
++++ b/src/cmd/go/testdata/script/build_cwd_newline.txt
+@@ -0,0 +1,100 @@
++[windows] skip 'filesystem normalizes / to \'
++[plan9] skip 'filesystem disallows \n in paths'
++
++# If the directory path containing a package to be built includes a newline,
++# the go command should refuse to even try to build the package.
++
++env DIR=$WORK${/}${newline}'package main'${newline}'func main() { panic("uh-oh")'${newline}'/*'
++
++mkdir $DIR
++cd $DIR
++exec pwd
++cp $WORK/go.mod ./go.mod
++cp $WORK/main.go ./main.go
++cp $WORK/main_test.go ./main_test.go
++
++! go build -o $devnull .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go build -o $devnull main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++! go run .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go run main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++! go test .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go test -v main.go main_test.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++
++# Since we do preserve $PWD (or set it appropriately) for commands, and we do
++# not resolve symlinks unnecessarily, referring to the contents of the unsafe
++# directory via a safe symlink should be ok, and should not inject the data from
++# the symlink target path.
++
++[!symlink] stop 'remainder of test checks symlink behavior'
++[short] stop 'links and runs binaries'
++
++symlink $WORK${/}link -> $DIR
++
++go run $WORK${/}link${/}main.go
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go test -v $WORK${/}link${/}main.go $WORK${/}link${/}main_test.go
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++cd $WORK/link
++
++! go run $DIR${/}main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++go run .
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go run main.go
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go test -v
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++go test -v .
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++
++-- $WORK/go.mod --
++module example
++go 1.19
++-- $WORK/main.go --
++package main
++
++import "C"
++
++func main() {
++ /* nothing here */
++ println("ok")
++}
++-- $WORK/main_test.go --
++package main
++
++import "testing"
++
++func TestMain(*testing.M) {
++ main()
++}
+--
+2.40.0
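
The attack this closes is line-directive injection: cmd/cgo writes the package directory into //line comments, so a directory name containing a newline lets the rest of the name continue on a fresh, non-comment line of generated Go. A toy sketch of the emission problem; the directory string is deliberately hostile, and the Sprintf is a stand-in for what a code generator might print, not cgo's actual output routine. The go command side of the fix is simply the strings.ContainsAny(p.Dir, "\r\n") guard shown above, which refuses to build such a package at all.

    package main

    import "fmt"

    func main() {
        dir := "/tmp/demo\npackage main\nfunc init() { println(\"injected\") }\n//"
        // A generator that pastes the path verbatim would emit the payload
        // as real top-level Go code rather than as part of a comment:
        fmt.Printf("//line %s/main.go:1\n", dir)
    }
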
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-29404.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-29404.patch
new file mode 100644
index 0000000000..c6beced884
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-29404.patch
@@ -0,0 +1,78 @@
+From bbeb55f5faf93659e1cfd6ab073ab3c9d126d195 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Fri, 5 May 2023 13:10:34 -0700
+Subject: [PATCH] cmd/go: enforce flags with non-optional arguments
+
+Enforce that linker flags which expect arguments get them, otherwise it
+may be possible to smuggle unexpected flags through as the linker can
+consume what looks like a flag as an argument to a preceding flag (i.e.
+"-Wl,-O -Wl,-R,-bad-flag" is interpreted as "-O=-R -bad-flag"). Also be
+somewhat more restrictive in the general format of some flags.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #60305
+Fixes CVE-2023-29404
+
+Change-Id: I913df78a692cee390deefc3cd7d8f5b031524fc9
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1876275
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501225
+Run-TryBot: David Chase <drchase@google.com>
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bbeb55f5faf93659e1cfd6ab073ab3c9d126d195]
+CVE: CVE-2023-29404
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ src/cmd/go/internal/work/security.go | 6 +++---
+ src/cmd/go/internal/work/security_test.go | 5 +++++
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
+index e9b9f6c..91e6e4c 100644
+--- a/src/cmd/go/internal/work/security.go
++++ b/src/cmd/go/internal/work/security.go
+@@ -179,10 +179,10 @@ var validLinkerFlags = []*lazyregexp.Regexp{
+ re(`-Wl,-berok`),
+ re(`-Wl,-Bstatic`),
+ re(`-Wl,-Bsymbolic-functions`),
+- re(`-Wl,-O([^@,\-][^,]*)?`),
++ re(`-Wl,-O[0-9]+`),
+ re(`-Wl,-d[ny]`),
+ re(`-Wl,--disable-new-dtags`),
+- re(`-Wl,-e[=,][a-zA-Z0-9]*`),
++ re(`-Wl,-e[=,][a-zA-Z0-9]+`),
+ re(`-Wl,--enable-new-dtags`),
+ re(`-Wl,--end-group`),
+ re(`-Wl,--(no-)?export-dynamic`),
+@@ -191,7 +191,7 @@ var validLinkerFlags = []*lazyregexp.Regexp{
+ re(`-Wl,--hash-style=(sysv|gnu|both)`),
+ re(`-Wl,-headerpad_max_install_names`),
+ re(`-Wl,--no-undefined`),
+- re(`-Wl,-R([^@\-][^,@]*$)`),
++ re(`-Wl,-R,?([^@\-,][^,@]*$)`),
+ re(`-Wl,--just-symbols[=,]([^,@\-][^,@]+)`),
+ re(`-Wl,-rpath(-link)?[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-s`),
+diff --git a/src/cmd/go/internal/work/security_test.go b/src/cmd/go/internal/work/security_test.go
+index 8d4be0a..3616548 100644
+--- a/src/cmd/go/internal/work/security_test.go
++++ b/src/cmd/go/internal/work/security_test.go
+@@ -227,6 +227,11 @@ var badLinkerFlags = [][]string{
+ {"-Wl,-R,@foo"},
+ {"-Wl,--just-symbols,@foo"},
+ {"../x.o"},
++ {"-Wl,-R,"},
++ {"-Wl,-O"},
++ {"-Wl,-e="},
++ {"-Wl,-e,"},
++ {"-Wl,-R,-flag"},
+ }
+
+ func TestCheckLinkerFlags(t *testing.T) {
+--
+2.40.0
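For illustration only (editorial sketch, not part of the patch above): a tiny Go program showing the effect of the tightened -Wl,-O pattern the commit message describes. The two regular expressions are copied from the security.go hunk; the rest is an assumption-free demo.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old pattern (argument optional) vs. the tightened pattern from the patch.
	oldO := regexp.MustCompile(`^-Wl,-O([^@,\-][^,]*)?$`)
	newO := regexp.MustCompile(`^-Wl,-O[0-9]+$`)

	for _, flag := range []string{"-Wl,-O2", "-Wl,-O"} {
		fmt.Printf("%-9s old:%v new:%v\n", flag, oldO.MatchString(flag), newO.MatchString(flag))
	}
	// "-Wl,-O" passes the old pattern, so a bare -O could consume the next
	// -Wl token as its argument; the new pattern rejects it.
}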
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-29405.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-29405.patch
new file mode 100644
index 0000000000..d806e1e67d
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-29405.patch
@@ -0,0 +1,109 @@
+From 6d8af00a630aa51134e54f0f321658621c6410f0 Mon Sep 17 00:00:00 2001
+From: Ian Lance Taylor <iant@golang.org>
+Date: Thu, 4 May 2023 14:06:39 -0700
+Subject: [PATCH] cmd/go,cmd/cgo: in _cgo_flags use one line per flag
+
+The flags that we recorded in _cgo_flags did not use any quoting,
+so a flag containing embedded spaces was mishandled.
+Change the _cgo_flags format to put each flag on a separate line.
+That is a simple format that does not require any quoting.
+
+As far as I can tell only cmd/go uses _cgo_flags, and it is only
+used for gccgo. If this patch doesn't cause any trouble, then
+in the next release we can change to only using _cgo_flags for gccgo.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #60306
+Fixes CVE-2023-29405
+
+Change-Id: I81fb5337db8a22e1f4daca22ceff4b79b96d0b4f
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1875094
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501224
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Run-TryBot: David Chase <drchase@google.com>
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/6d8af00a630aa51134e54f0f321658621c6410f0]
+CVE: CVE-2023-29405
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ src/cmd/cgo/out.go | 4 +++-
+ src/cmd/go/internal/work/gccgo.go | 14 ++++++-------
+ .../go/testdata/script/gccgo_link_ldflags.txt | 20 +++++++++++++++++++
+ 3 files changed, 29 insertions(+), 9 deletions(-)
+ create mode 100644 src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+
+diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
+index 94152f4..62e6528 100644
+--- a/src/cmd/cgo/out.go
++++ b/src/cmd/cgo/out.go
+@@ -47,7 +47,9 @@ func (p *Package) writeDefs() {
+
+ fflg := creat(*objDir + "_cgo_flags")
+ for k, v := range p.CgoFlags {
+- fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, strings.Join(v, " "))
++ for _, arg := range v {
++ fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, arg)
++ }
+ if k == "LDFLAGS" && !*gccgo {
+ for _, arg := range v {
+ fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
+diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go
+index 1499536..bb4be2f 100644
+--- a/src/cmd/go/internal/work/gccgo.go
++++ b/src/cmd/go/internal/work/gccgo.go
+@@ -283,14 +283,12 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string
+ const ldflagsPrefix = "_CGO_LDFLAGS="
+ for _, line := range strings.Split(string(flags), "\n") {
+ if strings.HasPrefix(line, ldflagsPrefix) {
+- newFlags := strings.Fields(line[len(ldflagsPrefix):])
+- for _, flag := range newFlags {
+- // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
+- // but they don't mean anything to the linker so filter
+- // them out.
+- if flag != "-g" && !strings.HasPrefix(flag, "-O") {
+- cgoldflags = append(cgoldflags, flag)
+- }
++ flag := line[len(ldflagsPrefix):]
++ // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
++ // but they don't mean anything to the linker so filter
++ // them out.
++ if flag != "-g" && !strings.HasPrefix(flag, "-O") {
++ cgoldflags = append(cgoldflags, flag)
+ }
+ }
+ }
+diff --git a/src/cmd/go/testdata/script/gccgo_link_ldflags.txt b/src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+new file mode 100644
+index 0000000..4e91ae5
+--- /dev/null
++++ b/src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+@@ -0,0 +1,20 @@
++# Test that #cgo LDFLAGS are properly quoted.
++# The #cgo LDFLAGS below should pass a string with spaces to -L,
++# as though searching a directory with a space in its name.
++# It should not pass --nosuchoption to the external linker.
++
++[!cgo] skip
++
++go build
++
++[!exec:gccgo] skip
++
++go build -compiler gccgo
++
++-- go.mod --
++module m
++-- cgo.go --
++package main
++// #cgo LDFLAGS: -L "./ -Wl,--nosuchoption"
++import "C"
++func main() {}
+--
+2.40.0
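For illustration only (editorial sketch, not part of the patch above): the quoting problem the commit message describes. The flag values below are made up; the point is that strings.Fields on a single joined _CGO_LDFLAGS line splits a flag containing a space, while the new one-flag-per-line format preserves it.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One flag (a -L search path) contains an embedded space.
	flags := []string{"-g", "-O2", "-L", "./dir with space"}

	// Old _cgo_flags format: everything on one line, later re-split with
	// strings.Fields, which breaks the space-containing flag apart.
	joined := strings.Join(flags, " ")
	fmt.Println(strings.Fields(joined)) // 6 tokens instead of 4 flags

	// New format: one flag per line, read back verbatim, no quoting needed.
	for _, f := range flags {
		fmt.Printf("_CGO_LDFLAGS=%s\n", f)
	}
}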
diff --git a/meta/recipes-devtools/go/go-1.19/CVE-2023-29409.patch b/meta/recipes-devtools/go/go-1.19/CVE-2023-29409.patch
new file mode 100644
index 0000000000..38451f7555
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/CVE-2023-29409.patch
@@ -0,0 +1,175 @@
+From 2300f7ef07718f6be4d8aa8486c7de99836e233f Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 23 Aug 2023 12:03:43 +0000
+Subject: [PATCH] crypto/tls: restrict RSA keys in certificates to <= 8192 bits
+
+Extremely large RSA keys in certificate chains can cause a client/server
+to expend significant CPU time verifying signatures. Limit this by
+restricting the size of RSA keys transmitted during handshakes to <=
+8192 bits.
+
+Based on a survey of publicly trusted RSA keys, there are currently only
+three certificates in circulation with keys larger than this, and all
+three appear to be test certificates that are not actively deployed. It
+is possible there are larger keys in use in private PKIs, but we target
+the web PKI, so causing breakage here in the interests of increasing the
+default safety of users of crypto/tls seems reasonable.
+
+Thanks to Mateusz Poliwczak for reporting this issue.
+
+Updates #61460
+Fixes #61579
+Fixes CVE-2023-29409
+
+Change-Id: Ie35038515a649199a36a12fc2c5df3af855dca6c
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1912161
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit d865c715d92887361e4bd5596e19e513f27781b7)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1965487
+Reviewed-on: https://go-review.googlesource.com/c/go/+/514915
+Run-TryBot: David Chase <drchase@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+TryBot-Bypass: David Chase <drchase@google.com>
+
+CVE: CVE-2023-29409
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2300f7ef07718f6be4d8aa8486c7de99836e233f]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/crypto/tls/handshake_client.go | 8 +++
+ src/crypto/tls/handshake_client_test.go | 78 +++++++++++++++++++++++++
+ src/crypto/tls/handshake_server.go | 4 ++
+ 3 files changed, 90 insertions(+)
+
+diff --git a/src/crypto/tls/handshake_client.go b/src/crypto/tls/handshake_client.go
+index 85622f1..828d2cb 100644
+--- a/src/crypto/tls/handshake_client.go
++++ b/src/crypto/tls/handshake_client.go
+@@ -852,6 +852,10 @@ func (hs *clientHandshakeState) sendFinished(out []byte) error {
+ return nil
+ }
+
++// maxRSAKeySize is the maximum RSA key size in bits that we are willing
++// to verify the signatures of during a TLS handshake.
++const maxRSAKeySize = 8192
++
+ // verifyServerCertificate parses and verifies the provided chain, setting
+ // c.verifiedChains and c.peerCertificates or sending the appropriate alert.
+ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+@@ -862,6 +866,10 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse certificate from server: " + err.Error())
+ }
++ if cert.PublicKeyAlgorithm == x509.RSA && cert.PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
++ c.sendAlert(alertBadCertificate)
++ return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
++ }
+ certs[i] = cert
+ }
+
+diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go
+index 0228745..d581cb1 100644
+--- a/src/crypto/tls/handshake_client_test.go
++++ b/src/crypto/tls/handshake_client_test.go
+@@ -2595,3 +2595,81 @@ func TestClientHandshakeContextCancellation(t *testing.T) {
+ t.Error("Client connection was not closed when the context was canceled")
+ }
+ }
++
++// discardConn wraps a net.Conn but discards all writes, but reports that they happened.
++type discardConn struct {
++ net.Conn
++}
++
++func (dc *discardConn) Write(data []byte) (int, error) {
++ return len(data), nil
++}
++
++// largeRSAKeyCertPEM contains a 8193 bit RSA key
++const largeRSAKeyCertPEM = `-----BEGIN CERTIFICATE-----
++MIIInjCCBIWgAwIBAgIBAjANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDEwd0ZXN0
++aW5nMB4XDTIzMDYwNzIxMjMzNloXDTIzMDYwNzIzMjMzNlowEjEQMA4GA1UEAxMH
++dGVzdGluZzCCBCIwDQYJKoZIhvcNAQEBBQADggQPADCCBAoCggQBAWdHsf6Rh2Ca
++n2SQwn4t4OQrOjbLLdGE1pM6TBKKrHUFy62uEL8atNjlcfXIsa4aEu3xNGiqxqur
++ZectlkZbm0FkaaQ1Wr9oikDY3KfjuaXdPdO/XC/h8AKNxlDOylyXwUSK/CuYb+1j
++gy8yF5QFvVfwW/xwTlHmhUeSkVSQPosfQ6yXNNsmMzkd+ZPWLrfq4R+wiNtwYGu0
++WSBcI/M9o8/vrNLnIppoiBJJ13j9CR1ToEAzOFh9wwRWLY10oZhoh1ONN1KQURx4
++qedzvvP2DSjZbUccdvl2rBGvZpzfOiFdm1FCnxB0c72Cqx+GTHXBFf8bsa7KHky9
++sNO1GUanbq17WoDNgwbY6H51bfShqv0CErxatwWox3we4EcAmFHPVTCYL1oWVMGo
++a3Eth91NZj+b/nGhF9lhHKGzXSv9brmLLkfvM1jA6XhNhA7BQ5Vz67lj2j3XfXdh
++t/BU5pBXbL4Ut4mIhT1YnKXAjX2/LF5RHQTE8Vwkx5JAEKZyUEGOReD/B+7GOrLp
++HduMT9vZAc5aR2k9I8qq1zBAzsL69lyQNAPaDYd1BIAjUety9gAYaSQffCgAgpRO
++Gt+DYvxS+7AT/yEd5h74MU2AH7KrAkbXOtlwupiGwhMVTstncDJWXMJqbBhyHPF8
++3UmZH0hbL4PYmzSj9LDWQQXI2tv6vrCpfts3Cqhqxz9vRpgY7t1Wu6l/r+KxYYz3
++1pcGpPvRmPh0DJm7cPTiXqPnZcPt+ulSaSdlxmd19OnvG5awp0fXhxryZVwuiT8G
++VDkhyARrxYrdjlINsZJZbQjO0t8ketXAELJOnbFXXzeCOosyOHkLwsqOO96AVJA8
++45ZVL5m95ClGy0RSrjVIkXsxTAMVG6SPAqKwk6vmTdRGuSPS4rhgckPVDHmccmuq
++dfnT2YkX+wB2/M3oCgU+s30fAHGkbGZ0pCdNbFYFZLiH0iiMbTDl/0L/z7IdK0nH
++GLHVE7apPraKC6xl6rPWsD2iSfrmtIPQa0+rqbIVvKP5JdfJ8J4alI+OxFw/znQe
++V0/Rez0j22Fe119LZFFSXhRv+ZSvcq20xDwh00mzcumPWpYuCVPozA18yIhC9tNn
++ALHndz0tDseIdy9vC71jQWy9iwri3ueN0DekMMF8JGzI1Z6BAFzgyAx3DkHtwHg7
++B7qD0jPG5hJ5+yt323fYgJsuEAYoZ8/jzZ01pkX8bt+UsVN0DGnSGsI2ktnIIk3J
++l+8krjmUy6EaW79nITwoOqaeHOIp8m3UkjEcoKOYrzHRKqRy+A09rY+m/cAQaafW
++4xp0Zv7qZPLwnu0jsqB4jD8Ll9yPB02ndsoV6U5PeHzTkVhPml19jKUAwFfs7TJg
++kXy+/xFhYVUCAwEAATANBgkqhkiG9w0BAQsFAAOCBAIAAQnZY77pMNeypfpba2WK
++aDasT7dk2JqP0eukJCVPTN24Zca+xJNPdzuBATm/8SdZK9lddIbjSnWRsKvTnO2r
++/rYdlPf3jM5uuJtb8+Uwwe1s+gszelGS9G/lzzq+ehWicRIq2PFcs8o3iQMfENiv
++qILJ+xjcrvms5ZPDNahWkfRx3KCg8Q+/at2n5p7XYjMPYiLKHnDC+RE2b1qT20IZ
++FhuK/fTWLmKbfYFNNga6GC4qcaZJ7x0pbm4SDTYp0tkhzcHzwKhidfNB5J2vNz6l
++Ur6wiYwamFTLqcOwWo7rdvI+sSn05WQBv0QZlzFX+OAu0l7WQ7yU+noOxBhjvHds
++14+r9qcQZg2q9kG+evopYZqYXRUNNlZKo9MRBXhfrISulFAc5lRFQIXMXnglvAu+
++Ipz2gomEAOcOPNNVldhKAU94GAMJd/KfN0ZP7gX3YvPzuYU6XDhag5RTohXLm18w
++5AF+ES3DOQ6ixu3DTf0D+6qrDuK+prdX8ivcdTQVNOQ+MIZeGSc6NWWOTaMGJ3lg
++aZIxJUGdo6E7GBGiC1YTjgFKFbHzek1LRTh/LX3vbSudxwaG0HQxwsU9T4DWiMqa
++Fkf2KteLEUA6HrR+0XlAZrhwoqAmrJ+8lCFX3V0gE9lpENfVHlFXDGyx10DpTB28
++DdjnY3F7EPWNzwf9P3oNT69CKW3Bk6VVr3ROOJtDxVu1ioWo3TaXltQ0VOnap2Pu
++sa5wfrpfwBDuAS9JCDg4ttNp2nW3F7tgXC6xPqw5pvGwUppEw9XNrqV8TZrxduuv
++rQ3NyZ7KSzIpmFlD3UwV/fGfz3UQmHS6Ng1evrUID9DjfYNfRqSGIGjDfxGtYD+j
++Z1gLJZuhjJpNtwBkKRtlNtrCWCJK2hidK/foxwD7kwAPo2I9FjpltxCRywZUs07X
++KwXTfBR9v6ij1LV6K58hFS+8ezZyZ05CeVBFkMQdclTOSfuPxlMkQOtjp8QWDj+F
++j/MYziT5KBkHvcbrjdRtUJIAi4N7zCsPZtjik918AK1WBNRVqPbrgq/XSEXMfuvs
++6JbfK0B76vdBDRtJFC1JsvnIrGbUztxXzyQwFLaR/AjVJqpVlysLWzPKWVX6/+SJ
++u1NQOl2E8P6ycyBsuGnO89p0S4F8cMRcI2X1XQsZ7/q0NBrOMaEp5T3SrWo9GiQ3
++o2SBdbs3Y6MBPBtTu977Z/0RO63J3M5i2tjUiDfrFy7+VRLKr7qQ7JibohyB8QaR
++9tedgjn2f+of7PnP/PEl1cCphUZeHM7QKUMPT8dbqwmKtlYY43EHXcvNOT5IBk3X
++9lwJoZk/B2i+ZMRNSP34ztAwtxmasPt6RAWGQpWCn9qmttAHAnMfDqe7F7jVR6rS
++u58=
++-----END CERTIFICATE-----`
++
++func TestHandshakeRSATooBig(t *testing.T) {
++ testCert, _ := pem.Decode([]byte(largeRSAKeyCertPEM))
++
++ c := &Conn{conn: &discardConn{}, config: testConfig.Clone()}
++
++ expectedErr := "tls: server sent certificate containing RSA key larger than 8192 bits"
++ err := c.verifyServerCertificate([][]byte{testCert.Bytes})
++ if err == nil || err.Error() != expectedErr {
++ t.Errorf("Conn.verifyServerCertificate unexpected error: want %q, got %q", expectedErr, err)
++ }
++
++ expectedErr = "tls: client sent certificate containing RSA key larger than 8192 bits"
++ err = c.processCertsFromClient(Certificate{Certificate: [][]byte{testCert.Bytes}})
++ if err == nil || err.Error() != expectedErr {
++ t.Errorf("Conn.processCertsFromClient unexpected error: want %q, got %q", expectedErr, err)
++ }
++}
+diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go
+index 8d51e7e..a5d8f4a 100644
+--- a/src/crypto/tls/handshake_server.go
++++ b/src/crypto/tls/handshake_server.go
+@@ -812,6 +812,10 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse client certificate: " + err.Error())
+ }
++ if certs[i].PublicKeyAlgorithm == x509.RSA && certs[i].PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
++ c.sendAlert(alertBadCertificate)
++ return fmt.Errorf("tls: client sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
++ }
+ }
+
+ if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
+--
+2.40.0
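For illustration only (editorial sketch, not part of the patch above): the RSA key-size check the backport adds to both handshake paths. checkRSASize is an assumed helper name, not a crypto/tls API; the real patch performs the check inline in verifyServerCertificate and processCertsFromClient.

package main

import (
	"crypto/rsa"
	"crypto/x509"
	"fmt"
)

const maxRSAKeySize = 8192 // same limit as the patch

// checkRSASize rejects certificates whose RSA public key exceeds the limit.
func checkRSASize(cert *x509.Certificate) error {
	if cert.PublicKeyAlgorithm == x509.RSA {
		if pub, ok := cert.PublicKey.(*rsa.PublicKey); ok && pub.N.BitLen() > maxRSAKeySize {
			return fmt.Errorf("certificate contains RSA key larger than %d bits", maxRSAKeySize)
		}
	}
	return nil
}

func main() {
	// With a real chain, call checkRSASize on each parsed *x509.Certificate
	// before spending CPU time on signature verification.
	fmt.Println("max accepted RSA key size:", maxRSAKeySize, "bits")
}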
diff --git a/meta/recipes-devtools/go/go-1.19/add_godebug.patch b/meta/recipes-devtools/go/go-1.19/add_godebug.patch
new file mode 100644
index 0000000000..0c3d2d2855
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/add_godebug.patch
@@ -0,0 +1,84 @@
+
+Upstream-Status: Backport [see text]
+
+https://github.com/golang/go.git as of commit 22c1d18a27...
+Copy src/internal/godebug from go 1.19 since it does not
+exist in 1.17.
+
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+---
+
+--- /dev/null
++++ go/src/internal/godebug/godebug.go
+@@ -0,0 +1,34 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// Package godebug parses the GODEBUG environment variable.
++package godebug
++
++import "os"
++
++// Get returns the value for the provided GODEBUG key.
++func Get(key string) string {
++ return get(os.Getenv("GODEBUG"), key)
++}
++
++// get returns the value part of key=value in s (a GODEBUG value).
++func get(s, key string) string {
++ for i := 0; i < len(s)-len(key)-1; i++ {
++ if i > 0 && s[i-1] != ',' {
++ continue
++ }
++ afterKey := s[i+len(key):]
++ if afterKey[0] != '=' || s[i:i+len(key)] != key {
++ continue
++ }
++ val := afterKey[1:]
++ for i, b := range val {
++ if b == ',' {
++ return val[:i]
++ }
++ }
++ return val
++ }
++ return ""
++}
+--- /dev/null
++++ go/src/internal/godebug/godebug_test.go
+@@ -0,0 +1,34 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package godebug
++
++import "testing"
++
++func TestGet(t *testing.T) {
++ tests := []struct {
++ godebug string
++ key string
++ want string
++ }{
++ {"", "", ""},
++ {"", "foo", ""},
++ {"foo=bar", "foo", "bar"},
++ {"foo=bar,after=x", "foo", "bar"},
++ {"before=x,foo=bar,after=x", "foo", "bar"},
++ {"before=x,foo=bar", "foo", "bar"},
++ {",,,foo=bar,,,", "foo", "bar"},
++ {"foodecoy=wrong,foo=bar", "foo", "bar"},
++ {"foo=", "foo", ""},
++ {"foo", "foo", ""},
++ {",foo", "foo", ""},
++ {"foo=bar,baz", "loooooooong", ""},
++ }
++ for _, tt := range tests {
++ got := get(tt.godebug, tt.key)
++ if got != tt.want {
++ t.Errorf("get(%q, %q) = %q; want %q", tt.godebug, tt.key, got, tt.want)
++ }
++ }
++}
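For illustration only (editorial sketch, not part of the patch above): the get helper copied from the backported godebug.go, plus a few example calls showing how a GODEBUG string is parsed.

package main

import "fmt"

// get mirrors the backported logic: find "key=" at the start of the string
// or after a comma, and return the value up to the next comma.
func get(s, key string) string {
	for i := 0; i < len(s)-len(key)-1; i++ {
		if i > 0 && s[i-1] != ',' {
			continue
		}
		afterKey := s[i+len(key):]
		if afterKey[0] != '=' || s[i:i+len(key)] != key {
			continue
		}
		val := afterKey[1:]
		for i, b := range val {
			if b == ',' {
				return val[:i]
			}
		}
		return val
	}
	return ""
}

func main() {
	fmt.Println(get("before=x,foo=bar,after=y", "foo")) // bar
	fmt.Println(get("foodecoy=wrong,foo=bar", "foo"))   // bar
	fmt.Println(get("foo", "foo"))                      // "" (no '=')
}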
diff --git a/meta/recipes-devtools/go/go-1.19/cve-2022-41724.patch b/meta/recipes-devtools/go/go-1.19/cve-2022-41724.patch
new file mode 100644
index 0000000000..aacffbffcd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/cve-2022-41724.patch
@@ -0,0 +1,2391 @@
+From 00b256e9e3c0fa02a278ec9dfc3e191e02ceaf80 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Wed, 14 Dec 2022 09:43:16 -0800
+Subject: [PATCH] [release-branch.go1.19] crypto/tls: replace all usages of
+ BytesOrPanic
+
+Message marshalling makes use of BytesOrPanic a lot, under the
+assumption that it will never panic. This assumption was incorrect, and
+specifically crafted handshakes could trigger panics. Rather than just
+surgically replacing the usages of BytesOrPanic in paths that could
+panic, replace all usages of it with proper error returns in case there
+are other ways of triggering panics which we didn't find.
+
+In one specific case, the tree routed by expandLabel, we replace the
+usage of BytesOrPanic, but retain a panic. This function already
+explicitly panicked elsewhere, and returning an error from it becomes
+rather painful because it requires changing a large number of APIs.
+The marshalling is unlikely to ever panic, as the inputs are all either
+fixed length, or already limited to the sizes required. If it were to
+panic, it'd likely only be during development. A close inspection shows
+no paths for a user to cause a panic currently.
+
+This patch ends up being rather large, since it requires routing
+errors back through functions which previously had no error returns.
+Where possible I've tried to use helpers that reduce the verbosity
+of frequently repeated stanzas, and to make the diffs as minimal as
+possible.
+
+Thanks to Marten Seemann for reporting this issue.
+
+Updates #58001
+Fixes #58358
+Fixes CVE-2022-41724
+
+Change-Id: Ieb55867ef0a3e1e867b33f09421932510cb58851
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1679436
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 0f3a44ad7b41cc89efdfad25278953e17d9c1e04)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728204
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468117
+Auto-Submit: Michael Pratt <mpratt@google.com>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Than McIntosh <thanm@google.com>
+---
+
+CVE: CVE-2022-41724
+
+Upstream-Status: Backport [see text]
+
+https://github.com/golang/go.git commit 00b256e9e3c0fa...
+boring_test.go does not exist in this tree;
+the conn.go and handshake_messages.go hunks were modified to apply.
+
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+
+---
+ src/crypto/tls/boring_test.go | 2 +-
+ src/crypto/tls/common.go | 2 +-
+ src/crypto/tls/conn.go | 46 +-
+ src/crypto/tls/handshake_client.go | 95 +--
+ src/crypto/tls/handshake_client_test.go | 4 +-
+ src/crypto/tls/handshake_client_tls13.go | 74 ++-
+ src/crypto/tls/handshake_messages.go | 716 +++++++++++-----------
+ src/crypto/tls/handshake_messages_test.go | 19 +-
+ src/crypto/tls/handshake_server.go | 73 ++-
+ src/crypto/tls/handshake_server_test.go | 31 +-
+ src/crypto/tls/handshake_server_tls13.go | 71 ++-
+ src/crypto/tls/key_schedule.go | 19 +-
+ src/crypto/tls/ticket.go | 8 +-
+ 13 files changed, 657 insertions(+), 503 deletions(-)
+
+--- go.orig/src/crypto/tls/common.go
++++ go/src/crypto/tls/common.go
+@@ -1357,7 +1357,7 @@ func (c *Certificate) leaf() (*x509.Cert
+ }
+
+ type handshakeMessage interface {
+- marshal() []byte
++ marshal() ([]byte, error)
+ unmarshal([]byte) bool
+ }
+
+--- go.orig/src/crypto/tls/conn.go
++++ go/src/crypto/tls/conn.go
+@@ -994,18 +994,46 @@ func (c *Conn) writeRecordLocked(typ rec
+ return n, nil
+ }
+
+-// writeRecord writes a TLS record with the given type and payload to the
+-// connection and updates the record layer state.
+-func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
++// writeHandshakeRecord writes a handshake message to the connection and updates
++// the record layer state. If transcript is non-nil the marshalled message is
++// written to it.
++func (c *Conn) writeHandshakeRecord(msg handshakeMessage, transcript transcriptHash) (int, error) {
+ c.out.Lock()
+ defer c.out.Unlock()
+
+- return c.writeRecordLocked(typ, data)
++ data, err := msg.marshal()
++ if err != nil {
++ return 0, err
++ }
++ if transcript != nil {
++ transcript.Write(data)
++ }
++
++ return c.writeRecordLocked(recordTypeHandshake, data)
++}
++
++// writeChangeCipherRecord writes a ChangeCipherSpec message to the connection and
++// updates the record layer state.
++func (c *Conn) writeChangeCipherRecord() error {
++ c.out.Lock()
++ defer c.out.Unlock()
++ _, err := c.writeRecordLocked(recordTypeChangeCipherSpec, []byte{1})
++ return err
+ }
+
+ // readHandshake reads the next handshake message from
+-// the record layer.
+-func (c *Conn) readHandshake() (interface{}, error) {
++// the record layer. If transcript is non-nil, the message
++// is written to the passed transcriptHash.
++
++// backport 00b256e9e3c0fa02a278ec9dfc3e191e02ceaf80
++//
++// The upstream commit changes this signature to
++//
++// func (c *Conn) readHandshake(transcript transcriptHash) (any, error) {
++//
++// but "any" does not compile here, so the original interface{} return type is kept.
++//
++func (c *Conn) readHandshake(transcript transcriptHash) (interface{}, error) {
+ for c.hand.Len() < 4 {
+ if err := c.readRecord(); err != nil {
+ return nil, err
+@@ -1084,6 +1112,11 @@ func (c *Conn) readHandshake() (interfac
+ if !m.unmarshal(data) {
+ return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
++
++ if transcript != nil {
++ transcript.Write(data)
++ }
++
+ return m, nil
+ }
+
+@@ -1159,7 +1192,7 @@ func (c *Conn) handleRenegotiation() err
+ return errors.New("tls: internal error: unexpected renegotiation")
+ }
+
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -1205,7 +1238,7 @@ func (c *Conn) handlePostHandshakeMessag
+ return c.handleRenegotiation()
+ }
+
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -1241,7 +1274,11 @@ func (c *Conn) handleKeyUpdate(keyUpdate
+ defer c.out.Unlock()
+
+ msg := &keyUpdateMsg{}
+- _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal())
++ msgBytes, err := msg.marshal()
++ if err != nil {
++ return err
++ }
++ _, err = c.writeRecordLocked(recordTypeHandshake, msgBytes)
+ if err != nil {
+ // Surface the error at the next write.
+ c.out.setErrorLocked(err)
+--- go.orig/src/crypto/tls/handshake_client.go
++++ go/src/crypto/tls/handshake_client.go
+@@ -157,7 +157,10 @@ func (c *Conn) clientHandshake(ctx conte
+ }
+ c.serverName = hello.serverName
+
+- cacheKey, session, earlySecret, binderKey := c.loadSession(hello)
++ cacheKey, session, earlySecret, binderKey, err := c.loadSession(hello)
++ if err != nil {
++ return err
++ }
+ if cacheKey != "" && session != nil {
+ defer func() {
+ // If we got a handshake failure when resuming a session, throw away
+@@ -172,11 +175,12 @@ func (c *Conn) clientHandshake(ctx conte
+ }()
+ }
+
+- if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
++ if _, err := c.writeHandshakeRecord(hello, nil); err != nil {
+ return err
+ }
+
+- msg, err := c.readHandshake()
++ // serverHelloMsg is not included in the transcript
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -241,9 +245,9 @@ func (c *Conn) clientHandshake(ctx conte
+ }
+
+ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
+- session *ClientSessionState, earlySecret, binderKey []byte) {
++ session *ClientSessionState, earlySecret, binderKey []byte, err error) {
+ if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
+- return "", nil, nil, nil
++ return "", nil, nil, nil, nil
+ }
+
+ hello.ticketSupported = true
+@@ -258,14 +262,14 @@ func (c *Conn) loadSession(hello *client
+ // renegotiation is primarily used to allow a client to send a client
+ // certificate, which would be skipped if session resumption occurred.
+ if c.handshakes != 0 {
+- return "", nil, nil, nil
++ return "", nil, nil, nil, nil
+ }
+
+ // Try to resume a previously negotiated TLS session, if available.
+ cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
+ session, ok := c.config.ClientSessionCache.Get(cacheKey)
+ if !ok || session == nil {
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+
+ // Check that version used for the previous session is still valid.
+@@ -277,7 +281,7 @@ func (c *Conn) loadSession(hello *client
+ }
+ }
+ if !versOk {
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+
+ // Check that the cached server certificate is not expired, and that it's
+@@ -286,16 +290,16 @@ func (c *Conn) loadSession(hello *client
+ if !c.config.InsecureSkipVerify {
+ if len(session.verifiedChains) == 0 {
+ // The original connection had InsecureSkipVerify, while this doesn't.
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+ serverCert := session.serverCertificates[0]
+ if c.config.time().After(serverCert.NotAfter) {
+ // Expired certificate, delete the entry.
+ c.config.ClientSessionCache.Put(cacheKey, nil)
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+ if err := serverCert.VerifyHostname(c.config.ServerName); err != nil {
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+ }
+
+@@ -303,7 +307,7 @@ func (c *Conn) loadSession(hello *client
+ // In TLS 1.2 the cipher suite must match the resumed session. Ensure we
+ // are still offering it.
+ if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil {
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+
+ hello.sessionTicket = session.sessionTicket
+@@ -313,14 +317,14 @@ func (c *Conn) loadSession(hello *client
+ // Check that the session ticket is not expired.
+ if c.config.time().After(session.useBy) {
+ c.config.ClientSessionCache.Put(cacheKey, nil)
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+
+ // In TLS 1.3 the KDF hash must match the resumed session. Ensure we
+ // offer at least one cipher suite with that hash.
+ cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite)
+ if cipherSuite == nil {
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+ cipherSuiteOk := false
+ for _, offeredID := range hello.cipherSuites {
+@@ -331,7 +335,7 @@ func (c *Conn) loadSession(hello *client
+ }
+ }
+ if !cipherSuiteOk {
+- return cacheKey, nil, nil, nil
++ return cacheKey, nil, nil, nil, nil
+ }
+
+ // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1.
+@@ -349,9 +353,15 @@ func (c *Conn) loadSession(hello *client
+ earlySecret = cipherSuite.extract(psk, nil)
+ binderKey = cipherSuite.deriveSecret(earlySecret, resumptionBinderLabel, nil)
+ transcript := cipherSuite.hash.New()
+- transcript.Write(hello.marshalWithoutBinders())
++ helloBytes, err := hello.marshalWithoutBinders()
++ if err != nil {
++ return "", nil, nil, nil, err
++ }
++ transcript.Write(helloBytes)
+ pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)}
+- hello.updateBinders(pskBinders)
++ if err := hello.updateBinders(pskBinders); err != nil {
++ return "", nil, nil, nil, err
++ }
+
+ return
+ }
+@@ -396,8 +406,12 @@ func (hs *clientHandshakeState) handshak
+ hs.finishedHash.discardHandshakeBuffer()
+ }
+
+- hs.finishedHash.Write(hs.hello.marshal())
+- hs.finishedHash.Write(hs.serverHello.marshal())
++ if err := transcriptMsg(hs.hello, &hs.finishedHash); err != nil {
++ return err
++ }
++ if err := transcriptMsg(hs.serverHello, &hs.finishedHash); err != nil {
++ return err
++ }
+
+ c.buffering = true
+ c.didResume = isResume
+@@ -468,7 +482,7 @@ func (hs *clientHandshakeState) pickCiph
+ func (hs *clientHandshakeState) doFullHandshake() error {
+ c := hs.c
+
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -477,9 +491,8 @@ func (hs *clientHandshakeState) doFullHa
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certMsg, msg)
+ }
+- hs.finishedHash.Write(certMsg.marshal())
+
+- msg, err = c.readHandshake()
++ msg, err = c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -497,11 +510,10 @@ func (hs *clientHandshakeState) doFullHa
+ c.sendAlert(alertUnexpectedMessage)
+ return errors.New("tls: received unexpected CertificateStatus message")
+ }
+- hs.finishedHash.Write(cs.marshal())
+
+ c.ocspResponse = cs.response
+
+- msg, err = c.readHandshake()
++ msg, err = c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -530,14 +542,13 @@ func (hs *clientHandshakeState) doFullHa
+
+ skx, ok := msg.(*serverKeyExchangeMsg)
+ if ok {
+- hs.finishedHash.Write(skx.marshal())
+ err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx)
+ if err != nil {
+ c.sendAlert(alertUnexpectedMessage)
+ return err
+ }
+
+- msg, err = c.readHandshake()
++ msg, err = c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -548,7 +559,6 @@ func (hs *clientHandshakeState) doFullHa
+ certReq, ok := msg.(*certificateRequestMsg)
+ if ok {
+ certRequested = true
+- hs.finishedHash.Write(certReq.marshal())
+
+ cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq)
+ if chainToSend, err = c.getClientCertificate(cri); err != nil {
+@@ -556,7 +566,7 @@ func (hs *clientHandshakeState) doFullHa
+ return err
+ }
+
+- msg, err = c.readHandshake()
++ msg, err = c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -567,7 +577,6 @@ func (hs *clientHandshakeState) doFullHa
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(shd, msg)
+ }
+- hs.finishedHash.Write(shd.marshal())
+
+ // If the server requested a certificate then we have to send a
+ // Certificate message, even if it's empty because we don't have a
+@@ -575,8 +584,7 @@ func (hs *clientHandshakeState) doFullHa
+ if certRequested {
+ certMsg = new(certificateMsg)
+ certMsg.certificates = chainToSend.Certificate
+- hs.finishedHash.Write(certMsg.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil {
+ return err
+ }
+ }
+@@ -587,8 +595,7 @@ func (hs *clientHandshakeState) doFullHa
+ return err
+ }
+ if ckx != nil {
+- hs.finishedHash.Write(ckx.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(ckx, &hs.finishedHash); err != nil {
+ return err
+ }
+ }
+@@ -635,8 +642,7 @@ func (hs *clientHandshakeState) doFullHa
+ return err
+ }
+
+- hs.finishedHash.Write(certVerify.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certVerify, &hs.finishedHash); err != nil {
+ return err
+ }
+ }
+@@ -771,7 +777,10 @@ func (hs *clientHandshakeState) readFini
+ return err
+ }
+
+- msg, err := c.readHandshake()
++ // finishedMsg is included in the transcript, but not until after we
++ // check the client version, since the state before this message was
++ // sent is used during verification.
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -787,7 +796,11 @@ func (hs *clientHandshakeState) readFini
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: server's Finished message was incorrect")
+ }
+- hs.finishedHash.Write(serverFinished.marshal())
++
++ if err := transcriptMsg(serverFinished, &hs.finishedHash); err != nil {
++ return err
++ }
++
+ copy(out, verify)
+ return nil
+ }
+@@ -798,7 +811,7 @@ func (hs *clientHandshakeState) readSess
+ }
+
+ c := hs.c
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -807,7 +820,6 @@ func (hs *clientHandshakeState) readSess
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(sessionTicketMsg, msg)
+ }
+- hs.finishedHash.Write(sessionTicketMsg.marshal())
+
+ hs.session = &ClientSessionState{
+ sessionTicket: sessionTicketMsg.ticket,
+@@ -827,14 +839,13 @@ func (hs *clientHandshakeState) readSess
+ func (hs *clientHandshakeState) sendFinished(out []byte) error {
+ c := hs.c
+
+- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
++ if err := c.writeChangeCipherRecord(); err != nil {
+ return err
+ }
+
+ finished := new(finishedMsg)
+ finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
+- hs.finishedHash.Write(finished.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil {
+ return err
+ }
+ copy(out, finished.verifyData)
+--- go.orig/src/crypto/tls/handshake_client_test.go
++++ go/src/crypto/tls/handshake_client_test.go
+@@ -1257,7 +1257,7 @@ func TestServerSelectingUnconfiguredAppl
+ cipherSuite: TLS_RSA_WITH_AES_128_GCM_SHA256,
+ alpnProtocol: "how-about-this",
+ }
+- serverHelloBytes := serverHello.marshal()
++ serverHelloBytes := mustMarshal(t, serverHello)
+
+ s.Write([]byte{
+ byte(recordTypeHandshake),
+@@ -1500,7 +1500,7 @@ func TestServerSelectingUnconfiguredCiph
+ random: make([]byte, 32),
+ cipherSuite: TLS_RSA_WITH_AES_256_GCM_SHA384,
+ }
+- serverHelloBytes := serverHello.marshal()
++ serverHelloBytes := mustMarshal(t, serverHello)
+
+ s.Write([]byte{
+ byte(recordTypeHandshake),
+--- go.orig/src/crypto/tls/handshake_client_tls13.go
++++ go/src/crypto/tls/handshake_client_tls13.go
+@@ -58,7 +58,10 @@ func (hs *clientHandshakeStateTLS13) han
+ }
+
+ hs.transcript = hs.suite.hash.New()
+- hs.transcript.Write(hs.hello.marshal())
++
++ if err := transcriptMsg(hs.hello, hs.transcript); err != nil {
++ return err
++ }
+
+ if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
+ if err := hs.sendDummyChangeCipherSpec(); err != nil {
+@@ -69,7 +72,9 @@ func (hs *clientHandshakeStateTLS13) han
+ }
+ }
+
+- hs.transcript.Write(hs.serverHello.marshal())
++ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
++ return err
++ }
+
+ c.buffering = true
+ if err := hs.processServerHello(); err != nil {
+@@ -168,8 +173,7 @@ func (hs *clientHandshakeStateTLS13) sen
+ }
+ hs.sentDummyCCS = true
+
+- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+- return err
++ return hs.c.writeChangeCipherRecord()
+ }
+
+ // processHelloRetryRequest handles the HRR in hs.serverHello, modifies and
+@@ -184,7 +188,9 @@ func (hs *clientHandshakeStateTLS13) pro
+ hs.transcript.Reset()
+ hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
+ hs.transcript.Write(chHash)
+- hs.transcript.Write(hs.serverHello.marshal())
++ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
++ return err
++ }
+
+ // The only HelloRetryRequest extensions we support are key_share and
+ // cookie, and clients must abort the handshake if the HRR would not result
+@@ -249,10 +255,18 @@ func (hs *clientHandshakeStateTLS13) pro
+ transcript := hs.suite.hash.New()
+ transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
+ transcript.Write(chHash)
+- transcript.Write(hs.serverHello.marshal())
+- transcript.Write(hs.hello.marshalWithoutBinders())
++ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
++ return err
++ }
++ helloBytes, err := hs.hello.marshalWithoutBinders()
++ if err != nil {
++ return err
++ }
++ transcript.Write(helloBytes)
+ pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)}
+- hs.hello.updateBinders(pskBinders)
++ if err := hs.hello.updateBinders(pskBinders); err != nil {
++ return err
++ }
+ } else {
+ // Server selected a cipher suite incompatible with the PSK.
+ hs.hello.pskIdentities = nil
+@@ -260,12 +274,12 @@ func (hs *clientHandshakeStateTLS13) pro
+ }
+ }
+
+- hs.transcript.Write(hs.hello.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil {
+ return err
+ }
+
+- msg, err := c.readHandshake()
++ // serverHelloMsg is not included in the transcript
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -354,6 +368,7 @@ func (hs *clientHandshakeStateTLS13) est
+ if !hs.usingPSK {
+ earlySecret = hs.suite.extract(nil, nil)
+ }
++
+ handshakeSecret := hs.suite.extract(sharedKey,
+ hs.suite.deriveSecret(earlySecret, "derived", nil))
+
+@@ -384,7 +399,7 @@ func (hs *clientHandshakeStateTLS13) est
+ func (hs *clientHandshakeStateTLS13) readServerParameters() error {
+ c := hs.c
+
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(hs.transcript)
+ if err != nil {
+ return err
+ }
+@@ -394,7 +409,6 @@ func (hs *clientHandshakeStateTLS13) rea
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(encryptedExtensions, msg)
+ }
+- hs.transcript.Write(encryptedExtensions.marshal())
+
+ if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil {
+ c.sendAlert(alertUnsupportedExtension)
+@@ -423,18 +437,16 @@ func (hs *clientHandshakeStateTLS13) rea
+ return nil
+ }
+
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(hs.transcript)
+ if err != nil {
+ return err
+ }
+
+ certReq, ok := msg.(*certificateRequestMsgTLS13)
+ if ok {
+- hs.transcript.Write(certReq.marshal())
+-
+ hs.certReq = certReq
+
+- msg, err = c.readHandshake()
++ msg, err = c.readHandshake(hs.transcript)
+ if err != nil {
+ return err
+ }
+@@ -449,7 +461,6 @@ func (hs *clientHandshakeStateTLS13) rea
+ c.sendAlert(alertDecodeError)
+ return errors.New("tls: received empty certificates message")
+ }
+- hs.transcript.Write(certMsg.marshal())
+
+ c.scts = certMsg.certificate.SignedCertificateTimestamps
+ c.ocspResponse = certMsg.certificate.OCSPStaple
+@@ -458,7 +469,10 @@ func (hs *clientHandshakeStateTLS13) rea
+ return err
+ }
+
+- msg, err = c.readHandshake()
++ // certificateVerifyMsg is included in the transcript, but not until
++ // after we verify the handshake signature, since the state before
++ // this message was sent is used.
++ msg, err = c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -489,7 +503,9 @@ func (hs *clientHandshakeStateTLS13) rea
+ return errors.New("tls: invalid signature by the server certificate: " + err.Error())
+ }
+
+- hs.transcript.Write(certVerify.marshal())
++ if err := transcriptMsg(certVerify, hs.transcript); err != nil {
++ return err
++ }
+
+ return nil
+ }
+@@ -497,7 +513,10 @@ func (hs *clientHandshakeStateTLS13) rea
+ func (hs *clientHandshakeStateTLS13) readServerFinished() error {
+ c := hs.c
+
+- msg, err := c.readHandshake()
++ // finishedMsg is included in the transcript, but not until after we
++ // check the client version, since the state before this message was
++ // sent is used during verification.
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -514,7 +533,9 @@ func (hs *clientHandshakeStateTLS13) rea
+ return errors.New("tls: invalid server finished hash")
+ }
+
+- hs.transcript.Write(finished.marshal())
++ if err := transcriptMsg(finished, hs.transcript); err != nil {
++ return err
++ }
+
+ // Derive secrets that take context through the server Finished.
+
+@@ -563,8 +584,7 @@ func (hs *clientHandshakeStateTLS13) sen
+ certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0
+ certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0
+
+- hs.transcript.Write(certMsg.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -601,8 +621,7 @@ func (hs *clientHandshakeStateTLS13) sen
+ }
+ certVerifyMsg.signature = sig
+
+- hs.transcript.Write(certVerifyMsg.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -616,8 +635,7 @@ func (hs *clientHandshakeStateTLS13) sen
+ verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
+ }
+
+- hs.transcript.Write(finished.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil {
+ return err
+ }
+
+--- go.orig/src/crypto/tls/handshake_messages.go
++++ go/src/crypto/tls/handshake_messages.go
+@@ -5,6 +5,7 @@
+ package tls
+
+ import (
++ "errors"
+ "fmt"
+ "strings"
+
+@@ -94,9 +95,181 @@ type clientHelloMsg struct {
+ pskBinders [][]byte
+ }
+
+-func (m *clientHelloMsg) marshal() []byte {
++func (m *clientHelloMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
++ }
++
++ var exts cryptobyte.Builder
++ if len(m.serverName) > 0 {
++ // RFC 6066, Section 3
++ exts.AddUint16(extensionServerName)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8(0) // name_type = host_name
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes([]byte(m.serverName))
++ })
++ })
++ })
++ }
++ if m.ocspStapling {
++ // RFC 4366, Section 3.6
++ exts.AddUint16(extensionStatusRequest)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8(1) // status_type = ocsp
++ exts.AddUint16(0) // empty responder_id_list
++ exts.AddUint16(0) // empty request_extensions
++ })
++ }
++ if len(m.supportedCurves) > 0 {
++ // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
++ exts.AddUint16(extensionSupportedCurves)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, curve := range m.supportedCurves {
++ exts.AddUint16(uint16(curve))
++ }
++ })
++ })
++ }
++ if len(m.supportedPoints) > 0 {
++ // RFC 4492, Section 5.1.2
++ exts.AddUint16(extensionSupportedPoints)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.supportedPoints)
++ })
++ })
++ }
++ if m.ticketSupported {
++ // RFC 5077, Section 3.2
++ exts.AddUint16(extensionSessionTicket)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.sessionTicket)
++ })
++ }
++ if len(m.supportedSignatureAlgorithms) > 0 {
++ // RFC 5246, Section 7.4.1.4.1
++ exts.AddUint16(extensionSignatureAlgorithms)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, sigAlgo := range m.supportedSignatureAlgorithms {
++ exts.AddUint16(uint16(sigAlgo))
++ }
++ })
++ })
++ }
++ if len(m.supportedSignatureAlgorithmsCert) > 0 {
++ // RFC 8446, Section 4.2.3
++ exts.AddUint16(extensionSignatureAlgorithmsCert)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
++ exts.AddUint16(uint16(sigAlgo))
++ }
++ })
++ })
++ }
++ if m.secureRenegotiationSupported {
++ // RFC 5746, Section 3.2
++ exts.AddUint16(extensionRenegotiationInfo)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.secureRenegotiation)
++ })
++ })
++ }
++ if len(m.alpnProtocols) > 0 {
++ // RFC 7301, Section 3.1
++ exts.AddUint16(extensionALPN)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, proto := range m.alpnProtocols {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes([]byte(proto))
++ })
++ }
++ })
++ })
++ }
++ if m.scts {
++ // RFC 6962, Section 3.3.1
++ exts.AddUint16(extensionSCT)
++ exts.AddUint16(0) // empty extension_data
++ }
++ if len(m.supportedVersions) > 0 {
++ // RFC 8446, Section 4.2.1
++ exts.AddUint16(extensionSupportedVersions)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, vers := range m.supportedVersions {
++ exts.AddUint16(vers)
++ }
++ })
++ })
++ }
++ if len(m.cookie) > 0 {
++ // RFC 8446, Section 4.2.2
++ exts.AddUint16(extensionCookie)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.cookie)
++ })
++ })
++ }
++ if len(m.keyShares) > 0 {
++ // RFC 8446, Section 4.2.8
++ exts.AddUint16(extensionKeyShare)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, ks := range m.keyShares {
++ exts.AddUint16(uint16(ks.group))
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(ks.data)
++ })
++ }
++ })
++ })
++ }
++ if m.earlyData {
++ // RFC 8446, Section 4.2.10
++ exts.AddUint16(extensionEarlyData)
++ exts.AddUint16(0) // empty extension_data
++ }
++ if len(m.pskModes) > 0 {
++ // RFC 8446, Section 4.2.9
++ exts.AddUint16(extensionPSKModes)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.pskModes)
++ })
++ })
++ }
++ if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
++ // RFC 8446, Section 4.2.11
++ exts.AddUint16(extensionPreSharedKey)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, psk := range m.pskIdentities {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(psk.label)
++ })
++ exts.AddUint32(psk.obfuscatedTicketAge)
++ }
++ })
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, binder := range m.pskBinders {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(binder)
++ })
++ }
++ })
++ })
++ }
++ extBytes, err := exts.Bytes()
++ if err != nil {
++ return nil, err
+ }
+
+ var b cryptobyte.Builder
+@@ -116,219 +289,53 @@ func (m *clientHelloMsg) marshal() []byt
+ b.AddBytes(m.compressionMethods)
+ })
+
+- // If extensions aren't present, omit them.
+- var extensionsPresent bool
+- bWithoutExtensions := *b
+-
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- if len(m.serverName) > 0 {
+- // RFC 6066, Section 3
+- b.AddUint16(extensionServerName)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8(0) // name_type = host_name
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes([]byte(m.serverName))
+- })
+- })
+- })
+- }
+- if m.ocspStapling {
+- // RFC 4366, Section 3.6
+- b.AddUint16(extensionStatusRequest)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8(1) // status_type = ocsp
+- b.AddUint16(0) // empty responder_id_list
+- b.AddUint16(0) // empty request_extensions
+- })
+- }
+- if len(m.supportedCurves) > 0 {
+- // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
+- b.AddUint16(extensionSupportedCurves)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, curve := range m.supportedCurves {
+- b.AddUint16(uint16(curve))
+- }
+- })
+- })
+- }
+- if len(m.supportedPoints) > 0 {
+- // RFC 4492, Section 5.1.2
+- b.AddUint16(extensionSupportedPoints)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.supportedPoints)
+- })
+- })
+- }
+- if m.ticketSupported {
+- // RFC 5077, Section 3.2
+- b.AddUint16(extensionSessionTicket)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.sessionTicket)
+- })
+- }
+- if len(m.supportedSignatureAlgorithms) > 0 {
+- // RFC 5246, Section 7.4.1.4.1
+- b.AddUint16(extensionSignatureAlgorithms)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, sigAlgo := range m.supportedSignatureAlgorithms {
+- b.AddUint16(uint16(sigAlgo))
+- }
+- })
+- })
+- }
+- if len(m.supportedSignatureAlgorithmsCert) > 0 {
+- // RFC 8446, Section 4.2.3
+- b.AddUint16(extensionSignatureAlgorithmsCert)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
+- b.AddUint16(uint16(sigAlgo))
+- }
+- })
+- })
+- }
+- if m.secureRenegotiationSupported {
+- // RFC 5746, Section 3.2
+- b.AddUint16(extensionRenegotiationInfo)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.secureRenegotiation)
+- })
+- })
+- }
+- if len(m.alpnProtocols) > 0 {
+- // RFC 7301, Section 3.1
+- b.AddUint16(extensionALPN)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, proto := range m.alpnProtocols {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes([]byte(proto))
+- })
+- }
+- })
+- })
+- }
+- if m.scts {
+- // RFC 6962, Section 3.3.1
+- b.AddUint16(extensionSCT)
+- b.AddUint16(0) // empty extension_data
+- }
+- if len(m.supportedVersions) > 0 {
+- // RFC 8446, Section 4.2.1
+- b.AddUint16(extensionSupportedVersions)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, vers := range m.supportedVersions {
+- b.AddUint16(vers)
+- }
+- })
+- })
+- }
+- if len(m.cookie) > 0 {
+- // RFC 8446, Section 4.2.2
+- b.AddUint16(extensionCookie)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.cookie)
+- })
+- })
+- }
+- if len(m.keyShares) > 0 {
+- // RFC 8446, Section 4.2.8
+- b.AddUint16(extensionKeyShare)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, ks := range m.keyShares {
+- b.AddUint16(uint16(ks.group))
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(ks.data)
+- })
+- }
+- })
+- })
+- }
+- if m.earlyData {
+- // RFC 8446, Section 4.2.10
+- b.AddUint16(extensionEarlyData)
+- b.AddUint16(0) // empty extension_data
+- }
+- if len(m.pskModes) > 0 {
+- // RFC 8446, Section 4.2.9
+- b.AddUint16(extensionPSKModes)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.pskModes)
+- })
+- })
+- }
+- if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
+- // RFC 8446, Section 4.2.11
+- b.AddUint16(extensionPreSharedKey)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, psk := range m.pskIdentities {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(psk.label)
+- })
+- b.AddUint32(psk.obfuscatedTicketAge)
+- }
+- })
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, binder := range m.pskBinders {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(binder)
+- })
+- }
+- })
+- })
+- }
+-
+- extensionsPresent = len(b.BytesOrPanic()) > 2
+- })
+-
+- if !extensionsPresent {
+- *b = bWithoutExtensions
+- }
+- })
++ if len(extBytes) > 0 {
++ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
++ b.AddBytes(extBytes)
++ })
++ }
++ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ // marshalWithoutBinders returns the ClientHello through the
+ // PreSharedKeyExtension.identities field, according to RFC 8446, Section
+ // 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length.
+-func (m *clientHelloMsg) marshalWithoutBinders() []byte {
++func (m *clientHelloMsg) marshalWithoutBinders() ([]byte, error) {
+ bindersLen := 2 // uint16 length prefix
+ for _, binder := range m.pskBinders {
+ bindersLen += 1 // uint8 length prefix
+ bindersLen += len(binder)
+ }
+
+- fullMessage := m.marshal()
+- return fullMessage[:len(fullMessage)-bindersLen]
++ fullMessage, err := m.marshal()
++ if err != nil {
++ return nil, err
++ }
++ return fullMessage[:len(fullMessage)-bindersLen], nil
+ }
+
+ // updateBinders updates the m.pskBinders field, if necessary updating the
+ // cached marshaled representation. The supplied binders must have the same
+ // length as the current m.pskBinders.
+-func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
++func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) error {
+ if len(pskBinders) != len(m.pskBinders) {
+- panic("tls: internal error: pskBinders length mismatch")
++ return errors.New("tls: internal error: pskBinders length mismatch")
+ }
+ for i := range m.pskBinders {
+ if len(pskBinders[i]) != len(m.pskBinders[i]) {
+- panic("tls: internal error: pskBinders length mismatch")
++ return errors.New("tls: internal error: pskBinders length mismatch")
+ }
+ }
+ m.pskBinders = pskBinders
+ if m.raw != nil {
+- lenWithoutBinders := len(m.marshalWithoutBinders())
++ helloBytes, err := m.marshalWithoutBinders()
++ if err != nil {
++ return err
++ }
++ lenWithoutBinders := len(helloBytes)
+ // TODO(filippo): replace with NewFixedBuilder once CL 148882 is imported.
+ b := cryptobyte.NewBuilder(m.raw[:lenWithoutBinders])
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+@@ -339,9 +346,11 @@ func (m *clientHelloMsg) updateBinders(p
+ }
+ })
+ if len(b.BytesOrPanic()) != len(m.raw) {
+- panic("tls: internal error: failed to update binders")
++ return errors.New("tls: internal error: failed to update binders")
+ }
+ }
++
++ return nil
+ }
+
+ func (m *clientHelloMsg) unmarshal(data []byte) bool {
+@@ -613,9 +622,98 @@ type serverHelloMsg struct {
+ selectedGroup CurveID
+ }
+
+-func (m *serverHelloMsg) marshal() []byte {
++func (m *serverHelloMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
++ }
++
++ var exts cryptobyte.Builder
++ if m.ocspStapling {
++ exts.AddUint16(extensionStatusRequest)
++ exts.AddUint16(0) // empty extension_data
++ }
++ if m.ticketSupported {
++ exts.AddUint16(extensionSessionTicket)
++ exts.AddUint16(0) // empty extension_data
++ }
++ if m.secureRenegotiationSupported {
++ exts.AddUint16(extensionRenegotiationInfo)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.secureRenegotiation)
++ })
++ })
++ }
++ if len(m.alpnProtocol) > 0 {
++ exts.AddUint16(extensionALPN)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes([]byte(m.alpnProtocol))
++ })
++ })
++ })
++ }
++ if len(m.scts) > 0 {
++ exts.AddUint16(extensionSCT)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ for _, sct := range m.scts {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(sct)
++ })
++ }
++ })
++ })
++ }
++ if m.supportedVersion != 0 {
++ exts.AddUint16(extensionSupportedVersions)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16(m.supportedVersion)
++ })
++ }
++ if m.serverShare.group != 0 {
++ exts.AddUint16(extensionKeyShare)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16(uint16(m.serverShare.group))
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.serverShare.data)
++ })
++ })
++ }
++ if m.selectedIdentityPresent {
++ exts.AddUint16(extensionPreSharedKey)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16(m.selectedIdentity)
++ })
++ }
++
++ if len(m.cookie) > 0 {
++ exts.AddUint16(extensionCookie)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.cookie)
++ })
++ })
++ }
++ if m.selectedGroup != 0 {
++ exts.AddUint16(extensionKeyShare)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint16(uint16(m.selectedGroup))
++ })
++ }
++ if len(m.supportedPoints) > 0 {
++ exts.AddUint16(extensionSupportedPoints)
++ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
++ exts.AddBytes(m.supportedPoints)
++ })
++ })
++ }
++
++ extBytes, err := exts.Bytes()
++ if err != nil {
++ return nil, err
+ }
+
+ var b cryptobyte.Builder
+@@ -629,104 +727,15 @@ func (m *serverHelloMsg) marshal() []byt
+ b.AddUint16(m.cipherSuite)
+ b.AddUint8(m.compressionMethod)
+
+- // If extensions aren't present, omit them.
+- var extensionsPresent bool
+- bWithoutExtensions := *b
+-
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- if m.ocspStapling {
+- b.AddUint16(extensionStatusRequest)
+- b.AddUint16(0) // empty extension_data
+- }
+- if m.ticketSupported {
+- b.AddUint16(extensionSessionTicket)
+- b.AddUint16(0) // empty extension_data
+- }
+- if m.secureRenegotiationSupported {
+- b.AddUint16(extensionRenegotiationInfo)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.secureRenegotiation)
+- })
+- })
+- }
+- if len(m.alpnProtocol) > 0 {
+- b.AddUint16(extensionALPN)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes([]byte(m.alpnProtocol))
+- })
+- })
+- })
+- }
+- if len(m.scts) > 0 {
+- b.AddUint16(extensionSCT)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- for _, sct := range m.scts {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(sct)
+- })
+- }
+- })
+- })
+- }
+- if m.supportedVersion != 0 {
+- b.AddUint16(extensionSupportedVersions)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16(m.supportedVersion)
+- })
+- }
+- if m.serverShare.group != 0 {
+- b.AddUint16(extensionKeyShare)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16(uint16(m.serverShare.group))
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.serverShare.data)
+- })
+- })
+- }
+- if m.selectedIdentityPresent {
+- b.AddUint16(extensionPreSharedKey)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16(m.selectedIdentity)
+- })
+- }
+-
+- if len(m.cookie) > 0 {
+- b.AddUint16(extensionCookie)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.cookie)
+- })
+- })
+- }
+- if m.selectedGroup != 0 {
+- b.AddUint16(extensionKeyShare)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint16(uint16(m.selectedGroup))
+- })
+- }
+- if len(m.supportedPoints) > 0 {
+- b.AddUint16(extensionSupportedPoints)
+- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+- b.AddBytes(m.supportedPoints)
+- })
+- })
+- }
+-
+- extensionsPresent = len(b.BytesOrPanic()) > 2
+- })
+-
+- if !extensionsPresent {
+- *b = bWithoutExtensions
++ if len(extBytes) > 0 {
++ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
++ b.AddBytes(extBytes)
++ })
+ }
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *serverHelloMsg) unmarshal(data []byte) bool {
+@@ -844,9 +853,9 @@ type encryptedExtensionsMsg struct {
+ alpnProtocol string
+ }
+
+-func (m *encryptedExtensionsMsg) marshal() []byte {
++func (m *encryptedExtensionsMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -866,8 +875,9 @@ func (m *encryptedExtensionsMsg) marshal
+ })
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
+@@ -915,10 +925,10 @@ func (m *encryptedExtensionsMsg) unmarsh
+
+ type endOfEarlyDataMsg struct{}
+
+-func (m *endOfEarlyDataMsg) marshal() []byte {
++func (m *endOfEarlyDataMsg) marshal() ([]byte, error) {
+ x := make([]byte, 4)
+ x[0] = typeEndOfEarlyData
+- return x
++ return x, nil
+ }
+
+ func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool {
+@@ -930,9 +940,9 @@ type keyUpdateMsg struct {
+ updateRequested bool
+ }
+
+-func (m *keyUpdateMsg) marshal() []byte {
++func (m *keyUpdateMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -945,8 +955,9 @@ func (m *keyUpdateMsg) marshal() []byte
+ }
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *keyUpdateMsg) unmarshal(data []byte) bool {
+@@ -978,9 +989,9 @@ type newSessionTicketMsgTLS13 struct {
+ maxEarlyData uint32
+ }
+
+-func (m *newSessionTicketMsgTLS13) marshal() []byte {
++func (m *newSessionTicketMsgTLS13) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -1005,8 +1016,9 @@ func (m *newSessionTicketMsgTLS13) marsh
+ })
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool {
+@@ -1059,9 +1071,9 @@ type certificateRequestMsgTLS13 struct {
+ certificateAuthorities [][]byte
+ }
+
+-func (m *certificateRequestMsgTLS13) marshal() []byte {
++func (m *certificateRequestMsgTLS13) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -1120,8 +1132,9 @@ func (m *certificateRequestMsgTLS13) mar
+ })
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool {
+@@ -1205,9 +1218,9 @@ type certificateMsg struct {
+ certificates [][]byte
+ }
+
+-func (m *certificateMsg) marshal() (x []byte) {
++func (m *certificateMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var i int
+@@ -1216,7 +1229,7 @@ func (m *certificateMsg) marshal() (x []
+ }
+
+ length := 3 + 3*len(m.certificates) + i
+- x = make([]byte, 4+length)
++ x := make([]byte, 4+length)
+ x[0] = typeCertificate
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+@@ -1237,7 +1250,7 @@ func (m *certificateMsg) marshal() (x []
+ }
+
+ m.raw = x
+- return
++ return m.raw, nil
+ }
+
+ func (m *certificateMsg) unmarshal(data []byte) bool {
+@@ -1284,9 +1297,9 @@ type certificateMsgTLS13 struct {
+ scts bool
+ }
+
+-func (m *certificateMsgTLS13) marshal() []byte {
++func (m *certificateMsgTLS13) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -1304,8 +1317,9 @@ func (m *certificateMsgTLS13) marshal()
+ marshalCertificate(b, certificate)
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) {
+@@ -1428,9 +1442,9 @@ type serverKeyExchangeMsg struct {
+ key []byte
+ }
+
+-func (m *serverKeyExchangeMsg) marshal() []byte {
++func (m *serverKeyExchangeMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+ length := len(m.key)
+ x := make([]byte, length+4)
+@@ -1441,7 +1455,7 @@ func (m *serverKeyExchangeMsg) marshal()
+ copy(x[4:], m.key)
+
+ m.raw = x
+- return x
++ return x, nil
+ }
+
+ func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool {
+@@ -1458,9 +1472,9 @@ type certificateStatusMsg struct {
+ response []byte
+ }
+
+-func (m *certificateStatusMsg) marshal() []byte {
++func (m *certificateStatusMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -1472,8 +1486,9 @@ func (m *certificateStatusMsg) marshal()
+ })
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *certificateStatusMsg) unmarshal(data []byte) bool {
+@@ -1492,10 +1507,10 @@ func (m *certificateStatusMsg) unmarshal
+
+ type serverHelloDoneMsg struct{}
+
+-func (m *serverHelloDoneMsg) marshal() []byte {
++func (m *serverHelloDoneMsg) marshal() ([]byte, error) {
+ x := make([]byte, 4)
+ x[0] = typeServerHelloDone
+- return x
++ return x, nil
+ }
+
+ func (m *serverHelloDoneMsg) unmarshal(data []byte) bool {
+@@ -1507,9 +1522,9 @@ type clientKeyExchangeMsg struct {
+ ciphertext []byte
+ }
+
+-func (m *clientKeyExchangeMsg) marshal() []byte {
++func (m *clientKeyExchangeMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+ length := len(m.ciphertext)
+ x := make([]byte, length+4)
+@@ -1520,7 +1535,7 @@ func (m *clientKeyExchangeMsg) marshal()
+ copy(x[4:], m.ciphertext)
+
+ m.raw = x
+- return x
++ return x, nil
+ }
+
+ func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool {
+@@ -1541,9 +1556,9 @@ type finishedMsg struct {
+ verifyData []byte
+ }
+
+-func (m *finishedMsg) marshal() []byte {
++func (m *finishedMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -1552,8 +1567,9 @@ func (m *finishedMsg) marshal() []byte {
+ b.AddBytes(m.verifyData)
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *finishedMsg) unmarshal(data []byte) bool {
+@@ -1575,9 +1591,9 @@ type certificateRequestMsg struct {
+ certificateAuthorities [][]byte
+ }
+
+-func (m *certificateRequestMsg) marshal() (x []byte) {
++func (m *certificateRequestMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ // See RFC 4346, Section 7.4.4.
+@@ -1592,7 +1608,7 @@ func (m *certificateRequestMsg) marshal(
+ length += 2 + 2*len(m.supportedSignatureAlgorithms)
+ }
+
+- x = make([]byte, 4+length)
++ x := make([]byte, 4+length)
+ x[0] = typeCertificateRequest
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+@@ -1627,7 +1643,7 @@ func (m *certificateRequestMsg) marshal(
+ }
+
+ m.raw = x
+- return
++ return m.raw, nil
+ }
+
+ func (m *certificateRequestMsg) unmarshal(data []byte) bool {
+@@ -1713,9 +1729,9 @@ type certificateVerifyMsg struct {
+ signature []byte
+ }
+
+-func (m *certificateVerifyMsg) marshal() (x []byte) {
++func (m *certificateVerifyMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ var b cryptobyte.Builder
+@@ -1729,8 +1745,9 @@ func (m *certificateVerifyMsg) marshal()
+ })
+ })
+
+- m.raw = b.BytesOrPanic()
+- return m.raw
++ var err error
++ m.raw, err = b.Bytes()
++ return m.raw, err
+ }
+
+ func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
+@@ -1753,15 +1770,15 @@ type newSessionTicketMsg struct {
+ ticket []byte
+ }
+
+-func (m *newSessionTicketMsg) marshal() (x []byte) {
++func (m *newSessionTicketMsg) marshal() ([]byte, error) {
+ if m.raw != nil {
+- return m.raw
++ return m.raw, nil
+ }
+
+ // See RFC 5077, Section 3.3.
+ ticketLen := len(m.ticket)
+ length := 2 + 4 + ticketLen
+- x = make([]byte, 4+length)
++ x := make([]byte, 4+length)
+ x[0] = typeNewSessionTicket
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+@@ -1772,7 +1789,7 @@ func (m *newSessionTicketMsg) marshal()
+
+ m.raw = x
+
+- return
++ return m.raw, nil
+ }
+
+ func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
+@@ -1800,10 +1817,25 @@ func (m *newSessionTicketMsg) unmarshal(
+ type helloRequestMsg struct {
+ }
+
+-func (*helloRequestMsg) marshal() []byte {
+- return []byte{typeHelloRequest, 0, 0, 0}
++func (*helloRequestMsg) marshal() ([]byte, error) {
++ return []byte{typeHelloRequest, 0, 0, 0}, nil
+ }
+
+ func (*helloRequestMsg) unmarshal(data []byte) bool {
+ return len(data) == 4
+ }
++
++type transcriptHash interface {
++ Write([]byte) (int, error)
++}
++
++// transcriptMsg is a helper used to marshal and hash messages which typically
++// are not written to the wire, and as such aren't hashed during Conn.writeRecord.
++func transcriptMsg(msg handshakeMessage, h transcriptHash) error {
++ data, err := msg.marshal()
++ if err != nil {
++ return err
++ }
++ h.Write(data)
++ return nil
++}
+--- go.orig/src/crypto/tls/handshake_messages_test.go
++++ go/src/crypto/tls/handshake_messages_test.go
+@@ -37,6 +37,15 @@ var tests = []interface{}{
+ &certificateMsgTLS13{},
+ }
+
++func mustMarshal(t *testing.T, msg handshakeMessage) []byte {
++ t.Helper()
++ b, err := msg.marshal()
++ if err != nil {
++ t.Fatal(err)
++ }
++ return b
++}
++
+ func TestMarshalUnmarshal(t *testing.T) {
+ rand := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+@@ -55,7 +64,7 @@ func TestMarshalUnmarshal(t *testing.T)
+ }
+
+ m1 := v.Interface().(handshakeMessage)
+- marshaled := m1.marshal()
++ marshaled := mustMarshal(t, m1)
+ m2 := iface.(handshakeMessage)
+ if !m2.unmarshal(marshaled) {
+ t.Errorf("#%d failed to unmarshal %#v %x", i, m1, marshaled)
+@@ -408,12 +417,12 @@ func TestRejectEmptySCTList(t *testing.T
+
+ var random [32]byte
+ sct := []byte{0x42, 0x42, 0x42, 0x42}
+- serverHello := serverHelloMsg{
++ serverHello := &serverHelloMsg{
+ vers: VersionTLS12,
+ random: random[:],
+ scts: [][]byte{sct},
+ }
+- serverHelloBytes := serverHello.marshal()
++ serverHelloBytes := mustMarshal(t, serverHello)
+
+ var serverHelloCopy serverHelloMsg
+ if !serverHelloCopy.unmarshal(serverHelloBytes) {
+@@ -451,12 +460,12 @@ func TestRejectEmptySCT(t *testing.T) {
+ // not be zero length.
+
+ var random [32]byte
+- serverHello := serverHelloMsg{
++ serverHello := &serverHelloMsg{
+ vers: VersionTLS12,
+ random: random[:],
+ scts: [][]byte{nil},
+ }
+- serverHelloBytes := serverHello.marshal()
++ serverHelloBytes := mustMarshal(t, serverHello)
+
+ var serverHelloCopy serverHelloMsg
+ if serverHelloCopy.unmarshal(serverHelloBytes) {
+--- go.orig/src/crypto/tls/handshake_server.go
++++ go/src/crypto/tls/handshake_server.go
+@@ -129,7 +129,9 @@ func (hs *serverHandshakeState) handshak
+
+ // readClientHello reads a ClientHello message and selects the protocol version.
+ func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) {
+- msg, err := c.readHandshake()
++ // clientHelloMsg is included in the transcript, but we haven't initialized
++ // it yet. The respective handshake functions will record it themselves.
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return nil, err
+ }
+@@ -456,9 +458,10 @@ func (hs *serverHandshakeState) doResume
+ hs.hello.ticketSupported = hs.sessionState.usedOldKey
+ hs.finishedHash = newFinishedHash(c.vers, hs.suite)
+ hs.finishedHash.discardHandshakeBuffer()
+- hs.finishedHash.Write(hs.clientHello.marshal())
+- hs.finishedHash.Write(hs.hello.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
++ if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil {
++ return err
++ }
++ if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil {
+ return err
+ }
+
+@@ -496,24 +499,23 @@ func (hs *serverHandshakeState) doFullHa
+ // certificates won't be used.
+ hs.finishedHash.discardHandshakeBuffer()
+ }
+- hs.finishedHash.Write(hs.clientHello.marshal())
+- hs.finishedHash.Write(hs.hello.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
++ if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil {
++ return err
++ }
++ if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil {
+ return err
+ }
+
+ certMsg := new(certificateMsg)
+ certMsg.certificates = hs.cert.Certificate
+- hs.finishedHash.Write(certMsg.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil {
+ return err
+ }
+
+ if hs.hello.ocspStapling {
+ certStatus := new(certificateStatusMsg)
+ certStatus.response = hs.cert.OCSPStaple
+- hs.finishedHash.Write(certStatus.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certStatus, &hs.finishedHash); err != nil {
+ return err
+ }
+ }
+@@ -525,8 +527,7 @@ func (hs *serverHandshakeState) doFullHa
+ return err
+ }
+ if skx != nil {
+- hs.finishedHash.Write(skx.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(skx, &hs.finishedHash); err != nil {
+ return err
+ }
+ }
+@@ -552,15 +553,13 @@ func (hs *serverHandshakeState) doFullHa
+ if c.config.ClientCAs != nil {
+ certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
+ }
+- hs.finishedHash.Write(certReq.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certReq, &hs.finishedHash); err != nil {
+ return err
+ }
+ }
+
+ helloDone := new(serverHelloDoneMsg)
+- hs.finishedHash.Write(helloDone.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(helloDone, &hs.finishedHash); err != nil {
+ return err
+ }
+
+@@ -570,7 +569,7 @@ func (hs *serverHandshakeState) doFullHa
+
+ var pub crypto.PublicKey // public key for client auth, if any
+
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -583,7 +582,6 @@ func (hs *serverHandshakeState) doFullHa
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certMsg, msg)
+ }
+- hs.finishedHash.Write(certMsg.marshal())
+
+ if err := c.processCertsFromClient(Certificate{
+ Certificate: certMsg.certificates,
+@@ -594,7 +592,7 @@ func (hs *serverHandshakeState) doFullHa
+ pub = c.peerCertificates[0].PublicKey
+ }
+
+- msg, err = c.readHandshake()
++ msg, err = c.readHandshake(&hs.finishedHash)
+ if err != nil {
+ return err
+ }
+@@ -612,7 +610,6 @@ func (hs *serverHandshakeState) doFullHa
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(ckx, msg)
+ }
+- hs.finishedHash.Write(ckx.marshal())
+
+ preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers)
+ if err != nil {
+@@ -632,7 +629,10 @@ func (hs *serverHandshakeState) doFullHa
+ // to the client's certificate. This allows us to verify that the client is in
+ // possession of the private key of the certificate.
+ if len(c.peerCertificates) > 0 {
+- msg, err = c.readHandshake()
++ // certificateVerifyMsg is included in the transcript, but not until
++ // after we verify the handshake signature, since the state before
++ // this message was sent is used.
++ msg, err = c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -667,7 +667,9 @@ func (hs *serverHandshakeState) doFullHa
+ return errors.New("tls: invalid signature by the client certificate: " + err.Error())
+ }
+
+- hs.finishedHash.Write(certVerify.marshal())
++ if err := transcriptMsg(certVerify, &hs.finishedHash); err != nil {
++ return err
++ }
+ }
+
+ hs.finishedHash.discardHandshakeBuffer()
+@@ -707,7 +709,10 @@ func (hs *serverHandshakeState) readFini
+ return err
+ }
+
+- msg, err := c.readHandshake()
++ // finishedMsg is included in the transcript, but not until after we
++ // check the client version, since the state before this message was
++ // sent is used during verification.
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -724,7 +729,10 @@ func (hs *serverHandshakeState) readFini
+ return errors.New("tls: client's Finished message is incorrect")
+ }
+
+- hs.finishedHash.Write(clientFinished.marshal())
++ if err := transcriptMsg(clientFinished, &hs.finishedHash); err != nil {
++ return err
++ }
++
+ copy(out, verify)
+ return nil
+ }
+@@ -758,14 +766,16 @@ func (hs *serverHandshakeState) sendSess
+ masterSecret: hs.masterSecret,
+ certificates: certsFromClient,
+ }
+- var err error
+- m.ticket, err = c.encryptTicket(state.marshal())
++ stateBytes, err := state.marshal()
++ if err != nil {
++ return err
++ }
++ m.ticket, err = c.encryptTicket(stateBytes)
+ if err != nil {
+ return err
+ }
+
+- hs.finishedHash.Write(m.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(m, &hs.finishedHash); err != nil {
+ return err
+ }
+
+@@ -775,14 +785,13 @@ func (hs *serverHandshakeState) sendSess
+ func (hs *serverHandshakeState) sendFinished(out []byte) error {
+ c := hs.c
+
+- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
++ if err := c.writeChangeCipherRecord(); err != nil {
+ return err
+ }
+
+ finished := new(finishedMsg)
+ finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
+- hs.finishedHash.Write(finished.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil {
+ return err
+ }
+
+--- go.orig/src/crypto/tls/handshake_server_test.go
++++ go/src/crypto/tls/handshake_server_test.go
+@@ -30,6 +30,13 @@ func testClientHello(t *testing.T, serve
+ testClientHelloFailure(t, serverConfig, m, "")
+ }
+
++// testFatal is a hack to prevent the compiler from complaining that there is a
++// call to t.Fatal from a non-test goroutine
++func testFatal(t *testing.T, err error) {
++ t.Helper()
++ t.Fatal(err)
++}
++
+ func testClientHelloFailure(t *testing.T, serverConfig *Config, m handshakeMessage, expectedSubStr string) {
+ c, s := localPipe(t)
+ go func() {
+@@ -37,7 +44,9 @@ func testClientHelloFailure(t *testing.T
+ if ch, ok := m.(*clientHelloMsg); ok {
+ cli.vers = ch.vers
+ }
+- cli.writeRecord(recordTypeHandshake, m.marshal())
++ if _, err := cli.writeHandshakeRecord(m, nil); err != nil {
++ testFatal(t, err)
++ }
+ c.Close()
+ }()
+ ctx := context.Background()
+@@ -194,7 +203,9 @@ func TestRenegotiationExtension(t *testi
+ go func() {
+ cli := Client(c, testConfig)
+ cli.vers = clientHello.vers
+- cli.writeRecord(recordTypeHandshake, clientHello.marshal())
++ if _, err := cli.writeHandshakeRecord(clientHello, nil); err != nil {
++ testFatal(t, err)
++ }
+
+ buf := make([]byte, 1024)
+ n, err := c.Read(buf)
+@@ -253,8 +264,10 @@ func TestTLS12OnlyCipherSuites(t *testin
+ go func() {
+ cli := Client(c, testConfig)
+ cli.vers = clientHello.vers
+- cli.writeRecord(recordTypeHandshake, clientHello.marshal())
+- reply, err := cli.readHandshake()
++ if _, err := cli.writeHandshakeRecord(clientHello, nil); err != nil {
++ testFatal(t, err)
++ }
++ reply, err := cli.readHandshake(nil)
+ c.Close()
+ if err != nil {
+ replyChan <- err
+@@ -308,8 +321,10 @@ func TestTLSPointFormats(t *testing.T) {
+ go func() {
+ cli := Client(c, testConfig)
+ cli.vers = clientHello.vers
+- cli.writeRecord(recordTypeHandshake, clientHello.marshal())
+- reply, err := cli.readHandshake()
++ if _, err := cli.writeHandshakeRecord(clientHello, nil); err != nil {
++ testFatal(t, err)
++ }
++ reply, err := cli.readHandshake(nil)
+ c.Close()
+ if err != nil {
+ replyChan <- err
+@@ -1425,7 +1440,9 @@ func TestSNIGivenOnFailure(t *testing.T)
+ go func() {
+ cli := Client(c, testConfig)
+ cli.vers = clientHello.vers
+- cli.writeRecord(recordTypeHandshake, clientHello.marshal())
++ if _, err := cli.writeHandshakeRecord(clientHello, nil); err != nil {
++ testFatal(t, err)
++ }
+ c.Close()
+ }()
+ conn := Server(s, serverConfig)
+--- go.orig/src/crypto/tls/handshake_server_tls13.go
++++ go/src/crypto/tls/handshake_server_tls13.go
+@@ -298,7 +298,12 @@ func (hs *serverHandshakeStateTLS13) che
+ c.sendAlert(alertInternalError)
+ return errors.New("tls: internal error: failed to clone hash")
+ }
+- transcript.Write(hs.clientHello.marshalWithoutBinders())
++ clientHelloBytes, err := hs.clientHello.marshalWithoutBinders()
++ if err != nil {
++ c.sendAlert(alertInternalError)
++ return err
++ }
++ transcript.Write(clientHelloBytes)
+ pskBinder := hs.suite.finishedHash(binderKey, transcript)
+ if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) {
+ c.sendAlert(alertDecryptError)
+@@ -389,8 +394,7 @@ func (hs *serverHandshakeStateTLS13) sen
+ }
+ hs.sentDummyCCS = true
+
+- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+- return err
++ return hs.c.writeChangeCipherRecord()
+ }
+
+ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error {
+@@ -398,7 +402,9 @@ func (hs *serverHandshakeStateTLS13) doH
+
+ // The first ClientHello gets double-hashed into the transcript upon a
+ // HelloRetryRequest. See RFC 8446, Section 4.4.1.
+- hs.transcript.Write(hs.clientHello.marshal())
++ if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
++ return err
++ }
+ chHash := hs.transcript.Sum(nil)
+ hs.transcript.Reset()
+ hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
+@@ -414,8 +420,7 @@ func (hs *serverHandshakeStateTLS13) doH
+ selectedGroup: selectedGroup,
+ }
+
+- hs.transcript.Write(helloRetryRequest.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(helloRetryRequest, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -423,7 +428,8 @@ func (hs *serverHandshakeStateTLS13) doH
+ return err
+ }
+
+- msg, err := c.readHandshake()
++ // clientHelloMsg is not included in the transcript.
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -514,9 +520,10 @@ func illegalClientHelloChange(ch, ch1 *c
+ func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
+ c := hs.c
+
+- hs.transcript.Write(hs.clientHello.marshal())
+- hs.transcript.Write(hs.hello.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
++ if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
++ return err
++ }
++ if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -559,8 +566,7 @@ func (hs *serverHandshakeStateTLS13) sen
+ encryptedExtensions.alpnProtocol = selectedProto
+ c.clientProtocol = selectedProto
+
+- hs.transcript.Write(encryptedExtensions.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, encryptedExtensions.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(encryptedExtensions, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -589,8 +595,7 @@ func (hs *serverHandshakeStateTLS13) sen
+ certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
+ }
+
+- hs.transcript.Write(certReq.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certReq, hs.transcript); err != nil {
+ return err
+ }
+ }
+@@ -601,8 +606,7 @@ func (hs *serverHandshakeStateTLS13) sen
+ certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0
+ certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0
+
+- hs.transcript.Write(certMsg.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -633,8 +637,7 @@ func (hs *serverHandshakeStateTLS13) sen
+ }
+ certVerifyMsg.signature = sig
+
+- hs.transcript.Write(certVerifyMsg.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -648,8 +651,7 @@ func (hs *serverHandshakeStateTLS13) sen
+ verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
+ }
+
+- hs.transcript.Write(finished.marshal())
+- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
++ if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil {
+ return err
+ }
+
+@@ -710,7 +712,9 @@ func (hs *serverHandshakeStateTLS13) sen
+ finishedMsg := &finishedMsg{
+ verifyData: hs.clientFinished,
+ }
+- hs.transcript.Write(finishedMsg.marshal())
++ if err := transcriptMsg(finishedMsg, hs.transcript); err != nil {
++ return err
++ }
+
+ if !hs.shouldSendSessionTickets() {
+ return nil
+@@ -735,8 +739,12 @@ func (hs *serverHandshakeStateTLS13) sen
+ SignedCertificateTimestamps: c.scts,
+ },
+ }
+- var err error
+- m.label, err = c.encryptTicket(state.marshal())
++ stateBytes, err := state.marshal()
++ if err != nil {
++ c.sendAlert(alertInternalError)
++ return err
++ }
++ m.label, err = c.encryptTicket(stateBytes)
+ if err != nil {
+ return err
+ }
+@@ -755,7 +763,7 @@ func (hs *serverHandshakeStateTLS13) sen
+ // ticket_nonce, which must be unique per connection, is always left at
+ // zero because we only ever send one ticket per connection.
+
+- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
++ if _, err := c.writeHandshakeRecord(m, nil); err != nil {
+ return err
+ }
+
+@@ -780,7 +788,7 @@ func (hs *serverHandshakeStateTLS13) rea
+ // If we requested a client certificate, then the client must send a
+ // certificate message. If it's empty, no CertificateVerify is sent.
+
+- msg, err := c.readHandshake()
++ msg, err := c.readHandshake(hs.transcript)
+ if err != nil {
+ return err
+ }
+@@ -790,7 +798,6 @@ func (hs *serverHandshakeStateTLS13) rea
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certMsg, msg)
+ }
+- hs.transcript.Write(certMsg.marshal())
+
+ if err := c.processCertsFromClient(certMsg.certificate); err != nil {
+ return err
+@@ -804,7 +811,10 @@ func (hs *serverHandshakeStateTLS13) rea
+ }
+
+ if len(certMsg.certificate.Certificate) != 0 {
+- msg, err = c.readHandshake()
++ // certificateVerifyMsg is included in the transcript, but not until
++ // after we verify the handshake signature, since the state before
++ // this message was sent is used.
++ msg, err = c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+@@ -835,7 +845,9 @@ func (hs *serverHandshakeStateTLS13) rea
+ return errors.New("tls: invalid signature by the client certificate: " + err.Error())
+ }
+
+- hs.transcript.Write(certVerify.marshal())
++ if err := transcriptMsg(certVerify, hs.transcript); err != nil {
++ return err
++ }
+ }
+
+ // If we waited until the client certificates to send session tickets, we
+@@ -850,7 +862,8 @@ func (hs *serverHandshakeStateTLS13) rea
+ func (hs *serverHandshakeStateTLS13) readClientFinished() error {
+ c := hs.c
+
+- msg, err := c.readHandshake()
++ // finishedMsg is not included in the transcript.
++ msg, err := c.readHandshake(nil)
+ if err != nil {
+ return err
+ }
+--- go.orig/src/crypto/tls/key_schedule.go
++++ go/src/crypto/tls/key_schedule.go
+@@ -8,6 +8,7 @@ import (
+ "crypto/elliptic"
+ "crypto/hmac"
+ "errors"
++ "fmt"
+ "hash"
+ "io"
+ "math/big"
+@@ -42,8 +43,24 @@ func (c *cipherSuiteTLS13) expandLabel(s
+ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(context)
+ })
++ hkdfLabelBytes, err := hkdfLabel.Bytes()
++ if err != nil {
++ // Rather than calling BytesOrPanic, we explicitly handle this error, in
++ // order to provide a reasonable error message. It should be basically
++ // impossible for this to panic, and routing errors back through the
++ // tree rooted in this function is quite painful. The labels are fixed
++ // size, and the context is either a fixed-length computed hash, or
++ // parsed from a field which has the same length limitation. As such, an
++ // error here is likely to only be caused during development.
++ //
++ // NOTE: another reasonable approach here might be to return a
++ // randomized slice if we encounter an error, which would break the
++ // connection, but avoid panicking. This would perhaps be safer but
++ // significantly more confusing to users.
++ panic(fmt.Errorf("failed to construct HKDF label: %s", err))
++ }
+ out := make([]byte, length)
+- n, err := hkdf.Expand(c.hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out)
++ n, err := hkdf.Expand(c.hash.New, secret, hkdfLabelBytes).Read(out)
+ if err != nil || n != length {
+ panic("tls: HKDF-Expand-Label invocation failed unexpectedly")
+ }
+--- go.orig/src/crypto/tls/ticket.go
++++ go/src/crypto/tls/ticket.go
+@@ -32,7 +32,7 @@ type sessionState struct {
+ usedOldKey bool
+ }
+
+-func (m *sessionState) marshal() []byte {
++func (m *sessionState) marshal() ([]byte, error) {
+ var b cryptobyte.Builder
+ b.AddUint16(m.vers)
+ b.AddUint16(m.cipherSuite)
+@@ -47,7 +47,7 @@ func (m *sessionState) marshal() []byte
+ })
+ }
+ })
+- return b.BytesOrPanic()
++ return b.Bytes()
+ }
+
+ func (m *sessionState) unmarshal(data []byte) bool {
+@@ -86,7 +86,7 @@ type sessionStateTLS13 struct {
+ certificate Certificate // CertificateEntry certificate_list<0..2^24-1>;
+ }
+
+-func (m *sessionStateTLS13) marshal() []byte {
++func (m *sessionStateTLS13) marshal() ([]byte, error) {
+ var b cryptobyte.Builder
+ b.AddUint16(VersionTLS13)
+ b.AddUint8(0) // revision
+@@ -96,7 +96,7 @@ func (m *sessionStateTLS13) marshal() []
+ b.AddBytes(m.resumptionSecret)
+ })
+ marshalCertificate(&b, m.certificate)
+- return b.BytesOrPanic()
++ return b.Bytes()
+ }
+
+ func (m *sessionStateTLS13) unmarshal(data []byte) bool {
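The hunks above follow one recurring pattern: handshake message marshal() methods move from cryptobyte's BytesOrPanic() (which panics on a malformed builder) to Bytes() with an explicit error return, and the new transcriptMsg() helper marshals a message before feeding it to the transcript hash so that error can reach the caller. The standalone sketch below only illustrates that pattern; toyMsg, its payload, and transcript() are invented stand-ins, not types from the patch.

    package main

    import (
        "crypto/sha256"
        "errors"
        "fmt"
    )

    // transcriptHash mirrors the minimal interface the patch introduces.
    type transcriptHash interface {
        Write([]byte) (int, error)
    }

    // toyMsg stands in for a TLS handshake message: marshal now reports an
    // error instead of panicking.
    type toyMsg struct{ payload []byte }

    func (m *toyMsg) marshal() ([]byte, error) {
        if m.payload == nil {
            return nil, errors.New("toy: nothing to marshal")
        }
        return append([]byte{0x01}, m.payload...), nil
    }

    // transcript mirrors transcriptMsg: marshal, then hash, propagating the
    // marshalling error rather than panicking inside the TLS stack.
    func transcript(m *toyMsg, h transcriptHash) error {
        data, err := m.marshal()
        if err != nil {
            return err
        }
        h.Write(data)
        return nil
    }

    func main() {
        h := sha256.New()
        if err := transcript(&toyMsg{payload: []byte("hello")}, h); err != nil {
            fmt.Println("marshal failed:", err)
            return
        }
        fmt.Printf("transcript hash: %x\n", h.Sum(nil))
    }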
diff --git a/meta/recipes-devtools/go/go-1.19/cve-2022-41725.patch b/meta/recipes-devtools/go/go-1.19/cve-2022-41725.patch
new file mode 100644
index 0000000000..a71d07e3f1
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.19/cve-2022-41725.patch
@@ -0,0 +1,652 @@
+From 5c55ac9bf1e5f779220294c843526536605f42ab Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 25 Jan 2023 09:27:01 -0800
+Subject: [PATCH] [release-branch.go1.19] mime/multipart: limit memory/inode
+ consumption of ReadForm
+
+Reader.ReadForm is documented as storing "up to maxMemory bytes + 10MB"
+in memory. Parsed forms can consume substantially more memory than
+this limit, since ReadForm does not account for map entry overhead
+and MIME headers.
+
+In addition, while the amount of disk memory consumed by ReadForm can
+be constrained by limiting the size of the parsed input, ReadForm will
+create one temporary file per form part stored on disk, potentially
+consuming a large number of inodes.
+
+Update ReadForm's memory accounting to include part names,
+MIME headers, and map entry overhead.
+
+Update ReadForm to store all on-disk file parts in a single
+temporary file.
+
+Files returned by FileHeader.Open are documented as having a concrete
+type of *os.File when a file is stored on disk. The change to use a
+single temporary file for all parts means that this is no longer the
+case when a form contains more than a single file part stored on disk.
+
+The previous behavior of storing each file part in a separate disk
+file may be reenabled with GODEBUG=multipartfiles=distinct.
+
+Update Reader.NextPart and Reader.NextRawPart to set a 10MiB cap
+on the size of MIME headers.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+Updates #58006
+Fixes #58362
+Fixes CVE-2022-41725
+
+Change-Id: Ibd780a6c4c83ac8bcfd3cbe344f042e9940f2eab
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1714276
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit ed4664330edcd91b24914c9371c377c132dbce8c)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728949
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468116
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Than McIntosh <thanm@google.com>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+---
+
+CVE: CVE-2022-41725
+
+Upstream-Status: Backport [see text]
+
+https://github.com/golang/go.git commit 5c55ac9bf1e5...
+modified for reader.go
+
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+
+___
+ src/mime/multipart/formdata.go | 132 ++++++++++++++++++++-----
+ src/mime/multipart/formdata_test.go | 140 ++++++++++++++++++++++++++-
+ src/mime/multipart/multipart.go | 25 +++--
+ src/mime/multipart/readmimeheader.go | 14 +++
+ src/net/http/request_test.go | 2 +-
+ src/net/textproto/reader.go | 20 +++-
+ 6 files changed, 295 insertions(+), 38 deletions(-)
+ create mode 100644 src/mime/multipart/readmimeheader.go
+
+--- go.orig/src/mime/multipart/formdata.go
++++ go/src/mime/multipart/formdata.go
+@@ -7,6 +7,7 @@ package multipart
+ import (
+ "bytes"
+ "errors"
++ "internal/godebug"
+ "io"
+ "math"
+ "net/textproto"
+@@ -33,23 +34,58 @@ func (r *Reader) ReadForm(maxMemory int6
+
+ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ form := &Form{make(map[string][]string), make(map[string][]*FileHeader)}
++ var (
++ file *os.File
++ fileOff int64
++ )
++ numDiskFiles := 0
++ multipartFiles := godebug.Get("multipartfiles")
++ combineFiles := multipartFiles != "distinct"
+ defer func() {
++ if file != nil {
++ if cerr := file.Close(); err == nil {
++ err = cerr
++ }
++ }
++ if combineFiles && numDiskFiles > 1 {
++ for _, fhs := range form.File {
++ for _, fh := range fhs {
++ fh.tmpshared = true
++ }
++ }
++ }
+ if err != nil {
+ form.RemoveAll()
++ if file != nil {
++ os.Remove(file.Name())
++ }
+ }
+ }()
+
+- // Reserve an additional 10 MB for non-file parts.
+- maxValueBytes := maxMemory + int64(10<<20)
+- if maxValueBytes <= 0 {
++ // maxFileMemoryBytes is the maximum bytes of file data we will store in memory.
++ // Data past this limit is written to disk.
++ // This limit strictly applies to content, not metadata (filenames, MIME headers, etc.),
++ // since metadata is always stored in memory, not disk.
++ //
++ // maxMemoryBytes is the maximum bytes we will store in memory, including file content,
++ // non-file part values, metdata, and map entry overhead.
++ //
++ // We reserve an additional 10 MB in maxMemoryBytes for non-file data.
++ //
++ // The relationship between these parameters, as well as the overly-large and
++ // unconfigurable 10 MB added on to maxMemory, is unfortunate but difficult to change
++ // within the constraints of the API as documented.
++ maxFileMemoryBytes := maxMemory
++ maxMemoryBytes := maxMemory + int64(10<<20)
++ if maxMemoryBytes <= 0 {
+ if maxMemory < 0 {
+- maxValueBytes = 0
++ maxMemoryBytes = 0
+ } else {
+- maxValueBytes = math.MaxInt64
++ maxMemoryBytes = math.MaxInt64
+ }
+ }
+ for {
+- p, err := r.NextPart()
++ p, err := r.nextPart(false, maxMemoryBytes)
+ if err == io.EOF {
+ break
+ }
+@@ -63,16 +99,27 @@ func (r *Reader) readForm(maxMemory int6
+ }
+ filename := p.FileName()
+
++ // Multiple values for the same key (one map entry, longer slice) are cheaper
++ // than the same number of values for different keys (many map entries), but
++ // using a consistent per-value cost for overhead is simpler.
++ maxMemoryBytes -= int64(len(name))
++ maxMemoryBytes -= 100 // map overhead
++ if maxMemoryBytes < 0 {
++ // We can't actually take this path, since nextPart would already have
++ // rejected the MIME headers for being too large. Check anyway.
++ return nil, ErrMessageTooLarge
++ }
++
+ var b bytes.Buffer
+
+ if filename == "" {
+ // value, store as string in memory
+- n, err := io.CopyN(&b, p, maxValueBytes+1)
++ n, err := io.CopyN(&b, p, maxMemoryBytes+1)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+- maxValueBytes -= n
+- if maxValueBytes < 0 {
++ maxMemoryBytes -= n
++ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
+ form.Value[name] = append(form.Value[name], b.String())
+@@ -80,35 +127,45 @@ func (r *Reader) readForm(maxMemory int6
+ }
+
+ // file, store in memory or on disk
++ maxMemoryBytes -= mimeHeaderSize(p.Header)
++ if maxMemoryBytes < 0 {
++ return nil, ErrMessageTooLarge
++ }
+ fh := &FileHeader{
+ Filename: filename,
+ Header: p.Header,
+ }
+- n, err := io.CopyN(&b, p, maxMemory+1)
++ n, err := io.CopyN(&b, p, maxFileMemoryBytes+1)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+- if n > maxMemory {
+- // too big, write to disk and flush buffer
+- file, err := os.CreateTemp("", "multipart-")
+- if err != nil {
+- return nil, err
++ if n > maxFileMemoryBytes {
++ if file == nil {
++ file, err = os.CreateTemp(r.tempDir, "multipart-")
++ if err != nil {
++ return nil, err
++ }
+ }
++ numDiskFiles++
+ size, err := io.Copy(file, io.MultiReader(&b, p))
+- if cerr := file.Close(); err == nil {
+- err = cerr
+- }
+ if err != nil {
+- os.Remove(file.Name())
+ return nil, err
+ }
+ fh.tmpfile = file.Name()
+ fh.Size = size
++ fh.tmpoff = fileOff
++ fileOff += size
++ if !combineFiles {
++ if err := file.Close(); err != nil {
++ return nil, err
++ }
++ file = nil
++ }
+ } else {
+ fh.content = b.Bytes()
+ fh.Size = int64(len(fh.content))
+- maxMemory -= n
+- maxValueBytes -= n
++ maxFileMemoryBytes -= n
++ maxMemoryBytes -= n
+ }
+ form.File[name] = append(form.File[name], fh)
+ }
+@@ -116,6 +173,17 @@ func (r *Reader) readForm(maxMemory int6
+ return form, nil
+ }
+
++func mimeHeaderSize(h textproto.MIMEHeader) (size int64) {
++ for k, vs := range h {
++ size += int64(len(k))
++ size += 100 // map entry overhead
++ for _, v := range vs {
++ size += int64(len(v))
++ }
++ }
++ return size
++}
++
+ // Form is a parsed multipart form.
+ // Its File parts are stored either in memory or on disk,
+ // and are accessible via the *FileHeader's Open method.
+@@ -133,7 +201,7 @@ func (f *Form) RemoveAll() error {
+ for _, fh := range fhs {
+ if fh.tmpfile != "" {
+ e := os.Remove(fh.tmpfile)
+- if e != nil && err == nil {
++ if e != nil && !errors.Is(e, os.ErrNotExist) && err == nil {
+ err = e
+ }
+ }
+@@ -148,15 +216,25 @@ type FileHeader struct {
+ Header textproto.MIMEHeader
+ Size int64
+
+- content []byte
+- tmpfile string
++ content []byte
++ tmpfile string
++ tmpoff int64
++ tmpshared bool
+ }
+
+ // Open opens and returns the FileHeader's associated File.
+ func (fh *FileHeader) Open() (File, error) {
+ if b := fh.content; b != nil {
+ r := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+- return sectionReadCloser{r}, nil
++ return sectionReadCloser{r, nil}, nil
++ }
++ if fh.tmpshared {
++ f, err := os.Open(fh.tmpfile)
++ if err != nil {
++ return nil, err
++ }
++ r := io.NewSectionReader(f, fh.tmpoff, fh.Size)
++ return sectionReadCloser{r, f}, nil
+ }
+ return os.Open(fh.tmpfile)
+ }
+@@ -175,8 +253,12 @@ type File interface {
+
+ type sectionReadCloser struct {
+ *io.SectionReader
++ io.Closer
+ }
+
+ func (rc sectionReadCloser) Close() error {
++ if rc.Closer != nil {
++ return rc.Closer.Close()
++ }
+ return nil
+ }
+--- go.orig/src/mime/multipart/formdata_test.go
++++ go/src/mime/multipart/formdata_test.go
+@@ -6,8 +6,10 @@ package multipart
+
+ import (
+ "bytes"
++ "fmt"
+ "io"
+ "math"
++ "net/textproto"
+ "os"
+ "strings"
+ "testing"
+@@ -208,8 +210,8 @@ Content-Disposition: form-data; name="la
+ maxMemory int64
+ err error
+ }{
+- {"smaller", 50, nil},
+- {"exact-fit", 25, nil},
++ {"smaller", 50 + int64(len("largetext")) + 100, nil},
++ {"exact-fit", 25 + int64(len("largetext")) + 100, nil},
+ {"too-large", 0, ErrMessageTooLarge},
+ }
+ for _, tc := range testCases {
+@@ -224,7 +226,7 @@ Content-Disposition: form-data; name="la
+ defer f.RemoveAll()
+ }
+ if tc.err != err {
+- t.Fatalf("ReadForm error - got: %v; expected: %v", tc.err, err)
++ t.Fatalf("ReadForm error - got: %v; expected: %v", err, tc.err)
+ }
+ if err == nil {
+ if g := f.Value["largetext"][0]; g != largeTextValue {
+@@ -234,3 +236,135 @@ Content-Disposition: form-data; name="la
+ })
+ }
+ }
++
++// TestReadForm_MetadataTooLarge verifies that we account for the size of field names,
++// MIME headers, and map entry overhead while limiting the memory consumption of parsed forms.
++func TestReadForm_MetadataTooLarge(t *testing.T) {
++ for _, test := range []struct {
++ name string
++ f func(*Writer)
++ }{{
++ name: "large name",
++ f: func(fw *Writer) {
++ name := strings.Repeat("a", 10<<20)
++ w, _ := fw.CreateFormField(name)
++ w.Write([]byte("value"))
++ },
++ }, {
++ name: "large MIME header",
++ f: func(fw *Writer) {
++ h := make(textproto.MIMEHeader)
++ h.Set("Content-Disposition", `form-data; name="a"`)
++ h.Set("X-Foo", strings.Repeat("a", 10<<20))
++ w, _ := fw.CreatePart(h)
++ w.Write([]byte("value"))
++ },
++ }, {
++ name: "many parts",
++ f: func(fw *Writer) {
++ for i := 0; i < 110000; i++ {
++ w, _ := fw.CreateFormField("f")
++ w.Write([]byte("v"))
++ }
++ },
++ }} {
++ t.Run(test.name, func(t *testing.T) {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ test.f(fw)
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(&buf, fw.Boundary())
++ _, err := fr.ReadForm(0)
++ if err != ErrMessageTooLarge {
++ t.Errorf("fr.ReadForm() = %v, want ErrMessageTooLarge", err)
++ }
++ })
++ }
++}
++
++// TestReadForm_ManyFiles_Combined tests that a multipart form containing many files only
++// results in a single on-disk file.
++func TestReadForm_ManyFiles_Combined(t *testing.T) {
++ const distinct = false
++ testReadFormManyFiles(t, distinct)
++}
++
++// TestReadForm_ManyFiles_Distinct tests that setting GODEBUG=multipartfiles=distinct
++// results in every file in a multipart form being placed in a distinct on-disk file.
++func TestReadForm_ManyFiles_Distinct(t *testing.T) {
++ t.Setenv("GODEBUG", "multipartfiles=distinct")
++ const distinct = true
++ testReadFormManyFiles(t, distinct)
++}
++
++func testReadFormManyFiles(t *testing.T, distinct bool) {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ const numFiles = 10
++ for i := 0; i < numFiles; i++ {
++ name := fmt.Sprint(i)
++ w, err := fw.CreateFormFile(name, name)
++ if err != nil {
++ t.Fatal(err)
++ }
++ w.Write([]byte(name))
++ }
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(&buf, fw.Boundary())
++ fr.tempDir = t.TempDir()
++ form, err := fr.ReadForm(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ for i := 0; i < numFiles; i++ {
++ name := fmt.Sprint(i)
++ if got := len(form.File[name]); got != 1 {
++ t.Fatalf("form.File[%q] has %v entries, want 1", name, got)
++ }
++ fh := form.File[name][0]
++ file, err := fh.Open()
++ if err != nil {
++ t.Fatalf("form.File[%q].Open() = %v", name, err)
++ }
++ if distinct {
++ if _, ok := file.(*os.File); !ok {
++ t.Fatalf("form.File[%q].Open: %T, want *os.File", name, file)
++ }
++ }
++ got, err := io.ReadAll(file)
++ file.Close()
++ if string(got) != name || err != nil {
++ t.Fatalf("read form.File[%q]: %q, %v; want %q, nil", name, string(got), err, name)
++ }
++ }
++ dir, err := os.Open(fr.tempDir)
++ if err != nil {
++ t.Fatal(err)
++ }
++ defer dir.Close()
++ names, err := dir.Readdirnames(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ wantNames := 1
++ if distinct {
++ wantNames = numFiles
++ }
++ if len(names) != wantNames {
++ t.Fatalf("temp dir contains %v files; want 1", len(names))
++ }
++ if err := form.RemoveAll(); err != nil {
++ t.Fatalf("form.RemoveAll() = %v", err)
++ }
++ names, err = dir.Readdirnames(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ if len(names) != 0 {
++ t.Fatalf("temp dir contains %v files; want 0", len(names))
++ }
++}
+--- go.orig/src/mime/multipart/multipart.go
++++ go/src/mime/multipart/multipart.go
+@@ -128,12 +128,12 @@ func (r *stickyErrorReader) Read(p []byt
+ return n, r.err
+ }
+
+-func newPart(mr *Reader, rawPart bool) (*Part, error) {
++func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ }
+- if err := bp.populateHeaders(); err != nil {
++ if err := bp.populateHeaders(maxMIMEHeaderSize); err != nil {
+ return nil, err
+ }
+ bp.r = partReader{bp}
+@@ -149,12 +149,16 @@ func newPart(mr *Reader, rawPart bool) (
+ return bp, nil
+ }
+
+-func (bp *Part) populateHeaders() error {
++func (bp *Part) populateHeaders(maxMIMEHeaderSize int64) error {
+ r := textproto.NewReader(bp.mr.bufReader)
+- header, err := r.ReadMIMEHeader()
++ header, err := readMIMEHeader(r, maxMIMEHeaderSize)
+ if err == nil {
+ bp.Header = header
+ }
++ // TODO: Add a distinguishable error to net/textproto.
++ if err != nil && err.Error() == "message too large" {
++ err = ErrMessageTooLarge
++ }
+ return err
+ }
+
+@@ -294,6 +298,7 @@ func (p *Part) Close() error {
+ // isn't supported.
+ type Reader struct {
+ bufReader *bufio.Reader
++ tempDir string // used in tests
+
+ currentPart *Part
+ partsRead int
+@@ -304,6 +309,10 @@ type Reader struct {
+ dashBoundary []byte // "--boundary"
+ }
+
++// maxMIMEHeaderSize is the maximum size of a MIME header we will parse,
++// including header keys, values, and map overhead.
++const maxMIMEHeaderSize = 10 << 20
++
+ // NextPart returns the next part in the multipart or an error.
+ // When there are no more parts, the error io.EOF is returned.
+ //
+@@ -311,7 +320,7 @@ type Reader struct {
+ // has a value of "quoted-printable", that header is instead
+ // hidden and the body is transparently decoded during Read calls.
+ func (r *Reader) NextPart() (*Part, error) {
+- return r.nextPart(false)
++ return r.nextPart(false, maxMIMEHeaderSize)
+ }
+
+ // NextRawPart returns the next part in the multipart or an error.
+@@ -320,10 +329,10 @@ func (r *Reader) NextPart() (*Part, erro
+ // Unlike NextPart, it does not have special handling for
+ // "Content-Transfer-Encoding: quoted-printable".
+ func (r *Reader) NextRawPart() (*Part, error) {
+- return r.nextPart(true)
++ return r.nextPart(true, maxMIMEHeaderSize)
+ }
+
+-func (r *Reader) nextPart(rawPart bool) (*Part, error) {
++func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
+ }
+@@ -348,7 +357,7 @@ func (r *Reader) nextPart(rawPart bool)
+
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+- bp, err := newPart(r, rawPart)
++ bp, err := newPart(r, rawPart, maxMIMEHeaderSize)
+ if err != nil {
+ return nil, err
+ }
+--- /dev/null
++++ go/src/mime/multipart/readmimeheader.go
+@@ -0,0 +1,14 @@
++// Copyright 2023 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++package multipart
++
++import (
++ "net/textproto"
++ _ "unsafe" // for go:linkname
++)
++
++// readMIMEHeader is defined in package net/textproto.
++//
++//go:linkname readMIMEHeader net/textproto.readMIMEHeader
++func readMIMEHeader(r *textproto.Reader, lim int64) (textproto.MIMEHeader, error)
+--- go.orig/src/net/http/request_test.go
++++ go/src/net/http/request_test.go
+@@ -1110,7 +1110,7 @@ func testMissingFile(t *testing.T, req *
+ t.Errorf("FormFile file = %v, want nil", f)
+ }
+ if fh != nil {
+- t.Errorf("FormFile file header = %q, want nil", fh)
++ t.Errorf("FormFile file header = %v, want nil", fh)
+ }
+ if err != ErrMissingFile {
+ t.Errorf("FormFile err = %q, want ErrMissingFile", err)
+--- go.orig/src/net/textproto/reader.go
++++ go/src/net/textproto/reader.go
+@@ -7,8 +7,10 @@ package textproto
+ import (
+ "bufio"
+ "bytes"
++ "errors"
+ "fmt"
+ "io"
++ "math"
+ "strconv"
+ "strings"
+ "sync"
+@@ -481,6 +483,12 @@ func (r *Reader) ReadDotLines() ([]strin
+ // }
+ //
+ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
++ return readMIMEHeader(r, math.MaxInt64)
++}
++
++// readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size.
++// It is called by the mime/multipart package.
++func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ // Avoid lots of small slice allocations later by allocating one
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+@@ -521,6 +529,16 @@ func (r *Reader) ReadMIMEHeader() (MIMEH
+ continue
+ }
+
++ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
++ //
++ // value is computed as
++ //
++ // value := string(bytes.TrimLeft(v, " \t"))
++ //
++ // in the original patch from 1.19. This relies on
++ // 'v' which does not exist in 1.17. We leave the
++ // 1.17 method unchanged.
++
+ // Skip initial spaces in value.
+ i++ // skip colon
+ for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
+@@ -529,6 +547,16 @@ func (r *Reader) ReadMIMEHeader() (MIMEH
+ value := string(kv[i:])
+
+ vv := m[key]
++ if vv == nil {
++ lim -= int64(len(key))
++ lim -= 100 // map entry overhead
++ }
++ lim -= int64(len(value))
++ if lim < 0 {
++ // TODO: This should be a distinguishable error (ErrMessageTooLarge)
++ // to allow mime/multipart to detect it.
++ return m, errors.New("message too large")
++ }
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
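The net effect of the ReadForm changes in this backport can be seen with a small standalone program. The sketch below is illustrative only and not part of the patch; the 10 MiB field name and the ReadForm(0) limit simply mirror the patch's own TestReadForm_MetadataTooLarge case, where metadata alone is now enough to trip ErrMessageTooLarge. As the commit message notes, the pre-patch one-temp-file-per-part behaviour can still be requested with GODEBUG=multipartfiles=distinct.

    package main

    import (
        "bytes"
        "fmt"
        "mime/multipart"
        "strings"
    )

    func main() {
        var buf bytes.Buffer
        w := multipart.NewWriter(&buf)
        // The field *name* is ~10 MiB; its value is a single byte, so only
        // the metadata is large here.
        field, err := w.CreateFormField(strings.Repeat("a", 10<<20))
        if err != nil {
            panic(err)
        }
        field.Write([]byte("v"))
        w.Close()

        r := multipart.NewReader(&buf, w.Boundary())
        // With the patched accounting, field names, MIME headers and map
        // entry overhead count against the limit, so this should fail with
        // multipart.ErrMessageTooLarge instead of quietly consuming memory.
        _, err = r.ReadForm(0)
        fmt.Println(err)
    }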
diff --git a/meta/recipes-devtools/go/go-1.20/0010-net-Fix-issue-with-DNS-not-being-updated.patch b/meta/recipes-devtools/go/go-1.20/0010-net-Fix-issue-with-DNS-not-being-updated.patch
new file mode 100644
index 0000000000..6ead518843
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.20/0010-net-Fix-issue-with-DNS-not-being-updated.patch
@@ -0,0 +1,51 @@
+From 20176b390e28daa86b4552965cb7bd9181983c4d Mon Sep 17 00:00:00 2001
+From: Chaitanya Vadrevu <chaitanya.vadrevu@ni.com>
+Date: Mon, 6 Nov 2023 20:11:19 -0600
+Subject: [PATCH] net: Fix issue with DNS not being updated
+
+When DNS requests are made, Go's native DNS resolver only re-reads
+/etc/resolv.conf if the previous read is more than 5 seconds old.
+
+On the first network call, initialization code runs that is supposed to
+initialize the DNS data and set the lastChecked time. A bug in this code
+causes /etc/resolv.conf not to be read during initialization, so the DNS
+data captured at program startup keeps being used for the next 5 seconds.
+This means that if /etc/resolv.conf changed between program startup and
+the first network call, stale DNS data is still used until the 5-second
+refresh interval expires.
+
+This causes "docker pull" to fail the first time if docker daemon is
+started before networking is up.
+
+Upstream commit d52883f443e1d564b0300acdd382af1769bf0477 made lot of
+improvements to DNS resolver to fix some issues which also fixes this
+issue.
+This patch picks the relevant changes from it to fix this particular
+issue.
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/d52883f443e1d564b0300acdd382af1769bf0477]
+
+Signed-off-by: Chaitanya Vadrevu <chaitanya.vadrevu@ni.com>
+---
+ src/net/dnsclient_unix.go | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go
+index 6dfd4af..520ffe6 100644
+--- a/src/net/dnsclient_unix.go
++++ b/src/net/dnsclient_unix.go
+@@ -337,10 +337,7 @@ var resolvConf resolverConfig
+ func (conf *resolverConfig) init() {
+ // Set dnsConfig and lastChecked so we don't parse
+ // resolv.conf twice the first time.
+- conf.dnsConfig = systemConf().resolv
+- if conf.dnsConfig == nil {
+- conf.dnsConfig = dnsReadConfig("/etc/resolv.conf")
+- }
++ conf.dnsConfig = dnsReadConfig("/etc/resolv.conf")
+ conf.lastChecked = time.Now()
+
+ // Prepare ch so that only one update of resolverConfig may
+--
+2.34.1
+
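For context, the scenario this patch fixes only involves Go's native (pure-Go) resolver, which caches the parsed /etc/resolv.conf. The sketch below is illustrative only and not part of the patch; it forces that resolver so the first lookup exercises the initialization path described above. On an unpatched runtime, if /etc/resolv.conf was rewritten after process start (for example when networking comes up late), that first lookup may still use the stale configuration for up to 5 seconds.

    package main

    import (
        "context"
        "fmt"
        "net"
        "time"
    )

    func main() {
        // PreferGo selects the pure-Go resolver, the code path touched by
        // the patch; the cgo resolver consults the C library instead.
        r := &net.Resolver{PreferGo: true}

        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
        defer cancel()

        // First lookup after startup: with the patch, /etc/resolv.conf is
        // (re)read here; without it, configuration captured at startup may
        // still be in effect.
        addrs, err := r.LookupHost(ctx, "example.com")
        if err != nil {
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Println("resolved:", addrs)
    }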
diff --git a/meta/recipes-devtools/go/go-1.20/CVE-2023-39319.patch b/meta/recipes-devtools/go/go-1.20/CVE-2023-39319.patch
new file mode 100644
index 0000000000..1554aa975c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.20/CVE-2023-39319.patch
@@ -0,0 +1,254 @@
+From 2070531d2f53df88e312edace6c8dfc9686ab2f5 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 3 Aug 2023 12:28:28 -0700
+Subject: [PATCH] html/template: properly handle special tags within the script
+ context
+
+The HTML specification has incredibly complex rules for how to handle
+"<!--", "<script", and "</script" when they appear within literals in
+the script context. Rather than attempting to apply these restrictions
+(which require a significantly more complex state machine) we apply
+the workaround suggested in section 4.12.1.3 of the HTML specification [1].
+
+More precisely, when "<!--", "<script", and "</script" appear within
+literals (strings and regular expressions, ignoring comments since we
+already elide their content) we replace the "<" with "\x3C". This avoids
+the unintuitive behavior that using these tags within literals can cause,
+by simply preventing the rendered content from triggering it. This may
+break some correct usages of these tags, but on balance is more likely
+to prevent XSS attacks where users are unknowingly either closing or not
+closing the script blocks where they think they are.
+
+Thanks to Takeshi Kaneko (GMO Cybersecurity by Ierae, Inc.) for
+reporting this issue.
+
+Fixes #62197
+Fixes #62397
+Fixes CVE-2023-39319
+
+[1] https://html.spec.whatwg.org/#restrictions-for-contents-of-script-elements
+
+Change-Id: Iab57b0532694827e3eddf57a7497ba1fab1746dc
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1976594
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2014621
+Reviewed-on: https://go-review.googlesource.com/c/go/+/526099
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Cherry Mui <cherryyz@google.com>
+
+CVE: CVE-2023-39319
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2070531d2f53df88e312edace6c8dfc9686ab2f5]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/go/build/deps_test.go | 6 ++--
+ src/html/template/context.go | 14 ++++++++++
+ src/html/template/escape.go | 26 ++++++++++++++++++
+ src/html/template/escape_test.go | 47 +++++++++++++++++++++++++++++++-
+ src/html/template/transition.go | 15 ++++++++++
+ 5 files changed, 104 insertions(+), 4 deletions(-)
+
+diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
+index dc3bb8c..359a00a 100644
+--- a/src/go/build/deps_test.go
++++ b/src/go/build/deps_test.go
+@@ -255,15 +255,15 @@ var depsRules = `
+ < text/template
+ < internal/lazytemplate;
+
+- encoding/json, html, text/template
+- < html/template;
+-
+ # regexp
+ FMT
+ < regexp/syntax
+ < regexp
+ < internal/lazyregexp;
+
++ encoding/json, html, text/template, regexp
++ < html/template;
++
+ # suffix array
+ encoding/binary, regexp
+ < index/suffixarray;
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index 0b65313..f5f44a1 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -164,6 +164,20 @@ func isInTag(s state) bool {
+ return false
+ }
+
++// isInScriptLiteral returns true if s is one of the literal states within a
++// <script> tag, and as such occurrences of "<!--", "<script", and "</script"
++// need to be treated specially.
++func isInScriptLiteral(s state) bool {
++ // Ignore the comment states (stateJSBlockCmt, stateJSLineCmt,
++ // stateJSHTMLOpenCmt, stateJSHTMLCloseCmt) because their content is already
++ // omitted from the output.
++ switch s {
++ case stateJSDqStr, stateJSSqStr, stateJSBqStr, stateJSRegexp:
++ return true
++ }
++ return false
++}
++
+ // delim is the delimiter that will end the current HTML attribute.
+ type delim uint8
+
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index bdccc65..1747ec9 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -10,6 +10,7 @@ import (
+ "html"
+ "internal/godebug"
+ "io"
++ "regexp"
+ "text/template"
+ "text/template/parse"
+ )
+@@ -652,6 +653,26 @@ var delimEnds = [...]string{
+ delimSpaceOrTagEnd: " \t\n\f\r>",
+ }
+
++var (
++ // Per WHATWG HTML specification, section 4.12.1.3, there are extremely
++ // complicated rules for how to handle the set of opening tags <!--,
++ // <script, and </script when they appear in JS literals (i.e. strings,
++ // regexs, and comments). The specification suggests a simple solution,
++ // rather than implementing the arcane ABNF, which involves simply escaping
++ // the opening bracket with \x3C. We use the below regex for this, since it
++ // makes doing the case-insensitive find-replace much simpler.
++ specialScriptTagRE = regexp.MustCompile("(?i)<(script|/script|!--)")
++ specialScriptTagReplacement = []byte("\\x3C$1")
++)
++
++func containsSpecialScriptTag(s []byte) bool {
++ return specialScriptTagRE.Match(s)
++}
++
++func escapeSpecialScriptTags(s []byte) []byte {
++ return specialScriptTagRE.ReplaceAll(s, specialScriptTagReplacement)
++}
++
+ var doctypeBytes = []byte("<!DOCTYPE")
+
+ // escapeText escapes a text template node.
+@@ -707,6 +728,11 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+ b.Write(s[written:cs])
+ written = i1
+ }
++ if isInScriptLiteral(c.state) && containsSpecialScriptTag(s[i:i1]) {
++ b.Write(s[written:i])
++ b.Write(escapeSpecialScriptTags(s[i:i1]))
++ written = i1
++ }
+ if i == i1 && c.state == c1.state {
+ panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
+ }
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index 4f48afe..7853daa 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -503,6 +503,21 @@ func TestEscape(t *testing.T) {
+ "<script>var a/*b*///c\nd</script>",
+ "<script>var a \nd</script>",
+ },
++ {
++ "Special tags in <script> string literals",
++ `<script>var a = "asd < 123 <!-- 456 < fgh <script jkl < 789 </script"</script>`,
++ `<script>var a = "asd < 123 \x3C!-- 456 < fgh \x3Cscript jkl < 789 \x3C/script"</script>`,
++ },
++ {
++ "Special tags in <script> string literals (mixed case)",
++ `<script>var a = "<!-- <ScripT </ScripT"</script>`,
++ `<script>var a = "\x3C!-- \x3CScripT \x3C/ScripT"</script>`,
++ },
++ {
++ "Special tags in <script> regex literals (mixed case)",
++ `<script>var a = /<!-- <ScripT </ScripT/</script>`,
++ `<script>var a = /\x3C!-- \x3CScripT \x3C/ScripT/</script>`,
++ },
+ {
+ "CSS comments",
+ "<style>p// paragraph\n" +
+@@ -1491,8 +1506,38 @@ func TestEscapeText(t *testing.T) {
+ context{state: stateJS, element: elementScript},
+ },
+ {
++ // <script and </script tags are escaped, so </script> should not
++ // cause us to exit the JS state.
+ `<script>document.write("<script>alert(1)</script>");`,
+- context{state: stateText},
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)</script>`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)<!--`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)</Script>");`,
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>document.write("<!--");`,
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>let a = /</script`,
++ context{state: stateJSRegexp, element: elementScript},
++ },
++ {
++ `<script>let a = /</script/`,
++ context{state: stateJS, element: elementScript, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<script type="text/template">`,
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 92eb351..e2660cc 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -212,6 +212,11 @@ var (
+ // element states.
+ func tSpecialTagEnd(c context, s []byte) (context, int) {
+ if c.element != elementNone {
++ // script end tags ("</script") within script literals are ignored, so that
++ // we can properly escape them.
++ if c.element == elementScript && (isInScriptLiteral(c.state) || isComment(c.state)) {
++ return c, len(s)
++ }
+ if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 {
+ return context{}, i
+ }
+@@ -331,6 +336,16 @@ func tJSDelimited(c context, s []byte) (context, int) {
+ inCharset = true
+ case ']':
+ inCharset = false
++ case '/':
++ // If "</script" appears in a regex literal, the '/' should not
++ // close the regex literal, and it will later be escaped to
++ // "\x3C/script" in escapeText.
++ if i > 0 && i+7 <= len(s) && bytes.Compare(bytes.ToLower(s[i-1:i+7]), []byte("</script")) == 0 {
++ i++
++ } else if !inCharset {
++ c.state, c.jsCtx = stateJS, jsCtxDivOp
++ return c, i + 1
++ }
+ default:
+ // end delimiter
+ if !inCharset {
+--
+2.40.0
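For illustration, a small program exercising the behaviour the new TestEscape cases above assert; it assumes an html/template with this fix applied, and uses one of the exact template inputs from the test table.

package main

import (
	"html/template"
	"os"
)

func main() {
	// One of the inputs from the "Special tags in <script> string literals
	// (mixed case)" test case added above.
	t := template.Must(template.New("t").Parse(
		`<script>var a = "<!-- <ScripT </ScripT"</script>`))
	// With the backport applied, the "<" of "<!--", "<script" and "</script"
	// inside the string literal is rendered as \x3C, i.e. the expected output
	// is: <script>var a = "\x3C!-- \x3CScripT \x3C/ScripT"</script>
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}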
diff --git a/meta/recipes-devtools/go/go-1.20/CVE-2023-39326.patch b/meta/recipes-devtools/go/go-1.20/CVE-2023-39326.patch
new file mode 100644
index 0000000000..ca78e552c2
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.20/CVE-2023-39326.patch
@@ -0,0 +1,182 @@
+From 6446af942e2e2b161c4ec1b60d9703a2b55dc4dd Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Tue, 7 Nov 2023 10:47:56 -0800
+Subject: [PATCH] net/http: limit chunked data overhead
+
+The chunked transfer encoding adds some overhead to
+the content transferred. When writing one byte per
+chunk, for example, there are five bytes of overhead
+per byte of data transferred: "1\r\nX\r\n" to send "X".
+
+Chunks may include "chunk extensions",
+which we skip over and do not use.
+For example: "1;chunk extension here\r\nX\r\n".
+
+A malicious sender can use chunk extensions to add
+about 4k of overhead per byte of data.
+(The maximum chunk header line size we will accept.)
+
+Track the amount of overhead read in chunked data,
+and produce an error if it seems excessive.
+
+Updates #64433
+Fixes #64434
+Fixes CVE-2023-39326
+
+Change-Id: I40f8d70eb6f9575fb43f506eb19132ccedafcf39
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2076135
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit 3473ae72ee66c60744665a24b2fde143e8964d4f)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2095407
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/547355
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+
+CVE: CVE-2023-39326
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/6446af942e2e2b161c4ec1b60d9703a2b55dc4dd]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/net/http/internal/chunked.go | 36 +++++++++++++---
+ src/net/http/internal/chunked_test.go | 59 +++++++++++++++++++++++++++
+ 2 files changed, 89 insertions(+), 6 deletions(-)
+
+diff --git a/src/net/http/internal/chunked.go b/src/net/http/internal/chunked.go
+index f06e572..ddbaacb 100644
+--- a/src/net/http/internal/chunked.go
++++ b/src/net/http/internal/chunked.go
+@@ -39,7 +39,8 @@ type chunkedReader struct {
+ n uint64 // unread bytes in chunk
+ err error
+ buf [2]byte
+- checkEnd bool // whether need to check for \r\n chunk footer
++ checkEnd bool // whether need to check for \r\n chunk footer
++ excess int64 // "excessive" chunk overhead, for malicious sender detection
+ }
+
+ func (cr *chunkedReader) beginChunk() {
+@@ -49,10 +50,38 @@ func (cr *chunkedReader) beginChunk() {
+ if cr.err != nil {
+ return
+ }
++ cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
++ line = trimTrailingWhitespace(line)
++ line, cr.err = removeChunkExtension(line)
++ if cr.err != nil {
++ return
++ }
+ cr.n, cr.err = parseHexUint(line)
+ if cr.err != nil {
+ return
+ }
++ // A sender who sends one byte per chunk will send 5 bytes of overhead
++ // for every byte of data. ("1\r\nX\r\n" to send "X".)
++ // We want to allow this, since streaming a byte at a time can be legitimate.
++ //
++ // A sender can use chunk extensions to add arbitrary amounts of additional
++ // data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
++ // We don't want to disallow extensions (although we discard them),
++ // but we also don't want to allow a sender to reduce the signal/noise ratio
++ // arbitrarily.
++ //
++ // We track the amount of excess overhead read,
++ // and produce an error if it grows too large.
++ //
++ // Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
++ // plus twice the amount of real data in the chunk.
++ cr.excess -= 16 + (2 * int64(cr.n))
++ if cr.excess < 0 {
++ cr.excess = 0
++ }
++ if cr.excess > 16*1024 {
++ cr.err = errors.New("chunked encoding contains too much non-data")
++ }
+ if cr.n == 0 {
+ cr.err = io.EOF
+ }
+@@ -133,11 +162,6 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) {
+ if len(p) >= maxLineLength {
+ return nil, ErrLineTooLong
+ }
+- p = trimTrailingWhitespace(p)
+- p, err = removeChunkExtension(p)
+- if err != nil {
+- return nil, err
+- }
+ return p, nil
+ }
+
+diff --git a/src/net/http/internal/chunked_test.go b/src/net/http/internal/chunked_test.go
+index 08152ed..5fbeb08 100644
+--- a/src/net/http/internal/chunked_test.go
++++ b/src/net/http/internal/chunked_test.go
+@@ -211,3 +211,62 @@ func TestChunkReadPartial(t *testing.T) {
+ }
+
+ }
++
++func TestChunkReaderTooMuchOverhead(t *testing.T) {
++ // If the sender is sending 100x as many chunk header bytes as chunk data,
++ // we should reject the stream at some point.
++ chunk := []byte("1;")
++ for i := 0; i < 100; i++ {
++ chunk = append(chunk, 'a') // chunk extension
++ }
++ chunk = append(chunk, "\r\nX\r\n"...)
++ const bodylen = 1 << 20
++ r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
++ if i < bodylen {
++ return chunk, nil
++ }
++ return []byte("0\r\n"), nil
++ }})
++ _, err := io.ReadAll(r)
++ if err == nil {
++ t.Fatalf("successfully read body with excessive overhead; want error")
++ }
++}
++
++func TestChunkReaderByteAtATime(t *testing.T) {
++ // Sending one byte per chunk should not trip the excess-overhead detection.
++ const bodylen = 1 << 20
++ r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
++ if i < bodylen {
++ return []byte("1\r\nX\r\n"), nil
++ }
++ return []byte("0\r\n"), nil
++ }})
++ got, err := io.ReadAll(r)
++ if err != nil {
++ t.Errorf("unexpected error: %v", err)
++ }
++ if len(got) != bodylen {
++ t.Errorf("read %v bytes, want %v", len(got), bodylen)
++ }
++}
++
++type funcReader struct {
++ f func(iteration int) ([]byte, error)
++ i int
++ b []byte
++ err error
++}
++
++func (r *funcReader) Read(p []byte) (n int, err error) {
++ if len(r.b) == 0 && r.err == nil {
++ r.b, r.err = r.f(r.i)
++ r.i++
++ }
++ n = copy(p, r.b)
++ r.b = r.b[n:]
++ if len(r.b) > 0 {
++ return n, nil
++ }
++ return n, r.err
++}
+--
+2.40.0
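The core of the change is the excess-overhead accounting added to chunkedReader.beginChunk. A standalone sketch of that rule follows (excessTracker and observeChunk are invented names for illustration): each chunk is charged its header line plus the CRLF that follows its data, credited 16 bytes plus twice its data length, and the stream is rejected once the running excess passes 16 KiB.

package main

import (
	"errors"
	"fmt"
)

type excessTracker struct {
	excess int64
}

func (t *excessTracker) observeChunk(headerLen int, dataLen int64) error {
	t.excess += int64(headerLen) + 2 // header line, plus \r\n after the data
	t.excess -= 16 + 2*dataLen       // allowance per chunk
	if t.excess < 0 {
		t.excess = 0
	}
	if t.excess > 16*1024 {
		return errors.New("chunked encoding contains too much non-data")
	}
	return nil
}

func main() {
	t := &excessTracker{}
	// "1\r\nX\r\n": one data byte with a tiny header never trips the limit.
	fmt.Println(t.observeChunk(1, 1)) // <nil>

	// "1;<100 bytes of extension>\r\nX\r\n": ~100 header bytes per data byte
	// accumulates excess and is eventually rejected, as in
	// TestChunkReaderTooMuchOverhead above.
	var err error
	for i := 0; err == nil && i < 1000; i++ {
		err = t.observeChunk(102, 1)
	}
	fmt.Println(err) // chunked encoding contains too much non-data
}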
diff --git a/meta/recipes-devtools/go/go-1.20/CVE-2023-45285.patch b/meta/recipes-devtools/go/go-1.20/CVE-2023-45285.patch
new file mode 100644
index 0000000000..0459ae0a1a
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.20/CVE-2023-45285.patch
@@ -0,0 +1,110 @@
+From 46bc33819ac86a9596b8059235842f0e0c7469bd Mon Sep 17 00:00:00 2001
+From: Bryan C. Mills <bcmills@google.com>
+Date: Thu, 2 Nov 2023 15:06:35 -0400
+Subject: [PATCH] cmd/go/internal/vcs: error out if the requested repo does not
+ support a secure protocol
+
+Updates #63845.
+Fixes #63972.
+
+Change-Id: If86d6b13d3b55877b35c087112bd76388c9404b8
+Reviewed-on: https://go-review.googlesource.com/c/go/+/539321
+Reviewed-by: Michael Matloob <matloob@golang.org>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Auto-Submit: Bryan Mills <bcmills@google.com>
+(cherry picked from commit be26ae18caf7ddffca4073333f80d0d9e76483c3)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/540335
+Auto-Submit: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+
+CVE: CVE-2023-45285
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/46bc33819ac86a9596b8059235842f0e0c7469bd]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/cmd/go/internal/vcs/vcs.go | 25 +++++++++++++----
+ .../script/mod_insecure_issue63845.txt | 28 +++++++++++++++++++
+ 2 files changed, 47 insertions(+), 6 deletions(-)
+ create mode 100644 src/cmd/go/testdata/script/mod_insecure_issue63845.txt
+
+diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go
+index ab42424..0e2882d 100644
+--- a/src/cmd/go/internal/vcs/vcs.go
++++ b/src/cmd/go/internal/vcs/vcs.go
+@@ -891,19 +891,32 @@ func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths
+ if !srv.schemelessRepo {
+ repoURL = match["repo"]
+ } else {
+- scheme := vcs.Scheme[0] // default to first scheme
+ repo := match["repo"]
+- if vcs.PingCmd != "" {
+- // If we know how to test schemes, scan to find one.
++ scheme, err := func() (string, error) {
+ for _, s := range vcs.Scheme {
+ if security == web.SecureOnly && !vcs.isSecureScheme(s) {
+ continue
+ }
+- if vcs.Ping(s, repo) == nil {
+- scheme = s
+- break
++
++ // If we know how to ping URL schemes for this VCS,
++ // check that this repo works.
++ // Otherwise, default to the first scheme
++ // that meets the requested security level.
++ if vcs.PingCmd == "" {
++ return s, nil
++ }
++ if err := vcs.Ping(s, repo); err == nil {
++ return s, nil
+ }
+ }
++ securityFrag := ""
++ if security == web.SecureOnly {
++ securityFrag = "secure "
++ }
++ return "", fmt.Errorf("no %sprotocol found for repository", securityFrag)
++ }()
++ if err != nil {
++ return nil, err
+ }
+ repoURL = scheme + "://" + repo
+ }
+diff --git a/src/cmd/go/testdata/script/mod_insecure_issue63845.txt b/src/cmd/go/testdata/script/mod_insecure_issue63845.txt
+new file mode 100644
+index 0000000..5fa6a4f
+--- /dev/null
++++ b/src/cmd/go/testdata/script/mod_insecure_issue63845.txt
+@@ -0,0 +1,28 @@
++# Regression test for https://go.dev/issue/63845:
++# If 'git ls-remote' fails for all secure protocols,
++# we should fail instead of falling back to an arbitrary protocol.
++#
++# Note that this test does not use the local vcweb test server
++# (vcs-test.golang.org), because the hook for redirecting to that
++# server bypasses the "ping to determine protocol" logic
++# in cmd/go/internal/vcs.
++
++[!net] skip
++[!git] skip
++[short] skip 'tries to access a nonexistent external Git repo'
++
++env GOPRIVATE=golang.org
++env CURLOPT_TIMEOUT_MS=100
++env GIT_SSH_COMMAND=false
++
++! go get -x golang.org/nonexist.git@latest
++stderr '^git ls-remote https://golang.org/nonexist$'
++stderr '^git ls-remote git\+ssh://golang.org/nonexist'
++stderr '^git ls-remote ssh://golang.org/nonexist$'
++! stderr 'git://'
++stderr '^go: golang.org/nonexist.git@latest: no secure protocol found for repository$'
++
++-- go.mod --
++module example
++
++go 1.19
+--
+2.40.0
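A reduced sketch of the scheme-selection logic this patch introduces (pickScheme and its parameters are invented for illustration; the real code is the closure added to repoRootFromVCSPaths above): when only secure protocols are allowed and none of them responds to a probe, the lookup now fails instead of falling back to an arbitrary scheme.

package main

import (
	"errors"
	"fmt"
)

func pickScheme(schemes []string, secureOnly bool,
	isSecure func(string) bool, ping func(string) error) (string, error) {
	for _, s := range schemes {
		if secureOnly && !isSecure(s) {
			continue
		}
		if ping == nil {
			// No way to probe: take the first scheme that meets the
			// requested security level.
			return s, nil
		}
		if err := ping(s); err == nil {
			return s, nil
		}
	}
	return "", errors.New("no secure protocol found for repository")
}

func main() {
	isSecure := func(s string) bool { return s == "https" || s == "git+ssh" || s == "ssh" }
	unreachable := func(string) error { return errors.New("ls-remote failed") }
	_, err := pickScheme([]string{"https", "git+ssh", "ssh", "git"}, true, isSecure, unreachable)
	fmt.Println(err) // same failure asserted by mod_insecure_issue63845.txt
}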
diff --git a/meta/recipes-devtools/go/go-1.20/CVE-2023-45287.patch b/meta/recipes-devtools/go/go-1.20/CVE-2023-45287.patch
new file mode 100644
index 0000000000..477e3c98ee
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.20/CVE-2023-45287.patch
@@ -0,0 +1,1695 @@
+From 8a81fdf165facdcefa06531de5af98a4db343035 Mon Sep 17 00:00:00 2001
+From: Lúcás Meier <cronokirby@gmail.com>
+Date: Tue, 8 Jun 2021 21:36:06 +0200
+Subject: [PATCH] crypto/rsa: replace big.Int for encryption and decryption
+
+Infamously, big.Int does not provide constant-time arithmetic, making
+its use in cryptographic code quite tricky. RSA uses big.Int
+pervasively, in its public API, for key generation, precomputation, and
+for encryption and decryption. This is a known problem. One mitigation,
+blinding, is already in place during decryption. This helps mitigate the
+very leaky exponentiation operation. Because big.Int is fundamentally
+not constant-time, it's unfortunately difficult to guarantee that
+mitigations like these are completely effective.
+
+This patch removes the use of big.Int for encryption and decryption,
+replacing it with an internal nat type instead. Signing and verification
+are also affected, because they depend on encryption and decryption.
+
+Overall, this patch degrades performance by 55% for private key
+operations, and 4-5x for (much faster) public key operations.
+(Signatures do both, so the slowdown is worse than decryption.)
+
+name old time/op new time/op delta
+DecryptPKCS1v15/2048-8 1.50ms ± 0% 2.34ms ± 0% +56.44% (p=0.000 n=8+10)
+DecryptPKCS1v15/3072-8 4.40ms ± 0% 6.79ms ± 0% +54.33% (p=0.000 n=10+9)
+DecryptPKCS1v15/4096-8 9.31ms ± 0% 15.14ms ± 0% +62.60% (p=0.000 n=10+10)
+EncryptPKCS1v15/2048-8 8.16µs ± 0% 355.58µs ± 0% +4258.90% (p=0.000 n=10+9)
+DecryptOAEP/2048-8 1.50ms ± 0% 2.34ms ± 0% +55.68% (p=0.000 n=10+9)
+EncryptOAEP/2048-8 8.51µs ± 0% 355.95µs ± 0% +4082.75% (p=0.000 n=10+9)
+SignPKCS1v15/2048-8 1.51ms ± 0% 2.69ms ± 0% +77.94% (p=0.000 n=10+10)
+VerifyPKCS1v15/2048-8 7.25µs ± 0% 354.34µs ± 0% +4789.52% (p=0.000 n=9+9)
+SignPSS/2048-8 1.51ms ± 0% 2.70ms ± 0% +78.80% (p=0.000 n=9+10)
+VerifyPSS/2048-8 8.27µs ± 1% 355.65µs ± 0% +4199.39% (p=0.000 n=10+10)
+
+Keep in mind that this is without any assembly at all, and that further
+improvements are likely possible. I think having a review of the logic
+and the cryptography would be a good idea at this stage, before we
+complicate the code too much through optimization.
+
+The bulk of the work is in nat.go. This introduces two new types: nat,
+representing natural numbers, and modulus, representing moduli used in
+modular arithmetic.
+
+A nat has an "announced size", which may be larger than its "true size",
+the number of bits needed to represent this number. Operations on a nat
+will only ever leak its announced size, never its true size, or other
+information about its value. The size of a nat is always clear based on
+how its value is set. For example, x.mod(y, m) will make the announced
+size of x match that of m, since x is reduced modulo m.
+
+Operations assume that the announced size of the operands match what's
+expected (with a few exceptions). For example, x.modAdd(y, m) assumes
+that x and y have the same announced size as m, and that they're reduced
+modulo m.
+
+Nats are represented over unsaturated bits.UintSize - 1 bit limbs. This
+means that we can't reuse the assembly routines for big.Int, which use
+saturated bits.UintSize limbs. The advantage of unsaturated limbs is
+that it makes Montgomery multiplication faster, by needing fewer
+registers in a hot loop. This makes exponentiation faster, which
+consists of many Montgomery multiplications.
+
+Moduli use nat internally. Unlike nat, the true size of a modulus always
+matches its announced size. When creating a modulus, any zero padding is
+removed. Moduli will also precompute constants when created, which is
+another reason why having a separate type is desirable.
+
+Updates #20654
+
+Co-authored-by: Filippo Valsorda <filippo@golang.org>
+Change-Id: I73b61f87d58ab912e80a9644e255d552cbadcced
+Reviewed-on: https://go-review.googlesource.com/c/go/+/326012
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Reviewed-by: Joedian Reid <joedian@golang.org>
+
+CVE: CVE-2023-45287
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/8a81fdf165facdcefa06531de5af98a4db343035]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/crypto/rsa/example_test.go | 21 +-
+ src/crypto/rsa/nat.go | 626 +++++++++++++++++++++++++++++++++
+ src/crypto/rsa/nat_test.go | 384 ++++++++++++++++++++
+ src/crypto/rsa/pkcs1v15.go | 47 +--
+ src/crypto/rsa/pss.go | 49 ++-
+ src/crypto/rsa/pss_test.go | 10 +-
+ src/crypto/rsa/rsa.go | 172 ++++-----
+ 7 files changed, 1140 insertions(+), 169 deletions(-)
+ create mode 100644 src/crypto/rsa/nat.go
+ create mode 100644 src/crypto/rsa/nat_test.go
+
+diff --git a/src/crypto/rsa/example_test.go b/src/crypto/rsa/example_test.go
+index ce5c2d9..52e5639 100644
+--- a/src/crypto/rsa/example_test.go
++++ b/src/crypto/rsa/example_test.go
+@@ -12,7 +12,6 @@ import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+- "io"
+ "os"
+ )
+
+@@ -36,21 +35,17 @@ import (
+ // a buffer that contains a random key. Thus, if the RSA result isn't
+ // well-formed, the implementation uses a random key in constant time.
+ func ExampleDecryptPKCS1v15SessionKey() {
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+ // The hybrid scheme should use at least a 16-byte symmetric key. Here
+ // we read the random key that will be used if the RSA decryption isn't
+ // well-formed.
+ key := make([]byte, 32)
+- if _, err := io.ReadFull(rng, key); err != nil {
++ if _, err := rand.Read(key); err != nil {
+ panic("RNG failure")
+ }
+
+ rsaCiphertext, _ := hex.DecodeString("aabbccddeeff")
+
+- if err := DecryptPKCS1v15SessionKey(rng, rsaPrivateKey, rsaCiphertext, key); err != nil {
++ if err := DecryptPKCS1v15SessionKey(nil, rsaPrivateKey, rsaCiphertext, key); err != nil {
+ // Any errors that result will be “public” – meaning that they
+ // can be determined without any secret information. (For
+ // instance, if the length of key is impossible given the RSA
+@@ -86,10 +81,6 @@ func ExampleDecryptPKCS1v15SessionKey() {
+ }
+
+ func ExampleSignPKCS1v15() {
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+ message := []byte("message to be signed")
+
+ // Only small messages can be signed directly; thus the hash of a
+@@ -99,7 +90,7 @@ func ExampleSignPKCS1v15() {
+ // of writing (2016).
+ hashed := sha256.Sum256(message)
+
+- signature, err := SignPKCS1v15(rng, rsaPrivateKey, crypto.SHA256, hashed[:])
++ signature, err := SignPKCS1v15(nil, rsaPrivateKey, crypto.SHA256, hashed[:])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error from signing: %s\n", err)
+ return
+@@ -151,11 +142,7 @@ func ExampleDecryptOAEP() {
+ ciphertext, _ := hex.DecodeString("4d1ee10e8f286390258c51a5e80802844c3e6358ad6690b7285218a7c7ed7fc3a4c7b950fbd04d4b0239cc060dcc7065ca6f84c1756deb71ca5685cadbb82be025e16449b905c568a19c088a1abfad54bf7ecc67a7df39943ec511091a34c0f2348d04e058fcff4d55644de3cd1d580791d4524b92f3e91695582e6e340a1c50b6c6d78e80b4e42c5b4d45e479b492de42bbd39cc642ebb80226bb5200020d501b24a37bcc2ec7f34e596b4fd6b063de4858dbf5a4e3dd18e262eda0ec2d19dbd8e890d672b63d368768360b20c0b6b8592a438fa275e5fa7f60bef0dd39673fd3989cc54d2cb80c08fcd19dacbc265ee1c6014616b0e04ea0328c2a04e73460")
+ label := []byte("orders")
+
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+- plaintext, err := DecryptOAEP(sha256.New(), rng, test2048Key, ciphertext, label)
++ plaintext, err := DecryptOAEP(sha256.New(), nil, test2048Key, ciphertext, label)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error from decryption: %s\n", err)
+ return
+diff --git a/src/crypto/rsa/nat.go b/src/crypto/rsa/nat.go
+new file mode 100644
+index 0000000..da521c2
+--- /dev/null
++++ b/src/crypto/rsa/nat.go
+@@ -0,0 +1,626 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package rsa
++
++import (
++ "math/big"
++ "math/bits"
++)
++
++const (
++ // _W is the number of bits we use for our limbs.
++ _W = bits.UintSize - 1
++ // _MASK selects _W bits from a full machine word.
++ _MASK = (1 << _W) - 1
++)
++
++// choice represents a constant-time boolean. The value of choice is always
++// either 1 or 0. We use an int instead of bool in order to make decisions in
++// constant time by turning it into a mask.
++type choice uint
++
++func not(c choice) choice { return 1 ^ c }
++
++const yes = choice(1)
++const no = choice(0)
++
++// ctSelect returns x if on == 1, and y if on == 0. The execution time of this
++// function does not depend on its inputs. If on is any value besides 1 or 0,
++// the result is undefined.
++func ctSelect(on choice, x, y uint) uint {
++ // When on == 1, mask is 0b111..., otherwise mask is 0b000...
++ mask := -uint(on)
++ // When mask is all zeros, we just have y, otherwise, y cancels with itself.
++ return y ^ (mask & (y ^ x))
++}
++
++// ctEq returns 1 if x == y, and 0 otherwise. The execution time of this
++// function does not depend on its inputs.
++func ctEq(x, y uint) choice {
++ // If x != y, then either x - y or y - x will generate a carry.
++ _, c1 := bits.Sub(x, y, 0)
++ _, c2 := bits.Sub(y, x, 0)
++ return not(choice(c1 | c2))
++}
++
++// ctGeq returns 1 if x >= y, and 0 otherwise. The execution time of this
++// function does not depend on its inputs.
++func ctGeq(x, y uint) choice {
++ // If x < y, then x - y generates a carry.
++ _, carry := bits.Sub(x, y, 0)
++ return not(choice(carry))
++}
++
++// nat represents an arbitrary natural number
++//
++// Each nat has an announced length, which is the number of limbs it has stored.
++// Operations on this number are allowed to leak this length, but will not leak
++// any information about the values contained in those limbs.
++type nat struct {
++ // limbs is a little-endian representation in base 2^W with
++ // W = bits.UintSize - 1. The top bit is always unset between operations.
++ //
++ // The top bit is left unset to optimize Montgomery multiplication, in the
++ // inner loop of exponentiation. Using fully saturated limbs would leave us
++ // working with 129-bit numbers on 64-bit platforms, wasting a lot of space,
++ // and thus time.
++ limbs []uint
++}
++
++// expand expands x to n limbs, leaving its value unchanged.
++func (x *nat) expand(n int) *nat {
++ for len(x.limbs) > n {
++ if x.limbs[len(x.limbs)-1] != 0 {
++ panic("rsa: internal error: shrinking nat")
++ }
++ x.limbs = x.limbs[:len(x.limbs)-1]
++ }
++ if cap(x.limbs) < n {
++ newLimbs := make([]uint, n)
++ copy(newLimbs, x.limbs)
++ x.limbs = newLimbs
++ return x
++ }
++ extraLimbs := x.limbs[len(x.limbs):n]
++ for i := range extraLimbs {
++ extraLimbs[i] = 0
++ }
++ x.limbs = x.limbs[:n]
++ return x
++}
++
++// reset returns a zero nat of n limbs, reusing x's storage if n <= cap(x.limbs).
++func (x *nat) reset(n int) *nat {
++ if cap(x.limbs) < n {
++ x.limbs = make([]uint, n)
++ return x
++ }
++ for i := range x.limbs {
++ x.limbs[i] = 0
++ }
++ x.limbs = x.limbs[:n]
++ return x
++}
++
++// clone returns a new nat, with the same value and announced length as x.
++func (x *nat) clone() *nat {
++ out := &nat{make([]uint, len(x.limbs))}
++ copy(out.limbs, x.limbs)
++ return out
++}
++
++// natFromBig creates a new natural number from a big.Int.
++//
++// The announced length of the resulting nat is based on the actual bit size of
++// the input, ignoring leading zeroes.
++func natFromBig(x *big.Int) *nat {
++ xLimbs := x.Bits()
++ bitSize := bigBitLen(x)
++ requiredLimbs := (bitSize + _W - 1) / _W
++
++ out := &nat{make([]uint, requiredLimbs)}
++ outI := 0
++ shift := 0
++ for i := range xLimbs {
++ xi := uint(xLimbs[i])
++ out.limbs[outI] |= (xi << shift) & _MASK
++ outI++
++ if outI == requiredLimbs {
++ return out
++ }
++ out.limbs[outI] = xi >> (_W - shift)
++ shift++ // this assumes bits.UintSize - _W = 1
++ if shift == _W {
++ shift = 0
++ outI++
++ }
++ }
++ return out
++}
++
++// fillBytes sets bytes to x as a zero-extended big-endian byte slice.
++//
++// If bytes is not long enough to contain the number or at least len(x.limbs)-1
++// limbs, or has zero length, fillBytes will panic.
++func (x *nat) fillBytes(bytes []byte) []byte {
++ if len(bytes) == 0 {
++ panic("nat: fillBytes invoked with too small buffer")
++ }
++ for i := range bytes {
++ bytes[i] = 0
++ }
++ shift := 0
++ outI := len(bytes) - 1
++ for i, limb := range x.limbs {
++ remainingBits := _W
++ for remainingBits >= 8 {
++ bytes[outI] |= byte(limb) << shift
++ consumed := 8 - shift
++ limb >>= consumed
++ remainingBits -= consumed
++ shift = 0
++ outI--
++ if outI < 0 {
++ if limb != 0 || i < len(x.limbs)-1 {
++ panic("nat: fillBytes invoked with too small buffer")
++ }
++ return bytes
++ }
++ }
++ bytes[outI] = byte(limb)
++ shift = remainingBits
++ }
++ return bytes
++}
++
++// natFromBytes converts a slice of big-endian bytes into a nat.
++//
++// The announced length of the output depends on the length of bytes. Unlike
++// big.Int, creating a nat will not remove leading zeros.
++func natFromBytes(bytes []byte) *nat {
++ bitSize := len(bytes) * 8
++ requiredLimbs := (bitSize + _W - 1) / _W
++
++ out := &nat{make([]uint, requiredLimbs)}
++ outI := 0
++ shift := 0
++ for i := len(bytes) - 1; i >= 0; i-- {
++ bi := bytes[i]
++ out.limbs[outI] |= uint(bi) << shift
++ shift += 8
++ if shift >= _W {
++ shift -= _W
++ out.limbs[outI] &= _MASK
++ outI++
++ if shift > 0 {
++ out.limbs[outI] = uint(bi) >> (8 - shift)
++ }
++ }
++ }
++ return out
++}
++
++// cmpEq returns 1 if x == y, and 0 otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) cmpEq(y *nat) choice {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ equal := yes
++ for i := 0; i < size; i++ {
++ equal &= ctEq(xLimbs[i], yLimbs[i])
++ }
++ return equal
++}
++
++// cmpGeq returns 1 if x >= y, and 0 otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) cmpGeq(y *nat) choice {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ var c uint
++ for i := 0; i < size; i++ {
++ c = (xLimbs[i] - yLimbs[i] - c) >> _W
++ }
++ // If there was a carry, then subtracting y underflowed, so
++ // x is not greater than or equal to y.
++ return not(choice(c))
++}
++
++// assign sets x <- y if on == 1, and does nothing otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) assign(on choice, y *nat) *nat {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ xLimbs[i] = ctSelect(on, yLimbs[i], xLimbs[i])
++ }
++ return x
++}
++
++// add computes x += y if on == 1, and does nothing otherwise. It returns the
++// carry of the addition regardless of on.
++//
++// Both operands must have the same announced length.
++func (x *nat) add(on choice, y *nat) (c uint) {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ res := xLimbs[i] + yLimbs[i] + c
++ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
++ c = res >> _W
++ }
++ return
++}
++
++// sub computes x -= y if on == 1, and does nothing otherwise. It returns the
++// borrow of the subtraction regardless of on.
++//
++// Both operands must have the same announced length.
++func (x *nat) sub(on choice, y *nat) (c uint) {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ res := xLimbs[i] - yLimbs[i] - c
++ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
++ c = res >> _W
++ }
++ return
++}
++
++// modulus is used for modular arithmetic, precomputing relevant constants.
++//
++// Moduli are assumed to be odd numbers. Moduli can also leak the exact
++// number of bits needed to store their value, and are stored without padding.
++//
++// Their actual value is still kept secret.
++type modulus struct {
++ // The underlying natural number for this modulus.
++ //
++ // This will be stored without any padding, and shouldn't alias with any
++ // other natural number being used.
++ nat *nat
++ leading int // number of leading zeros in the modulus
++ m0inv uint // -nat.limbs[0]⁻¹ mod _W
++}
++
++// minusInverseModW computes -x⁻¹ mod _W with x odd.
++//
++// This operation is used to precompute a constant involved in Montgomery
++// multiplication.
++func minusInverseModW(x uint) uint {
++ // Every iteration of this loop doubles the least-significant bits of
++ // correct inverse in y. The first three bits are already correct (1⁻¹ = 1,
++ // 3⁻¹ = 3, 5⁻¹ = 5, and 7⁻¹ = 7 mod 8), so doubling five times is enough
++ // for 61 bits (and wastes only one iteration for 31 bits).
++ //
++ // See https://crypto.stackexchange.com/a/47496.
++ y := x
++ for i := 0; i < 5; i++ {
++ y = y * (2 - x*y)
++ }
++ return (1 << _W) - (y & _MASK)
++}
++
++// modulusFromNat creates a new modulus from a nat.
++//
++// The nat should be odd, nonzero, and the number of significant bits in the
++// number should be leakable. The nat shouldn't be reused.
++func modulusFromNat(nat *nat) *modulus {
++ m := &modulus{}
++ m.nat = nat
++ size := len(m.nat.limbs)
++ for m.nat.limbs[size-1] == 0 {
++ size--
++ }
++ m.nat.limbs = m.nat.limbs[:size]
++ m.leading = _W - bitLen(m.nat.limbs[size-1])
++ m.m0inv = minusInverseModW(m.nat.limbs[0])
++ return m
++}
++
++// bitLen is a version of bits.Len that only leaks the bit length of n, but not
++// its value. bits.Len and bits.LeadingZeros use a lookup table for the
++// low-order bits on some architectures.
++func bitLen(n uint) int {
++ var len int
++ // We assume, here and elsewhere, that comparison to zero is constant time
++ // with respect to different non-zero values.
++ for n != 0 {
++ len++
++ n >>= 1
++ }
++ return len
++}
++
++// bigBitLen is a version of big.Int.BitLen that only leaks the bit length of x,
++// but not its value. big.Int.BitLen uses bits.Len.
++func bigBitLen(x *big.Int) int {
++ xLimbs := x.Bits()
++ fullLimbs := len(xLimbs) - 1
++ topLimb := uint(xLimbs[len(xLimbs)-1])
++ return fullLimbs*bits.UintSize + bitLen(topLimb)
++}
++
++// modulusSize returns the size of m in bytes.
++func modulusSize(m *modulus) int {
++ bits := len(m.nat.limbs)*_W - int(m.leading)
++ return (bits + 7) / 8
++}
++
++// shiftIn calculates x = x << _W + y mod m.
++//
++// This assumes that x is already reduced mod m, and that y < 2^_W.
++func (x *nat) shiftIn(y uint, m *modulus) *nat {
++ d := new(nat).resetFor(m)
++
++ // Eliminate bounds checks in the loop.
++ size := len(m.nat.limbs)
++ xLimbs := x.limbs[:size]
++ dLimbs := d.limbs[:size]
++ mLimbs := m.nat.limbs[:size]
++
++ // Each iteration of this loop computes x = 2x + b mod m, where b is a bit
++ // from y. Effectively, it left-shifts x and adds y one bit at a time,
++ // reducing it every time.
++ //
++ // To do the reduction, each iteration computes both 2x + b and 2x + b - m.
++ // The next iteration (and finally the return line) will use either result
++ // based on whether the subtraction underflowed.
++ needSubtraction := no
++ for i := _W - 1; i >= 0; i-- {
++ carry := (y >> i) & 1
++ var borrow uint
++ for i := 0; i < size; i++ {
++ l := ctSelect(needSubtraction, dLimbs[i], xLimbs[i])
++
++ res := l<<1 + carry
++ xLimbs[i] = res & _MASK
++ carry = res >> _W
++
++ res = xLimbs[i] - mLimbs[i] - borrow
++ dLimbs[i] = res & _MASK
++ borrow = res >> _W
++ }
++ // See modAdd for how carry (aka overflow), borrow (aka underflow), and
++ // needSubtraction relate.
++ needSubtraction = ctEq(carry, borrow)
++ }
++ return x.assign(needSubtraction, d)
++}
++
++// mod calculates out = x mod m.
++//
++// This works regardless how large the value of x is.
++//
++// The output will be resized to the size of m and overwritten.
++func (out *nat) mod(x *nat, m *modulus) *nat {
++ out.resetFor(m)
++ // Working our way from the most significant to the least significant limb,
++ // we can insert each limb at the least significant position, shifting all
++ // previous limbs left by _W. This way each limb will get shifted by the
++ // correct number of bits. We can insert at least N - 1 limbs without
++ // overflowing m. After that, we need to reduce every time we shift.
++ i := len(x.limbs) - 1
++ // For the first N - 1 limbs we can skip the actual shifting and position
++ // them at the shifted position, which starts at min(N - 2, i).
++ start := len(m.nat.limbs) - 2
++ if i < start {
++ start = i
++ }
++ for j := start; j >= 0; j-- {
++ out.limbs[j] = x.limbs[i]
++ i--
++ }
++ // We shift in the remaining limbs, reducing modulo m each time.
++ for i >= 0 {
++ out.shiftIn(x.limbs[i], m)
++ i--
++ }
++ return out
++}
++
++// expandFor ensures out has the right size to work with operations modulo m.
++//
++// This assumes that out has as many or fewer limbs than m, or that the extra
++// limbs are all zero (which may happen when decoding a value that has leading
++// zeroes in its bytes representation that spill over the limb threshold).
++func (out *nat) expandFor(m *modulus) *nat {
++ return out.expand(len(m.nat.limbs))
++}
++
++// resetFor ensures out has the right size to work with operations modulo m.
++//
++// out is zeroed and may start at any size.
++func (out *nat) resetFor(m *modulus) *nat {
++ return out.reset(len(m.nat.limbs))
++}
++
++// modSub computes x = x - y mod m.
++//
++// The length of both operands must be the same as the modulus. Both operands
++// must already be reduced modulo m.
++func (x *nat) modSub(y *nat, m *modulus) *nat {
++ underflow := x.sub(yes, y)
++ // If the subtraction underflowed, add m.
++ x.add(choice(underflow), m.nat)
++ return x
++}
++
++// modAdd computes x = x + y mod m.
++//
++// The length of both operands must be the same as the modulus. Both operands
++// must already be reduced modulo m.
++func (x *nat) modAdd(y *nat, m *modulus) *nat {
++ overflow := x.add(yes, y)
++ underflow := not(x.cmpGeq(m.nat)) // x < m
++
++ // Three cases are possible:
++ //
++ // - overflow = 0, underflow = 0
++ //
++ // In this case, addition fits in our limbs, but we can still subtract away
++ // m without an underflow, so we need to perform the subtraction to reduce
++ // our result.
++ //
++ // - overflow = 0, underflow = 1
++ //
++ // The addition fits in our limbs, but we can't subtract m without
++ // underflowing. The result is already reduced.
++ //
++ // - overflow = 1, underflow = 1
++ //
++ // The addition does not fit in our limbs, and the subtraction's borrow
++ // would cancel out with the addition's carry. We need to subtract m to
++ // reduce our result.
++ //
++ // The overflow = 1, underflow = 0 case is not possible, because y is at
++ // most m - 1, and if adding m - 1 overflows, then subtracting m must
++ // necessarily underflow.
++ needSubtraction := ctEq(overflow, uint(underflow))
++
++ x.sub(needSubtraction, m.nat)
++ return x
++}
++
++// montgomeryRepresentation calculates x = x * R mod m, with R = 2^(_W * n) and
++// n = len(m.nat.limbs).
++//
++// Faster Montgomery multiplication replaces standard modular multiplication for
++// numbers in this representation.
++//
++// This assumes that x is already reduced mod m.
++func (x *nat) montgomeryRepresentation(m *modulus) *nat {
++ for i := 0; i < len(m.nat.limbs); i++ {
++ x.shiftIn(0, m) // x = x * 2^_W mod m
++ }
++ return x
++}
++
++// montgomeryMul calculates d = a * b / R mod m, with R = 2^(_W * n) and
++// n = len(m.nat.limbs), using the Montgomery Multiplication technique.
++//
++// All inputs should be the same length, not aliasing d, and already
++// reduced modulo m. d will be resized to the size of m and overwritten.
++func (d *nat) montgomeryMul(a *nat, b *nat, m *modulus) *nat {
++ // See https://bearssl.org/bigint.html#montgomery-reduction-and-multiplication
++ // for a description of the algorithm.
++
++ // Eliminate bounds checks in the loop.
++ size := len(m.nat.limbs)
++ aLimbs := a.limbs[:size]
++ bLimbs := b.limbs[:size]
++ dLimbs := d.resetFor(m).limbs[:size]
++ mLimbs := m.nat.limbs[:size]
++
++ var overflow uint
++ for i := 0; i < size; i++ {
++ f := ((dLimbs[0] + aLimbs[i]*bLimbs[0]) * m.m0inv) & _MASK
++ carry := uint(0)
++ for j := 0; j < size; j++ {
++ // z = d[j] + a[i] * b[j] + f * m[j] + carry <= 2^(2W+1) - 2^(W+1) + 2^W
++ hi, lo := bits.Mul(aLimbs[i], bLimbs[j])
++ z_lo, c := bits.Add(dLimbs[j], lo, 0)
++ z_hi, _ := bits.Add(0, hi, c)
++ hi, lo = bits.Mul(f, mLimbs[j])
++ z_lo, c = bits.Add(z_lo, lo, 0)
++ z_hi, _ = bits.Add(z_hi, hi, c)
++ z_lo, c = bits.Add(z_lo, carry, 0)
++ z_hi, _ = bits.Add(z_hi, 0, c)
++ if j > 0 {
++ dLimbs[j-1] = z_lo & _MASK
++ }
++ carry = z_hi<<1 | z_lo>>_W // carry <= 2^(W+1) - 2
++ }
++ z := overflow + carry // z <= 2^(W+1) - 1
++ dLimbs[size-1] = z & _MASK
++ overflow = z >> _W // overflow <= 1
++ }
++ // See modAdd for how overflow, underflow, and needSubtraction relate.
++ underflow := not(d.cmpGeq(m.nat)) // d < m
++ needSubtraction := ctEq(overflow, uint(underflow))
++ d.sub(needSubtraction, m.nat)
++
++ return d
++}
++
++// modMul calculates x *= y mod m.
++//
++// x and y must already be reduced modulo m, they must share its announced
++// length, and they may not alias.
++func (x *nat) modMul(y *nat, m *modulus) *nat {
++ // A Montgomery multiplication by a value out of the Montgomery domain
++ // takes the result out of Montgomery representation.
++ xR := x.clone().montgomeryRepresentation(m) // xR = x * R mod m
++ return x.montgomeryMul(xR, y, m) // x = xR * y / R mod m
++}
++
++// exp calculates out = x^e mod m.
++//
++// The exponent e is represented in big-endian order. The output will be resized
++// to the size of m and overwritten. x must already be reduced modulo m.
++func (out *nat) exp(x *nat, e []byte, m *modulus) *nat {
++ // We use a 4 bit window. For our RSA workload, 4 bit windows are faster
++ // than 2 bit windows, but use an extra 12 nats worth of scratch space.
++ // Using bit sizes that don't divide 8 are more complex to implement.
++ table := make([]*nat, (1<<4)-1) // table[i] = x ^ (i+1)
++ table[0] = x.clone().montgomeryRepresentation(m)
++ for i := 1; i < len(table); i++ {
++ table[i] = new(nat).expandFor(m)
++ table[i].montgomeryMul(table[i-1], table[0], m)
++ }
++
++ out.resetFor(m)
++ out.limbs[0] = 1
++ out.montgomeryRepresentation(m)
++ t0 := new(nat).expandFor(m)
++ t1 := new(nat).expandFor(m)
++ for _, b := range e {
++ for _, j := range []int{4, 0} {
++ // Square four times.
++ t1.montgomeryMul(out, out, m)
++ out.montgomeryMul(t1, t1, m)
++ t1.montgomeryMul(out, out, m)
++ out.montgomeryMul(t1, t1, m)
++
++ // Select x^k in constant time from the table.
++ k := uint((b >> j) & 0b1111)
++ for i := range table {
++ t0.assign(ctEq(k, uint(i+1)), table[i])
++ }
++
++ // Multiply by x^k, discarding the result if k = 0.
++ t1.montgomeryMul(out, t0, m)
++ out.assign(not(ctEq(k, 0)), t1)
++ }
++ }
++
++ // By Montgomery multiplying with 1 not in Montgomery representation, we
++ // convert out back from Montgomery representation, because it works out to
++ // dividing by R.
++ t0.assign(yes, out)
++ t1.resetFor(m)
++ t1.limbs[0] = 1
++ out.montgomeryMul(t0, t1, m)
++
++ return out
++}
+diff --git a/src/crypto/rsa/nat_test.go b/src/crypto/rsa/nat_test.go
+new file mode 100644
+index 0000000..3e6eb10
+--- /dev/null
++++ b/src/crypto/rsa/nat_test.go
+@@ -0,0 +1,384 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package rsa
++
++import (
++ "bytes"
++ "math/big"
++ "math/bits"
++ "math/rand"
++ "reflect"
++ "testing"
++ "testing/quick"
++)
++
++// Generate generates an even nat. It's used by testing/quick to produce random
++// *nat values for quick.Check invocations.
++func (*nat) Generate(r *rand.Rand, size int) reflect.Value {
++ limbs := make([]uint, size)
++ for i := 0; i < size; i++ {
++ limbs[i] = uint(r.Uint64()) & ((1 << _W) - 2)
++ }
++ return reflect.ValueOf(&nat{limbs})
++}
++
++func testModAddCommutative(a *nat, b *nat) bool {
++ mLimbs := make([]uint, len(a.limbs))
++ for i := 0; i < len(mLimbs); i++ {
++ mLimbs[i] = _MASK
++ }
++ m := modulusFromNat(&nat{mLimbs})
++ aPlusB := a.clone()
++ aPlusB.modAdd(b, m)
++ bPlusA := b.clone()
++ bPlusA.modAdd(a, m)
++ return aPlusB.cmpEq(bPlusA) == 1
++}
++
++func TestModAddCommutative(t *testing.T) {
++ err := quick.Check(testModAddCommutative, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func testModSubThenAddIdentity(a *nat, b *nat) bool {
++ mLimbs := make([]uint, len(a.limbs))
++ for i := 0; i < len(mLimbs); i++ {
++ mLimbs[i] = _MASK
++ }
++ m := modulusFromNat(&nat{mLimbs})
++ original := a.clone()
++ a.modSub(b, m)
++ a.modAdd(b, m)
++ return a.cmpEq(original) == 1
++}
++
++func TestModSubThenAddIdentity(t *testing.T) {
++ err := quick.Check(testModSubThenAddIdentity, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func testMontgomeryRoundtrip(a *nat) bool {
++ one := &nat{make([]uint, len(a.limbs))}
++ one.limbs[0] = 1
++ aPlusOne := a.clone()
++ aPlusOne.add(1, one)
++ m := modulusFromNat(aPlusOne)
++ monty := a.clone()
++ monty.montgomeryRepresentation(m)
++ aAgain := monty.clone()
++ aAgain.montgomeryMul(monty, one, m)
++ return a.cmpEq(aAgain) == 1
++}
++
++func TestMontgomeryRoundtrip(t *testing.T) {
++ err := quick.Check(testMontgomeryRoundtrip, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func TestFromBig(t *testing.T) {
++ expected := []byte{0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
++ theBig := new(big.Int).SetBytes(expected)
++ actual := natFromBig(theBig).fillBytes(make([]byte, len(expected)))
++ if !bytes.Equal(actual, expected) {
++ t.Errorf("%+x != %+x", actual, expected)
++ }
++}
++
++func TestFillBytes(t *testing.T) {
++ xBytes := []byte{0xAA, 0xFF, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}
++ x := natFromBytes(xBytes)
++ for l := 20; l >= len(xBytes); l-- {
++ buf := make([]byte, l)
++ rand.Read(buf)
++ actual := x.fillBytes(buf)
++ expected := make([]byte, l)
++ copy(expected[l-len(xBytes):], xBytes)
++ if !bytes.Equal(actual, expected) {
++ t.Errorf("%d: %+v != %+v", l, actual, expected)
++ }
++ }
++ for l := len(xBytes) - 1; l >= 0; l-- {
++ (func() {
++ defer func() {
++ if recover() == nil {
++ t.Errorf("%d: expected panic", l)
++ }
++ }()
++ x.fillBytes(make([]byte, l))
++ })()
++ }
++}
++
++func TestFromBytes(t *testing.T) {
++ f := func(xBytes []byte) bool {
++ if len(xBytes) == 0 {
++ return true
++ }
++ actual := natFromBytes(xBytes).fillBytes(make([]byte, len(xBytes)))
++ if !bytes.Equal(actual, xBytes) {
++ t.Errorf("%+x != %+x", actual, xBytes)
++ return false
++ }
++ return true
++ }
++
++ err := quick.Check(f, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++
++ f([]byte{0xFF, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88})
++ f(bytes.Repeat([]byte{0xFF}, _W))
++}
++
++func TestShiftIn(t *testing.T) {
++ if bits.UintSize != 64 {
++ t.Skip("examples are only valid in 64 bit")
++ }
++ examples := []struct {
++ m, x, expected []byte
++ y uint64
++ }{{
++ m: []byte{13},
++ x: []byte{0},
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{7},
++ }, {
++ m: []byte{13},
++ x: []byte{7},
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{11},
++ }, {
++ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
++ x: make([]byte, 9),
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
++ }, {
++ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
++ x: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
++ y: 0,
++ expected: []byte{0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08},
++ }}
++
++ for i, tt := range examples {
++ m := modulusFromNat(natFromBytes(tt.m))
++ got := natFromBytes(tt.x).expandFor(m).shiftIn(uint(tt.y), m)
++ if got.cmpEq(natFromBytes(tt.expected).expandFor(m)) != 1 {
++ t.Errorf("%d: got %x, expected %x", i, got, tt.expected)
++ }
++ }
++}
++
++func TestModulusAndNatSizes(t *testing.T) {
++ // These are 126 bit (2 * _W on 64-bit architectures) values, serialized as
++ // 128 bits worth of bytes. If leading zeroes are stripped, they fit in two
++ // limbs, if they are not, they fit in three. This can be a problem because
++ // modulus strips leading zeroes and nat does not.
++ m := modulusFromNat(natFromBytes([]byte{
++ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}))
++ x := natFromBytes([]byte{
++ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe})
++ x.expandFor(m) // must not panic for shrinking
++}
++
++func TestExpand(t *testing.T) {
++ sliced := []uint{1, 2, 3, 4}
++ examples := []struct {
++ in []uint
++ n int
++ out []uint
++ }{{
++ []uint{1, 2},
++ 4,
++ []uint{1, 2, 0, 0},
++ }, {
++ sliced[:2],
++ 4,
++ []uint{1, 2, 0, 0},
++ }, {
++ []uint{1, 2},
++ 2,
++ []uint{1, 2},
++ }, {
++ []uint{1, 2, 0},
++ 2,
++ []uint{1, 2},
++ }}
++
++ for i, tt := range examples {
++ got := (&nat{tt.in}).expand(tt.n)
++ if len(got.limbs) != len(tt.out) || got.cmpEq(&nat{tt.out}) != 1 {
++ t.Errorf("%d: got %x, expected %x", i, got, tt.out)
++ }
++ }
++}
++
++func TestMod(t *testing.T) {
++ m := modulusFromNat(natFromBytes([]byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d}))
++ x := natFromBytes([]byte{0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
++ out := new(nat)
++ out.mod(x, m)
++ expected := natFromBytes([]byte{0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09})
++ if out.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", out, expected)
++ }
++}
++
++func TestModSub(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{6}}
++ y := &nat{[]uint{7}}
++ x.modSub(y, m)
++ expected := &nat{[]uint{12}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++ x.modSub(y, m)
++ expected = &nat{[]uint{5}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++}
++
++func TestModAdd(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{6}}
++ y := &nat{[]uint{7}}
++ x.modAdd(y, m)
++ expected := &nat{[]uint{0}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++ x.modAdd(y, m)
++ expected = &nat{[]uint{7}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++}
++
++func TestExp(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{3}}
++ out := &nat{[]uint{0}}
++ out.exp(x, []byte{12}, m)
++ expected := &nat{[]uint{1}}
++ if out.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", out, expected)
++ }
++}
++
++func makeBenchmarkModulus() *modulus {
++ m := make([]uint, 32)
++ for i := 0; i < 32; i++ {
++ m[i] = _MASK
++ }
++ return modulusFromNat(&nat{limbs: m})
++}
++
++func makeBenchmarkValue() *nat {
++ x := make([]uint, 32)
++ for i := 0; i < 32; i++ {
++ x[i] = _MASK - 1
++ }
++ return &nat{limbs: x}
++}
++
++func makeBenchmarkExponent() []byte {
++ e := make([]byte, 256)
++ for i := 0; i < 32; i++ {
++ e[i] = 0xFF
++ }
++ return e
++}
++
++func BenchmarkModAdd(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modAdd(y, m)
++ }
++}
++
++func BenchmarkModSub(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modSub(y, m)
++ }
++}
++
++func BenchmarkMontgomeryRepr(b *testing.B) {
++ x := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.montgomeryRepresentation(m)
++ }
++}
++
++func BenchmarkMontgomeryMul(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ out := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.montgomeryMul(x, y, m)
++ }
++}
++
++func BenchmarkModMul(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modMul(y, m)
++ }
++}
++
++func BenchmarkExpBig(b *testing.B) {
++ out := new(big.Int)
++ exponentBytes := makeBenchmarkExponent()
++ x := new(big.Int).SetBytes(exponentBytes)
++ e := new(big.Int).SetBytes(exponentBytes)
++ n := new(big.Int).SetBytes(exponentBytes)
++ one := new(big.Int).SetUint64(1)
++ n.Add(n, one)
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.Exp(x, e, n)
++ }
++}
++
++func BenchmarkExp(b *testing.B) {
++ x := makeBenchmarkValue()
++ e := makeBenchmarkExponent()
++ out := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.exp(x, e, m)
++ }
++}
+diff --git a/src/crypto/rsa/pkcs1v15.go b/src/crypto/rsa/pkcs1v15.go
+index 0cbd6d0..90233bb 100644
+--- a/src/crypto/rsa/pkcs1v15.go
++++ b/src/crypto/rsa/pkcs1v15.go
+@@ -9,7 +9,6 @@ import (
+ "crypto/subtle"
+ "errors"
+ "io"
+- "math/big"
+
+ "crypto/internal/randutil"
+ )
+@@ -58,14 +57,11 @@ func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) ([]byte, error)
+ em[len(em)-len(msg)-1] = 0
+ copy(mm, msg)
+
+- m := new(big.Int).SetBytes(em)
+- c := encrypt(new(big.Int), pub, m)
+-
+- return c.FillBytes(em), nil
++ return encrypt(pub, em), nil
+ }
+
+ // DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS #1 v1.5.
+-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
++// The random parameter is legacy and ignored, and it can be nil.
+ //
+ // Note that whether this function returns an error or not discloses secret
+ // information. If an attacker can cause this function to run repeatedly and
+@@ -76,7 +72,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
+ if err := checkPub(&priv.PublicKey); err != nil {
+ return nil, err
+ }
+- valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
++ valid, out, index, err := decryptPKCS1v15(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+@@ -87,7 +83,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
+ }
+
+ // DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS #1 v1.5.
+-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
++// The random parameter is legacy and ignored, and it can be nil.
+ // It returns an error if the ciphertext is the wrong length or if the
+ // ciphertext is greater than the public modulus. Otherwise, no error is
+ // returned. If the padding is valid, the resulting plaintext message is copied
+@@ -114,7 +110,7 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
+ return ErrDecryption
+ }
+
+- valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext)
++ valid, em, index, err := decryptPKCS1v15(priv, ciphertext)
+ if err != nil {
+ return err
+ }
+@@ -130,26 +126,24 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
+ return nil
+ }
+
+-// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
+-// rand is not nil. It returns one or zero in valid that indicates whether the
+-// plaintext was correctly structured. In either case, the plaintext is
+-// returned in em so that it may be read independently of whether it was valid
+-// in order to maintain constant memory access patterns. If the plaintext was
+-// valid then index contains the index of the original message in em.
+-func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
++// decryptPKCS1v15 decrypts ciphertext using priv. It returns one or zero in
++// valid that indicates whether the plaintext was correctly structured.
++// In either case, the plaintext is returned in em so that it may be read
++// independently of whether it was valid in order to maintain constant memory
++// access patterns. If the plaintext was valid then index contains the index of
++// the original message in em, to allow constant time padding removal.
++func decryptPKCS1v15(priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
+ k := priv.Size()
+ if k < 11 {
+ err = ErrDecryption
+ return
+ }
+
+- c := new(big.Int).SetBytes(ciphertext)
+- m, err := decrypt(rand, priv, c)
++ em, err = decrypt(priv, ciphertext)
+ if err != nil {
+ return
+ }
+
+- em = m.FillBytes(make([]byte, k))
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+ secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
+
+@@ -221,8 +215,7 @@ var hashPrefixes = map[crypto.Hash][]byte{
+ // function. If hash is zero, hashed is signed directly. This isn't
+ // advisable except for interoperability.
+ //
+-// If rand is not nil then RSA blinding will be used to avoid timing
+-// side-channel attacks.
++// The random parameter is legacy and ignored, and it can be nil.
+ //
+ // This function is deterministic. Thus, if the set of possible
+ // messages is small, an attacker may be able to build a map from
+@@ -249,13 +242,7 @@ func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []b
+ copy(em[k-tLen:k-hashLen], prefix)
+ copy(em[k-hashLen:k], hashed)
+
+- m := new(big.Int).SetBytes(em)
+- c, err := decryptAndCheck(rand, priv, m)
+- if err != nil {
+- return nil, err
+- }
+-
+- return c.FillBytes(em), nil
++ return decryptAndCheck(priv, em)
+ }
+
+ // VerifyPKCS1v15 verifies an RSA PKCS #1 v1.5 signature.
+@@ -282,9 +269,7 @@ func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte)
+ return ErrVerification
+ }
+
+- c := new(big.Int).SetBytes(sig)
+- m := encrypt(new(big.Int), pub, c)
+- em := m.FillBytes(make([]byte, k))
++ em := encrypt(pub, sig)
+ // EM = 0x00 || 0x01 || PS || 0x00 || T
+
+ ok := subtle.ConstantTimeByteEq(em[0], 0)
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index 814522d..aeb6148 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -12,7 +12,6 @@ import (
+ "errors"
+ "hash"
+ "io"
+- "math/big"
+ )
+
+ // Per RFC 8017, Section 9.1
+@@ -207,19 +206,26 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ // Note that hashed must be the result of hashing the input message using the
+ // given hash function. salt is a random sequence of bytes whose length will be
+ // later used to verify the signature.
+-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
+- emBits := priv.N.BitLen() - 1
++func signPSSWithSalt(priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
++ emBits := bigBitLen(priv.N) - 1
+ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+ return nil, err
+ }
+- m := new(big.Int).SetBytes(em)
+- c, err := decryptAndCheck(rand, priv, m)
+- if err != nil {
+- return nil, err
++ // RFC 8017: "Note that the octet length of EM will be one less than k if
++ // modBits - 1 is divisible by 8 and equal to k otherwise, where k is the
++ // length in octets of the RSA modulus n."
++ //
++ // This is extremely annoying, as all other encrypt and decrypt inputs are
++ // always the exact same size as the modulus. Since it only happens for
++ // weird modulus sizes, fix it by padding inefficiently.
++ if emLen, k := len(em), priv.Size(); emLen < k {
++ emNew := make([]byte, k)
++ copy(emNew[k-emLen:], em)
++ em = emNew
+ }
+- s := make([]byte, priv.Size())
+- return c.FillBytes(s), nil
++
++ return decryptAndCheck(priv, em)
+ }
+
+ const (
+@@ -269,7 +275,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+- saltLength = (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
++ saltLength = (bigBitLen(priv.N)-1+7)/8 - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+@@ -278,7 +284,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ if _, err := io.ReadFull(rand, salt); err != nil {
+ return nil, err
+ }
+- return signPSSWithSalt(rand, priv, hash, digest, salt)
++ return signPSSWithSalt(priv, hash, digest, salt)
+ }
+
+ // VerifyPSS verifies a PSS signature.
+@@ -291,13 +297,22 @@ func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts
+ if len(sig) != pub.Size() {
+ return ErrVerification
+ }
+- s := new(big.Int).SetBytes(sig)
+- m := encrypt(new(big.Int), pub, s)
+- emBits := pub.N.BitLen() - 1
++
++ emBits := bigBitLen(pub.N) - 1
+ emLen := (emBits + 7) / 8
+- if m.BitLen() > emLen*8 {
+- return ErrVerification
++ em := encrypt(pub, sig)
++
++ // Like in signPSSWithSalt, deal with mismatches between emLen and the size
++ // of the modulus. The spec would have us wire emLen into the encoding
++ // function, but we'd rather always encode to the size of the modulus and
++ // then strip leading zeroes if necessary. This only happens for weird
++ // modulus sizes anyway.
++ for len(em) > emLen && len(em) > 0 {
++ if em[0] != 0 {
++ return ErrVerification
++ }
++ em = em[1:]
+ }
+- em := m.FillBytes(make([]byte, emLen))
++
+ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+ }
+diff --git a/src/crypto/rsa/pss_test.go b/src/crypto/rsa/pss_test.go
+index c3a6d46..d018b43 100644
+--- a/src/crypto/rsa/pss_test.go
++++ b/src/crypto/rsa/pss_test.go
+@@ -233,7 +233,10 @@ func TestPSSSigning(t *testing.T) {
+ }
+ }
+
+-func TestSignWithPSSSaltLengthAuto(t *testing.T) {
++func TestPSS513(t *testing.T) {
++ // See Issue 42741, and separately, RFC 8017: "Note that the octet length of
++ // EM will be one less than k if modBits - 1 is divisible by 8 and equal to
++ // k otherwise, where k is the length in octets of the RSA modulus n."
+ key, err := GenerateKey(rand.Reader, 513)
+ if err != nil {
+ t.Fatal(err)
+@@ -246,8 +249,9 @@ func TestSignWithPSSSaltLengthAuto(t *testing.T) {
+ if err != nil {
+ t.Fatal(err)
+ }
+- if len(signature) == 0 {
+- t.Fatal("empty signature returned")
++ err = VerifyPSS(&key.PublicKey, crypto.SHA256, digest[:], signature, nil)
++ if err != nil {
++ t.Error(err)
+ }
+ }
+
+diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
+index 6fd59b3..20c1fe1 100644
+--- a/src/crypto/rsa/rsa.go
++++ b/src/crypto/rsa/rsa.go
+@@ -19,13 +19,17 @@
+ // over the public key primitive, the PrivateKey type implements the
+ // Decrypter and Signer interfaces from the crypto package.
+ //
+-// The RSA operations in this package are not implemented using constant-time algorithms.
++// Operations in this package are implemented using constant-time algorithms,
++// except for [GenerateKey], [PrivateKey.Precompute], and [PrivateKey.Validate].
++// Every other operation only leaks the bit size of the involved values, which
++// all depend on the selected key size.
+ package rsa
+
+ import (
+ "crypto"
+ "crypto/rand"
+ "crypto/subtle"
++ "encoding/binary"
+ "errors"
+ "hash"
+ "io"
+@@ -35,7 +39,6 @@ import (
+ "crypto/internal/randutil"
+ )
+
+-var bigZero = big.NewInt(0)
+ var bigOne = big.NewInt(1)
+
+ // A PublicKey represents the public part of an RSA key.
+@@ -50,7 +53,7 @@ type PublicKey struct {
+ // Size returns the modulus size in bytes. Raw signatures and ciphertexts
+ // for or by this public key will have the same size.
+ func (pub *PublicKey) Size() int {
+- return (pub.N.BitLen() + 7) / 8
++ return (bigBitLen(pub.N) + 7) / 8
+ }
+
+ // Equal reports whether pub and x have the same value.
+@@ -384,10 +387,18 @@ func mgf1XOR(out []byte, hash hash.Hash, seed []byte) {
+ // too large for the size of the public key.
+ var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size")
+
+-func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
+- e := big.NewInt(int64(pub.E))
+- c.Exp(m, e, pub.N)
+- return c
++func encrypt(pub *PublicKey, plaintext []byte) []byte {
++ N := modulusFromNat(natFromBig(pub.N))
++ m := natFromBytes(plaintext).expandFor(N)
++
++ e := make([]byte, 8)
++ binary.BigEndian.PutUint64(e, uint64(pub.E))
++ for len(e) > 1 && e[0] == 0 {
++ e = e[1:]
++ }
++
++ out := make([]byte, modulusSize(N))
++ return new(nat).exp(m, e, N).fillBytes(out)
+ }
+
+ // EncryptOAEP encrypts the given message with RSA-OAEP.
+@@ -437,12 +448,7 @@ func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, l
+ mgf1XOR(db, hash, seed)
+ mgf1XOR(seed, hash, db)
+
+- m := new(big.Int)
+- m.SetBytes(em)
+- c := encrypt(new(big.Int), pub, m)
+-
+- out := make([]byte, k)
+- return c.FillBytes(out), nil
++ return encrypt(pub, em), nil
+ }
+
+ // ErrDecryption represents a failure to decrypt a message.
+@@ -484,98 +490,70 @@ func (priv *PrivateKey) Precompute() {
+ }
+ }
+
+-// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
+-// random source is given, RSA blinding is used.
+-func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+- // TODO(agl): can we get away with reusing blinds?
+- if c.Cmp(priv.N) > 0 {
+- err = ErrDecryption
+- return
++// decrypt performs an RSA decryption of ciphertext into out.
++func decrypt(priv *PrivateKey, ciphertext []byte) ([]byte, error) {
++ N := modulusFromNat(natFromBig(priv.N))
++ c := natFromBytes(ciphertext).expandFor(N)
++ if c.cmpGeq(N.nat) == 1 {
++ return nil, ErrDecryption
+ }
+ if priv.N.Sign() == 0 {
+ return nil, ErrDecryption
+ }
+
+- var ir *big.Int
+- if random != nil {
+- randutil.MaybeReadByte(random)
+-
+- // Blinding enabled. Blinding involves multiplying c by r^e.
+- // Then the decryption operation performs (m^e * r^e)^d mod n
+- // which equals mr mod n. The factor of r can then be removed
+- // by multiplying by the multiplicative inverse of r.
+-
+- var r *big.Int
+- ir = new(big.Int)
+- for {
+- r, err = rand.Int(random, priv.N)
+- if err != nil {
+- return
+- }
+- if r.Cmp(bigZero) == 0 {
+- r = bigOne
+- }
+- ok := ir.ModInverse(r, priv.N)
+- if ok != nil {
+- break
+- }
+- }
+- bigE := big.NewInt(int64(priv.E))
+- rpowe := new(big.Int).Exp(r, bigE, priv.N) // N != 0
+- cCopy := new(big.Int).Set(c)
+- cCopy.Mul(cCopy, rpowe)
+- cCopy.Mod(cCopy, priv.N)
+- c = cCopy
+- }
+-
++ // Note that because our private decryption exponents are stored as big.Int,
++ // we potentially leak the exact number of bits of these exponents. This
++ // isn't great, but should be fine.
+ if priv.Precomputed.Dp == nil {
+- m = new(big.Int).Exp(c, priv.D, priv.N)
+- } else {
+- // We have the precalculated values needed for the CRT.
+- m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
+- m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
+- m.Sub(m, m2)
+- if m.Sign() < 0 {
+- m.Add(m, priv.Primes[0])
+- }
+- m.Mul(m, priv.Precomputed.Qinv)
+- m.Mod(m, priv.Primes[0])
+- m.Mul(m, priv.Primes[1])
+- m.Add(m, m2)
+-
+- for i, values := range priv.Precomputed.CRTValues {
+- prime := priv.Primes[2+i]
+- m2.Exp(c, values.Exp, prime)
+- m2.Sub(m2, m)
+- m2.Mul(m2, values.Coeff)
+- m2.Mod(m2, prime)
+- if m2.Sign() < 0 {
+- m2.Add(m2, prime)
+- }
+- m2.Mul(m2, values.R)
+- m.Add(m, m2)
+- }
+- }
+-
+- if ir != nil {
+- // Unblind.
+- m.Mul(m, ir)
+- m.Mod(m, priv.N)
+- }
+-
+- return
++ out := make([]byte, modulusSize(N))
++ return new(nat).exp(c, priv.D.Bytes(), N).fillBytes(out), nil
++ }
++
++ t0 := new(nat)
++ P := modulusFromNat(natFromBig(priv.Primes[0]))
++ Q := modulusFromNat(natFromBig(priv.Primes[1]))
++ // m = c ^ Dp mod p
++ m := new(nat).exp(t0.mod(c, P), priv.Precomputed.Dp.Bytes(), P)
++ // m2 = c ^ Dq mod q
++ m2 := new(nat).exp(t0.mod(c, Q), priv.Precomputed.Dq.Bytes(), Q)
++ // m = m - m2 mod p
++ m.modSub(t0.mod(m2, P), P)
++ // m = m * Qinv mod p
++ m.modMul(natFromBig(priv.Precomputed.Qinv).expandFor(P), P)
++ // m = m * q mod N
++ m.expandFor(N).modMul(t0.mod(Q.nat, N), N)
++ // m = m + m2 mod N
++ m.modAdd(m2.expandFor(N), N)
++
++ for i, values := range priv.Precomputed.CRTValues {
++ p := modulusFromNat(natFromBig(priv.Primes[2+i]))
++ // m2 = c ^ Exp mod p
++ m2.exp(t0.mod(c, p), values.Exp.Bytes(), p)
++ // m2 = m2 - m mod p
++ m2.modSub(t0.mod(m, p), p)
++ // m2 = m2 * Coeff mod p
++ m2.modMul(natFromBig(values.Coeff).expandFor(p), p)
++ // m2 = m2 * R mod N
++ R := natFromBig(values.R).expandFor(N)
++ m2.expandFor(N).modMul(R, N)
++ // m = m + m2 mod N
++ m.modAdd(m2, N)
++ }
++
++ out := make([]byte, modulusSize(N))
++ return m.fillBytes(out), nil
+ }
+
+-func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+- m, err = decrypt(random, priv, c)
++func decryptAndCheck(priv *PrivateKey, ciphertext []byte) (m []byte, err error) {
++ m, err = decrypt(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+
+ // In order to defend against errors in the CRT computation, m^e is
+ // calculated, which should match the original ciphertext.
+- check := encrypt(new(big.Int), &priv.PublicKey, m)
+- if c.Cmp(check) != 0 {
++ check := encrypt(&priv.PublicKey, m)
++ if subtle.ConstantTimeCompare(ciphertext, check) != 1 {
+ return nil, errors.New("rsa: internal error")
+ }
+ return m, nil
+@@ -587,9 +565,7 @@ func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int
+ // Encryption and decryption of a given message must use the same hash function
+ // and sha256.New() is a reasonable choice.
+ //
+-// The random parameter, if not nil, is used to blind the private-key operation
+-// and avoid timing side-channel attacks. Blinding is purely internal to this
+-// function – the random data need not match that used when encrypting.
++// The random parameter is legacy and ignored, and it can be nil.
+ //
+ // The label parameter must match the value given when encrypting. See
+ // EncryptOAEP for details.
+@@ -603,9 +579,7 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ return nil, ErrDecryption
+ }
+
+- c := new(big.Int).SetBytes(ciphertext)
+-
+- m, err := decrypt(random, priv, c)
++ em, err := decrypt(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+@@ -614,10 +588,6 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ lHash := hash.Sum(nil)
+ hash.Reset()
+
+- // We probably leak the number of leading zeros.
+- // It's not clear that we can do anything about this.
+- em := m.FillBytes(make([]byte, k))
+-
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+
+ seed := em[1 : hash.Size()+1]
+--
+2.40.0
diff --git a/meta/recipes-devtools/go/go-1.21/CVE-2023-24531_1.patch b/meta/recipes-devtools/go/go-1.21/CVE-2023-24531_1.patch
new file mode 100644
index 0000000000..5f6d7e16a8
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.21/CVE-2023-24531_1.patch
@@ -0,0 +1,252 @@
+From 0f717b5f7d32bb660c01ec0366bd53c9b4c5ab5d Mon Sep 17 00:00:00 2001
+From: Michael Matloob <matloob@golang.org>
+Date: Mon, 24 Apr 2023 16:57:28 -0400
+Subject: [PATCH 1/2] cmd/go: sanitize go env outputs
+
+go env, without any arguments, outputs the environment variables in
+the form of a script that can be run on the host OS. On Unix, single
+quote the strings and place single quotes themselves outside the
+single quoted strings. On Windows, use the set "var=val" syntax with
+the quote starting before the variable.
+
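+As an illustrative sketch (not part of the upstream commit message), a
+variable whose value contains a single quote, say a hypothetical
+CGO_CFLAGS value of ab'cd, would be emitted on Unix as
+
+	CGO_CFLAGS='ab'\''cd'
+
+so the embedded quote is closed, escaped outside the quoted string, and
+reopened, and the shell reads back exactly the original value.
+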
+Fixes #58508
+
+Change-Id: Iecd379a4af7285ea9b2024f0202250c74fd9a2bd
+Reviewed-on: https://go-review.googlesource.com/c/go/+/488375
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Michael Matloob <matloob@golang.org>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Michael Matloob <matloob@golang.org>
+Reviewed-by: Bryan Mills <bcmills@google.com>
+Reviewed-by: Quim Muntal <quimmuntal@gmail.com>
+
+CVE: CVE-2023-24531
+Upstream-Status: Backport [f379e78951a405e7e99a60fb231eeedbf976c108]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/cmd/go/internal/envcmd/env.go | 60 ++++++++++++-
+ src/cmd/go/internal/envcmd/env_test.go | 94 +++++++++++++++++++++
+ src/cmd/go/testdata/script/env_sanitize.txt | 5 ++
+ 3 files changed, 157 insertions(+), 2 deletions(-)
+ create mode 100644 src/cmd/go/internal/envcmd/env_test.go
+ create mode 100644 src/cmd/go/testdata/script/env_sanitize.txt
+
+diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go
+index 43b94e7..0ce8843 100644
+--- a/src/cmd/go/internal/envcmd/env.go
++++ b/src/cmd/go/internal/envcmd/env.go
+@@ -6,6 +6,7 @@
+ package envcmd
+
+ import (
++ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+@@ -17,6 +18,7 @@ import (
+ "runtime"
+ "sort"
+ "strings"
++ "unicode"
+ "unicode/utf8"
+
+ "cmd/go/internal/base"
+@@ -379,9 +381,12 @@ func checkBuildConfig(add map[string]string, del map[string]bool) error {
+ func PrintEnv(w io.Writer, env []cfg.EnvVar) {
+ for _, e := range env {
+ if e.Name != "TERM" {
++ if runtime.GOOS != "plan9" && bytes.Contains([]byte(e.Value), []byte{0}) {
++ base.Fatalf("go: internal error: encountered null byte in environment variable %s on non-plan9 platform", e.Name)
++ }
+ switch runtime.GOOS {
+ default:
+- fmt.Fprintf(w, "%s=\"%s\"\n", e.Name, e.Value)
++ fmt.Fprintf(w, "%s=%s\n", e.Name, shellQuote(e.Value))
+ case "plan9":
+ if strings.IndexByte(e.Value, '\x00') < 0 {
+ fmt.Fprintf(w, "%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''"))
+@@ -392,17 +397,68 @@ func PrintEnv(w io.Writer, env []cfg.EnvVar) {
+ if x > 0 {
+ fmt.Fprintf(w, " ")
+ }
++ // TODO(#59979): Does this need to be quoted like above?
+ fmt.Fprintf(w, "%s", s)
+ }
+ fmt.Fprintf(w, ")\n")
+ }
+ case "windows":
+- fmt.Fprintf(w, "set %s=%s\n", e.Name, e.Value)
++ if hasNonGraphic(e.Value) {
++ base.Errorf("go: stripping unprintable or unescapable characters from %%%q%%", e.Name)
++ }
++ fmt.Fprintf(w, "set %s=%s\n", e.Name, batchEscape(e.Value))
+ }
+ }
+ }
+ }
+
++func hasNonGraphic(s string) bool {
++ for _, c := range []byte(s) {
++ if c == '\r' || c == '\n' || (!unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c))) {
++ return true
++ }
++ }
++ return false
++}
++
++func shellQuote(s string) string {
++ var b bytes.Buffer
++ b.WriteByte('\'')
++ for _, x := range []byte(s) {
++ if x == '\'' {
++ // Close the single quoted string, add an escaped single quote,
++ // and start another single quoted string.
++ b.WriteString(`'\''`)
++ } else {
++ b.WriteByte(x)
++ }
++ }
++ b.WriteByte('\'')
++ return b.String()
++}
++
++func batchEscape(s string) string {
++ var b bytes.Buffer
++ for _, x := range []byte(s) {
++ if x == '\r' || x == '\n' || (!unicode.IsGraphic(rune(x)) && !unicode.IsSpace(rune(x))) {
++ b.WriteRune(unicode.ReplacementChar)
++ continue
++ }
++ switch x {
++ case '%':
++ b.WriteString("%%")
++ case '<', '>', '|', '&', '^':
++ // These are special characters that need to be escaped with ^. See
++ // https://learn.microsoft.com/en-us/windows-server/administration/windows-commands/set_1.
++ b.WriteByte('^')
++ b.WriteByte(x)
++ default:
++ b.WriteByte(x)
++ }
++ }
++ return b.String()
++}
++
+ func printEnvAsJSON(env []cfg.EnvVar) {
+ m := make(map[string]string)
+ for _, e := range env {
+diff --git a/src/cmd/go/internal/envcmd/env_test.go b/src/cmd/go/internal/envcmd/env_test.go
+new file mode 100644
+index 0000000..32d99fd
+--- /dev/null
++++ b/src/cmd/go/internal/envcmd/env_test.go
+@@ -0,0 +1,94 @@
++// Copyright 2022 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++//go:build unix || windows
++
++package envcmd
++
++import (
++ "bytes"
++ "cmd/go/internal/cfg"
++ "fmt"
++ "internal/testenv"
++ "os"
++ "os/exec"
++ "path/filepath"
++ "runtime"
++ "testing"
++ "unicode"
++)
++
++func FuzzPrintEnvEscape(f *testing.F) {
++ f.Add(`$(echo 'cc"'; echo 'OOPS="oops')`)
++ f.Add("$(echo shell expansion 1>&2)")
++ f.Add("''")
++ f.Add(`C:\"Program Files"\`)
++ f.Add(`\\"Quoted Host"\\share`)
++ f.Add("\xfb")
++ f.Add("0")
++ f.Add("")
++ f.Add("''''''''")
++ f.Add("\r")
++ f.Add("\n")
++ f.Add("E,%")
++ f.Fuzz(func(t *testing.T, s string) {
++ t.Parallel()
++
++ for _, c := range []byte(s) {
++ if c == 0 {
++ t.Skipf("skipping %q: contains a null byte. Null bytes can't occur in the environment"+
++ " outside of Plan 9, which has a different code path than Windows and Unix that this test"+
++ " isn't testing.", s)
++ }
++ if c > unicode.MaxASCII {
++ t.Skipf("skipping %#q: contains a non-ASCII character %q", s, c)
++ }
++ if !unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c)) {
++ t.Skipf("skipping %#q: contains non-graphic character %q", s, c)
++ }
++ if runtime.GOOS == "windows" && c == '\r' || c == '\n' {
++ t.Skipf("skipping %#q on Windows: contains unescapable character %q", s, c)
++ }
++ }
++
++ var b bytes.Buffer
++ if runtime.GOOS == "windows" {
++ b.WriteString("@echo off\n")
++ }
++ PrintEnv(&b, []cfg.EnvVar{{Name: "var", Value: s}})
++ var want string
++ if runtime.GOOS == "windows" {
++ fmt.Fprintf(&b, "echo \"%%var%%\"\n")
++ want += "\"" + s + "\"\r\n"
++ } else {
++ fmt.Fprintf(&b, "printf '%%s\\n' \"$var\"\n")
++ want += s + "\n"
++ }
++ scriptfilename := "script.sh"
++ if runtime.GOOS == "windows" {
++ scriptfilename = "script.bat"
++ }
++ scriptfile := filepath.Join(t.TempDir(), scriptfilename)
++ if err := os.WriteFile(scriptfile, b.Bytes(), 0777); err != nil {
++ t.Fatal(err)
++ }
++ t.Log(b.String())
++ var cmd *exec.Cmd
++ if runtime.GOOS == "windows" {
++ cmd = testenv.Command(t, "cmd.exe", "/C", scriptfile)
++ } else {
++ cmd = testenv.Command(t, "sh", "-c", scriptfile)
++ }
++ out, err := cmd.Output()
++ t.Log(string(out))
++ if err != nil {
++ t.Fatal(err)
++ }
++
++ if string(out) != want {
++ t.Fatalf("output of running PrintEnv script and echoing variable: got: %q, want: %q",
++ string(out), want)
++ }
++ })
++}
+diff --git a/src/cmd/go/testdata/script/env_sanitize.txt b/src/cmd/go/testdata/script/env_sanitize.txt
+new file mode 100644
+index 0000000..cc4d23a
+--- /dev/null
++++ b/src/cmd/go/testdata/script/env_sanitize.txt
+@@ -0,0 +1,5 @@
++env GOFLAGS='$(echo ''cc"''; echo ''OOPS="oops'')'
++go env
++[GOOS:darwin] stdout 'GOFLAGS=''\$\(echo ''\\''''cc"''\\''''; echo ''\\''''OOPS="oops''\\''''\)'''
++[GOOS:linux] stdout 'GOFLAGS=''\$\(echo ''\\''''cc"''\\''''; echo ''\\''''OOPS="oops''\\''''\)'''
++[GOOS:windows] stdout 'set GOFLAGS=\$\(echo ''cc"''; echo ''OOPS="oops''\)'
+--
+2.35.5
+
diff --git a/meta/recipes-devtools/go/go-1.21/CVE-2023-24531_2.patch b/meta/recipes-devtools/go/go-1.21/CVE-2023-24531_2.patch
new file mode 100644
index 0000000000..eecc04c2e3
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.21/CVE-2023-24531_2.patch
@@ -0,0 +1,47 @@
+From b2624f973692ca093348395c2418d1c422f2a162 Mon Sep 17 00:00:00 2001
+From: miller <millerresearch@gmail.com>
+Date: Mon, 8 May 2023 16:56:21 +0100
+Subject: [PATCH 2/2] cmd/go: quote entries in list-valued variables for go env
+ in plan9
+
+When 'go env' without an argument prints environment variables as
+a script which can be executed by the shell, variables with a
+list value in Plan 9 (such as GOPATH) need to be printed with each
+element enclosed in single quotes in case it contains characters
+significant to the Plan 9 shell (such as ' ' or '=').
+
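+As an illustrative sketch (not part of the upstream commit message), a
+hypothetical GOPATH list whose second element contains a space would now
+be printed for the Plan 9 shell as
+
+	GOPATH=('/usr/glenda/go' '/tmp/my dir')
+
+with any single quote inside an element doubled, following rc quoting rules.
+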
+For #58508
+
+Change-Id: Ia30f51307cc6d07a7e3ada6bf9d60bf9951982ff
+Reviewed-on: https://go-review.googlesource.com/c/go/+/493535
+Run-TryBot: Cherry Mui <cherryyz@google.com>
+Reviewed-by: Cherry Mui <cherryyz@google.com>
+Reviewed-by: Russ Cox <rsc@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Auto-Submit: Dmitri Shuralyov <dmitshur@golang.org>
+
+CVE: CVE-2023-24531
+Upstream-Status: Backport [05cc9e55876874462a4726ca0101c970838c80e5]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ src/cmd/go/internal/envcmd/env.go | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go
+index 0ce8843..b48d0bd 100644
+--- a/src/cmd/go/internal/envcmd/env.go
++++ b/src/cmd/go/internal/envcmd/env.go
+@@ -397,8 +397,7 @@ func PrintEnv(w io.Writer, env []cfg.EnvVar) {
+ if x > 0 {
+ fmt.Fprintf(w, " ")
+ }
+- // TODO(#59979): Does this need to be quoted like above?
+- fmt.Fprintf(w, "%s", s)
++ fmt.Fprintf(w, "'%s'", strings.ReplaceAll(s, "'", "''"))
+ }
+ fmt.Fprintf(w, ")\n")
+ }
+--
+2.35.5
+
diff --git a/meta/recipes-devtools/go/go-1.21/CVE-2023-39318.patch b/meta/recipes-devtools/go/go-1.21/CVE-2023-39318.patch
new file mode 100644
index 0000000000..503a4a288a
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.21/CVE-2023-39318.patch
@@ -0,0 +1,262 @@
+From 023b542edf38e2a1f87fcefb9f75ff2f99401b4c Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 3 Aug 2023 12:24:13 -0700
+Subject: [PATCH] [release-branch.go1.20] html/template: support HTML-like
+ comments in script contexts
+
+Per Appendix B.1.1 of the ECMAScript specification, support HTML-like
+comments in script contexts. Also per section 12.5, support hashbang
+comments. This brings our parsing in line with how browsers treat these
+comment types.
+
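+As an illustrative sketch (not part of the upstream commit message), the
+comment forms now recognised inside a <script> element are
+
+	<script>#! the rest of this hashbang line is ignored
+	var a = 1; <!-- the rest of this line is ignored
+	var b = 2;     // lines in between remain ordinary code
+	--> the rest of this line is ignored too
+	</script>
+
+which matches how browsers tokenise these comments.
+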
+Thanks to Takeshi Kaneko (GMO Cybersecurity by Ierae, Inc.) for
+reporting this issue.
+
+Fixes #62196
+Fixes #62395
+Fixes CVE-2023-39318
+
+Change-Id: Id512702c5de3ae46cf648e268cb10e1eb392a181
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1976593
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2014620
+Reviewed-on: https://go-review.googlesource.com/c/go/+/526098
+Run-TryBot: Cherry Mui <cherryyz@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/023b542edf38e2a1f87fcefb9f75ff2f99401b4c]
+CVE: CVE-2023-39318
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/html/template/context.go | 6 ++-
+ src/html/template/escape.go | 5 ++-
+ src/html/template/escape_test.go | 10 +++++
+ src/html/template/state_string.go | 26 +++++++------
+ src/html/template/transition.go | 80 +++++++++++++++++++++++++--------------
+ 5 files changed, 84 insertions(+), 43 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index f5f44a1..feb6517 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -124,6 +124,10 @@ const (
+ stateJSBlockCmt
+ // stateJSLineCmt occurs inside a JavaScript // line comment.
+ stateJSLineCmt
++ // stateJSHTMLOpenCmt occurs inside a JavaScript <!-- HTML-like comment.
++ stateJSHTMLOpenCmt
++ // stateJSHTMLCloseCmt occurs inside a JavaScript --> HTML-like comment.
++ stateJSHTMLCloseCmt
+ // stateCSS occurs inside a <style> element or style attribute.
+ stateCSS
+ // stateCSSDqStr occurs inside a CSS double quoted string.
+@@ -149,7 +153,7 @@ const (
+ // authors & maintainers, not for end-users or machines.
+ func isComment(s state) bool {
+ switch s {
+- case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
++ case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt, stateCSSBlockCmt, stateCSSLineCmt:
+ return true
+ }
+ return false
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 1747ec9..b0085ce 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -721,9 +721,12 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+ if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
+ // Preserve the portion between written and the comment start.
+ cs := i1 - 2
+- if c1.state == stateHTMLCmt {
++ if c1.state == stateHTMLCmt || c1.state == stateJSHTMLOpenCmt {
+ // "<!--" instead of "/*" or "//"
+ cs -= 2
++ } else if c1.state == stateJSHTMLCloseCmt {
++ // "-->" instead of "/*" or "//"
++ cs -= 1
+ }
+ b.Write(s[written:cs])
+ written = i1
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index 7853daa..bff38c6 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -503,6 +503,16 @@ func TestEscape(t *testing.T) {
+ "<script>var a/*b*///c\nd</script>",
+ "<script>var a \nd</script>",
+ },
++ {
++ "JS HTML-like comments",
++ "<script>before <!-- beep\nbetween\nbefore-->boop\n</script>",
++ "<script>before \nbetween\nbefore\n</script>",
++ },
++ {
++ "JS hashbang comment",
++ "<script>#! beep\n</script>",
++ "<script>\n</script>",
++ },
+ {
+ "Special tags in <script> string literals",
+ `<script>var a = "asd < 123 <!-- 456 < fgh <script jkl < 789 </script"</script>`,
+diff --git a/src/html/template/state_string.go b/src/html/template/state_string.go
+index 05104be..b5cfe70 100644
+--- a/src/html/template/state_string.go
++++ b/src/html/template/state_string.go
+@@ -25,21 +25,23 @@ func _() {
+ _ = x[stateJSRegexp-14]
+ _ = x[stateJSBlockCmt-15]
+ _ = x[stateJSLineCmt-16]
+- _ = x[stateCSS-17]
+- _ = x[stateCSSDqStr-18]
+- _ = x[stateCSSSqStr-19]
+- _ = x[stateCSSDqURL-20]
+- _ = x[stateCSSSqURL-21]
+- _ = x[stateCSSURL-22]
+- _ = x[stateCSSBlockCmt-23]
+- _ = x[stateCSSLineCmt-24]
+- _ = x[stateError-25]
+- _ = x[stateDead-26]
++ _ = x[stateJSHTMLOpenCmt-17]
++ _ = x[stateJSHTMLCloseCmt-18]
++ _ = x[stateCSS-19]
++ _ = x[stateCSSDqStr-20]
++ _ = x[stateCSSSqStr-21]
++ _ = x[stateCSSDqURL-22]
++ _ = x[stateCSSSqURL-23]
++ _ = x[stateCSSURL-24]
++ _ = x[stateCSSBlockCmt-25]
++ _ = x[stateCSSLineCmt-26]
++ _ = x[stateError-27]
++ _ = x[stateDead-28]
+ }
+
+-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
++const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateJSHTMLOpenCmtstateJSHTMLCloseCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
+
+-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 204, 217, 230, 243, 256, 267, 283, 298, 308, 317}
++var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 214, 233, 241, 254, 267, 280, 293, 304, 320, 335, 345, 354}
+
+ func (i state) String() string {
+ if i >= state(len(_state_index)-1) {
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index e2660cc..3d2a37c 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -14,32 +14,34 @@ import (
+ // the updated context and the number of bytes consumed from the front of the
+ // input.
+ var transitionFunc = [...]func(context, []byte) (context, int){
+- stateText: tText,
+- stateTag: tTag,
+- stateAttrName: tAttrName,
+- stateAfterName: tAfterName,
+- stateBeforeValue: tBeforeValue,
+- stateHTMLCmt: tHTMLCmt,
+- stateRCDATA: tSpecialTagEnd,
+- stateAttr: tAttr,
+- stateURL: tURL,
+- stateSrcset: tURL,
+- stateJS: tJS,
+- stateJSDqStr: tJSDelimited,
+- stateJSSqStr: tJSDelimited,
+- stateJSBqStr: tJSDelimited,
+- stateJSRegexp: tJSDelimited,
+- stateJSBlockCmt: tBlockCmt,
+- stateJSLineCmt: tLineCmt,
+- stateCSS: tCSS,
+- stateCSSDqStr: tCSSStr,
+- stateCSSSqStr: tCSSStr,
+- stateCSSDqURL: tCSSStr,
+- stateCSSSqURL: tCSSStr,
+- stateCSSURL: tCSSStr,
+- stateCSSBlockCmt: tBlockCmt,
+- stateCSSLineCmt: tLineCmt,
+- stateError: tError,
++ stateText: tText,
++ stateTag: tTag,
++ stateAttrName: tAttrName,
++ stateAfterName: tAfterName,
++ stateBeforeValue: tBeforeValue,
++ stateHTMLCmt: tHTMLCmt,
++ stateRCDATA: tSpecialTagEnd,
++ stateAttr: tAttr,
++ stateURL: tURL,
++ stateSrcset: tURL,
++ stateJS: tJS,
++ stateJSDqStr: tJSDelimited,
++ stateJSSqStr: tJSDelimited,
++ stateJSBqStr: tJSDelimited,
++ stateJSRegexp: tJSDelimited,
++ stateJSBlockCmt: tBlockCmt,
++ stateJSLineCmt: tLineCmt,
++ stateJSHTMLOpenCmt: tLineCmt,
++ stateJSHTMLCloseCmt: tLineCmt,
++ stateCSS: tCSS,
++ stateCSSDqStr: tCSSStr,
++ stateCSSSqStr: tCSSStr,
++ stateCSSDqURL: tCSSStr,
++ stateCSSSqURL: tCSSStr,
++ stateCSSURL: tCSSStr,
++ stateCSSBlockCmt: tBlockCmt,
++ stateCSSLineCmt: tLineCmt,
++ stateError: tError,
+ }
+
+ var commentStart = []byte("<!--")
+@@ -268,7 +270,7 @@ func tURL(c context, s []byte) (context, int) {
+
+ // tJS is the context transition function for the JS state.
+ func tJS(c context, s []byte) (context, int) {
+- i := bytes.IndexAny(s, "\"`'/")
++ i := bytes.IndexAny(s, "\"`'/<-#")
+ if i == -1 {
+ // Entire input is non string, comment, regexp tokens.
+ c.jsCtx = nextJSCtx(s, c.jsCtx)
+@@ -298,6 +300,26 @@ func tJS(c context, s []byte) (context, int) {
+ err: errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]),
+ }, len(s)
+ }
++ // ECMAScript supports HTML style comments for legacy reasons, see Appendix
++ // B.1.1 "HTML-like Comments". The handling of these comments is somewhat
++ // confusing. Multi-line comments are not supported, i.e. anything on lines
++ // between the opening and closing tokens is not considered a comment, but
++ // anything following the opening or closing token, on the same line, is
++ // ignored. As such we simply treat any line prefixed with "<!--" or "-->"
++ // as if it were actually prefixed with "//" and move on.
++ case '<':
++ if i+3 < len(s) && bytes.Equal(commentStart, s[i:i+4]) {
++ c.state, i = stateJSHTMLOpenCmt, i+3
++ }
++ case '-':
++ if i+2 < len(s) && bytes.Equal(commentEnd, s[i:i+3]) {
++ c.state, i = stateJSHTMLCloseCmt, i+2
++ }
++ // ECMAScript also supports "hashbang" comment lines, see Section 12.5.
++ case '#':
++ if i+1 < len(s) && s[i+1] == '!' {
++ c.state, i = stateJSLineCmt, i+1
++ }
+ default:
+ panic("unreachable")
+ }
+@@ -387,12 +409,12 @@ func tBlockCmt(c context, s []byte) (context, int) {
+ return c, i + 2
+ }
+
+-// tLineCmt is the context transition function for //comment states.
++// tLineCmt is the context transition function for //comment states, and the JS HTML-like comment state.
+ func tLineCmt(c context, s []byte) (context, int) {
+ var lineTerminators string
+ var endState state
+ switch c.state {
+- case stateJSLineCmt:
++ case stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt:
+ lineTerminators, endState = "\n\r\u2028\u2029", stateJS
+ case stateCSSLineCmt:
+ lineTerminators, endState = "\n\f\r", stateCSS
+--
+2.35.7
+
diff --git a/meta/recipes-devtools/go/go-1.21/CVE-2023-45289.patch b/meta/recipes-devtools/go/go-1.21/CVE-2023-45289.patch
new file mode 100644
index 0000000000..f8ac64472f
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.21/CVE-2023-45289.patch
@@ -0,0 +1,121 @@
+From 3a855208e3efed2e9d7c20ad023f1fa78afcc0be Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 11 Jan 2024 11:31:57 -0800
+Subject: [PATCH] [release-branch.go1.22] net/http, net/http/cookiejar: avoid
+ subdomain matches on IPv6 zones
+
+When deciding whether to forward cookies or sensitive headers
+across a redirect, do not attempt to interpret an IPv6 address
+as a domain name.
+
+Avoids a case where a maliciously-crafted redirect to an
+IPv6 address with a scoped addressing zone could be
+misinterpreted as a within-domain redirect. For example,
+we could interpret "::1%.www.example.com" as a subdomain
+of "www.example.com".
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes CVE-2023-45289
+Fixes #65859
+For #65065
+
+Change-Id: I8f463f59f0e700c8a18733d2b264a8bcb3a19599
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2131938
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2174344
+Reviewed-by: Carlos Amedee <amedee@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/569236
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/3a855208e3efed2e9d7c20ad023f1fa78afcc0be]
+CVE: CVE-2023-45289
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/net/http/client.go | 6 ++++++
+ src/net/http/client_test.go | 1 +
+ src/net/http/cookiejar/jar.go | 7 +++++++
+ src/net/http/cookiejar/jar_test.go | 10 ++++++++++
+ 4 files changed, 24 insertions(+)
+
+diff --git a/src/net/http/client.go b/src/net/http/client.go
+index 22db96b..b2dd445 100644
+--- a/src/net/http/client.go
++++ b/src/net/http/client.go
+@@ -1015,6 +1015,12 @@ func isDomainOrSubdomain(sub, parent string) bool {
+ if sub == parent {
+ return true
+ }
++ // If sub contains a :, it's probably an IPv6 address (and is definitely not a hostname).
++ // Don't check the suffix in this case, to avoid matching the contents of an IPv6 zone.
++ // For example, "::1%.www.example.com" is not a subdomain of "www.example.com".
++ if strings.ContainsAny(sub, ":%") {
++ return false
++ }
+ // If sub is "foo.example.com" and parent is "example.com",
+ // that means sub must end in "."+parent.
+ // Do it without allocating.
+diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go
+index 9788c7a..7a0aa53 100644
+--- a/src/net/http/client_test.go
++++ b/src/net/http/client_test.go
+@@ -1729,6 +1729,7 @@ func TestShouldCopyHeaderOnRedirect(t *testing.T) {
+ {"cookie2", "http://foo.com/", "http://bar.com/", false},
+ {"authorization", "http://foo.com/", "http://bar.com/", false},
+ {"www-authenticate", "http://foo.com/", "http://bar.com/", false},
++ {"authorization", "http://foo.com/", "http://[::1%25.foo.com]/", false},
+
+ // But subdomains should work:
+ {"www-authenticate", "http://foo.com/", "http://foo.com/", true},
+diff --git a/src/net/http/cookiejar/jar.go b/src/net/http/cookiejar/jar.go
+index e6583da..f2cf9c2 100644
+--- a/src/net/http/cookiejar/jar.go
++++ b/src/net/http/cookiejar/jar.go
+@@ -362,6 +362,13 @@ func jarKey(host string, psl PublicSuffixList) string {
+
+ // isIP reports whether host is an IP address.
+ func isIP(host string) bool {
++ if strings.ContainsAny(host, ":%") {
++ // Probable IPv6 address.
++ // Hostnames can't contain : or %, so this is definitely not a valid host.
++ // Treating it as an IP is the more conservative option, and avoids the risk
++ // of interpreting ::1%.www.example.com as a subdomain of www.example.com.
++ return true
++ }
+ return net.ParseIP(host) != nil
+ }
+
+diff --git a/src/net/http/cookiejar/jar_test.go b/src/net/http/cookiejar/jar_test.go
+index 47fb1ab..fd8d40e 100644
+--- a/src/net/http/cookiejar/jar_test.go
++++ b/src/net/http/cookiejar/jar_test.go
+@@ -251,6 +251,7 @@ var isIPTests = map[string]bool{
+ "127.0.0.1": true,
+ "1.2.3.4": true,
+ "2001:4860:0:2001::68": true,
++ "::1%zone": true,
+ "example.com": false,
+ "1.1.1.300": false,
+ "www.foo.bar.net": false,
+@@ -613,6 +614,15 @@ var basicsTests = [...]jarTest{
+ {"http://www.host.test:1234/", "a=1"},
+ },
+ },
++ {
++ "IPv6 zone is not treated as a host.",
++ "https://example.com/",
++ []string{"a=1"},
++ "a=1",
++ []query{
++ {"https://[::1%25.example.com]:80/", ""},
++ },
++ },
+ }
+
+ func TestBasics(t *testing.T) {
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.21/CVE-2023-45290.patch b/meta/recipes-devtools/go/go-1.21/CVE-2023-45290.patch
new file mode 100644
index 0000000000..81f2123f34
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.21/CVE-2023-45290.patch
@@ -0,0 +1,270 @@
+From 041a47712e765e94f86d841c3110c840e76d8f82 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Tue, 16 Jan 2024 15:37:52 -0800
+Subject: [PATCH] [release-branch.go1.22] net/textproto, mime/multipart: avoid
+ unbounded read in MIME header
+
+mime/multipart.Reader.ReadForm allows specifying the maximum amount
+of memory that will be consumed by the form. While this limit is
+correctly applied to the parsed form data structure, it was not
+being applied to individual header lines in a form.
+
+For example, when presented with a form containing a header line
+that never ends, ReadForm will continue to read the line until it
+runs out of memory.
+
+Limit the amount of data consumed when reading a header.
+
+Fixes CVE-2023-45290
+Fixes #65850
+For #65383
+
+Change-Id: I7f9264d25752009e95f6b2c80e3d76aaf321d658
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2134435
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2174345
+Reviewed-by: Carlos Amedee <amedee@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/569237
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/041a47712e765e94f86d841c3110c840e76d8f82]
+CVE: CVE-2023-45290
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/mime/multipart/formdata_test.go | 42 +++++++++++++++++++++++++
+ src/net/textproto/reader.go | 48 ++++++++++++++++++++---------
+ src/net/textproto/reader_test.go | 12 ++++++++
+ 3 files changed, 87 insertions(+), 15 deletions(-)
+
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index c78eeb7..f729da6 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -421,6 +421,48 @@ func TestReadFormLimits(t *testing.T) {
+ }
+ }
+
++func TestReadFormEndlessHeaderLine(t *testing.T) {
++ for _, test := range []struct {
++ name string
++ prefix string
++ }{{
++ name: "name",
++ prefix: "X-",
++ }, {
++ name: "value",
++ prefix: "X-Header: ",
++ }, {
++ name: "continuation",
++ prefix: "X-Header: foo\r\n ",
++ }} {
++ t.Run(test.name, func(t *testing.T) {
++ const eol = "\r\n"
++ s := `--boundary` + eol
++ s += `Content-Disposition: form-data; name="a"` + eol
++ s += `Content-Type: text/plain` + eol
++ s += test.prefix
++ fr := io.MultiReader(
++ strings.NewReader(s),
++ neverendingReader('X'),
++ )
++ r := NewReader(fr, "boundary")
++ _, err := r.ReadForm(1 << 20)
++ if err != ErrMessageTooLarge {
++ t.Fatalf("ReadForm(1 << 20): %v, want ErrMessageTooLarge", err)
++ }
++ })
++ }
++}
++
++type neverendingReader byte
++
++func (r neverendingReader) Read(p []byte) (n int, err error) {
++ for i := range p {
++ p[i] = byte(r)
++ }
++ return len(p), nil
++}
++
+ func BenchmarkReadForm(b *testing.B) {
+ for _, test := range []struct {
+ name string
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index c6569c8..3ac4d4d 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -16,6 +16,10 @@ import (
+ "sync"
+ )
+
++// TODO: This should be a distinguishable error (ErrMessageTooLarge)
++// to allow mime/multipart to detect it.
++var errMessageTooLarge = errors.New("message too large")
++
+ // A Reader implements convenience methods for reading requests
+ // or responses from a text protocol network connection.
+ type Reader struct {
+@@ -37,13 +41,13 @@ func NewReader(r *bufio.Reader) *Reader {
+ // ReadLine reads a single line from r,
+ // eliding the final \n or \r\n from the returned string.
+ func (r *Reader) ReadLine() (string, error) {
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(-1)
+ return string(line), err
+ }
+
+ // ReadLineBytes is like ReadLine but returns a []byte instead of a string.
+ func (r *Reader) ReadLineBytes() ([]byte, error) {
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(-1)
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+@@ -52,7 +56,10 @@ func (r *Reader) ReadLineBytes() ([]byte, error) {
+ return line, err
+ }
+
+-func (r *Reader) readLineSlice() ([]byte, error) {
++// readLineSlice reads a single line from r,
++// up to lim bytes long (or unlimited if lim is less than 0),
++// eliding the final \n or \r\n from the returned string.
++func (r *Reader) readLineSlice(lim int64) ([]byte, error) {
+ r.closeDot()
+ var line []byte
+ for {
+@@ -60,6 +67,9 @@ func (r *Reader) readLineSlice() ([]byte, error) {
+ if err != nil {
+ return nil, err
+ }
++ if lim >= 0 && int64(len(line))+int64(len(l)) > lim {
++ return nil, errMessageTooLarge
++ }
+ // Avoid the copy if the first call produced a full line.
+ if line == nil && !more {
+ return l, nil
+@@ -92,7 +102,7 @@ func (r *Reader) readLineSlice() ([]byte, error) {
+ // Empty lines are never continued.
+ //
+ func (r *Reader) ReadContinuedLine() (string, error) {
+- line, err := r.readContinuedLineSlice(noValidation)
++ line, err := r.readContinuedLineSlice(-1, noValidation)
+ return string(line), err
+ }
+
+@@ -113,7 +123,7 @@ func trim(s []byte) []byte {
+ // ReadContinuedLineBytes is like ReadContinuedLine but
+ // returns a []byte instead of a string.
+ func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
+- line, err := r.readContinuedLineSlice(noValidation)
++ line, err := r.readContinuedLineSlice(-1, noValidation)
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+@@ -126,13 +136,14 @@ func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
+ // returning a byte slice with all lines. The validateFirstLine function
+ // is run on the first read line, and if it returns an error then this
+ // error is returned from readContinuedLineSlice.
+-func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([]byte, error) {
++// It reads up to lim bytes of data (or unlimited if lim is less than 0).
++func (r *Reader) readContinuedLineSlice(lim int64, validateFirstLine func([]byte) error) ([]byte, error) {
+ if validateFirstLine == nil {
+ return nil, fmt.Errorf("missing validateFirstLine func")
+ }
+
+ // Read the first line.
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(lim)
+ if err != nil {
+ return nil, err
+ }
+@@ -160,13 +171,21 @@ func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([
+ // copy the slice into buf.
+ r.buf = append(r.buf[:0], trim(line)...)
+
++ if lim < 0 {
++ lim = math.MaxInt64
++ }
++ lim -= int64(len(r.buf))
++
+ // Read continuation lines.
+ for r.skipSpace() > 0 {
+- line, err := r.readLineSlice()
++ r.buf = append(r.buf, ' ')
++ if int64(len(r.buf)) >= lim {
++ return nil, errMessageTooLarge
++ }
++ line, err := r.readLineSlice(lim - int64(len(r.buf)))
+ if err != nil {
+ break
+ }
+- r.buf = append(r.buf, ' ')
+ r.buf = append(r.buf, trim(line)...)
+ }
+ return r.buf, nil
+@@ -511,7 +530,8 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+
+ // The first line cannot start with a leading space.
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
+- line, err := r.readLineSlice()
++ const errorLimit = 80 // arbitrary limit on how much of the line we'll quote
++ line, err := r.readLineSlice(errorLimit)
+ if err != nil {
+ return m, err
+ }
+@@ -519,7 +539,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+ }
+
+ for {
+- kv, err := r.readContinuedLineSlice(mustHaveFieldNameColon)
++ kv, err := r.readContinuedLineSlice(maxMemory, mustHaveFieldNameColon)
+ if len(kv) == 0 {
+ return m, err
+ }
+@@ -540,7 +560,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+
+ maxHeaders--
+ if maxHeaders < 0 {
+- return nil, errors.New("message too large")
++ return nil, errMessageTooLarge
+ }
+
+ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
+@@ -567,9 +587,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+ }
+ maxMemory -= int64(len(value))
+ if maxMemory < 0 {
+- // TODO: This should be a distinguishable error (ErrMessageTooLarge)
+- // to allow mime/multipart to detect it.
+- return m, errors.New("message too large")
++ return m, errMessageTooLarge
+ }
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
+index 3ae0de1..db1ed91 100644
+--- a/src/net/textproto/reader_test.go
++++ b/src/net/textproto/reader_test.go
+@@ -34,6 +34,18 @@ func TestReadLine(t *testing.T) {
+ }
+ }
+
++func TestReadLineLongLine(t *testing.T) {
++ line := strings.Repeat("12345", 10000)
++ r := reader(line + "\r\n")
++ s, err := r.ReadLine()
++ if err != nil {
++ t.Fatalf("Line 1: %v", err)
++ }
++ if s != line {
++ t.Fatalf("%v-byte line does not match expected %v-byte line", len(s), len(line))
++ }
++}
++
+ func TestReadContinuedLine(t *testing.T) {
+ r := reader("line1\nline\n 2\nline3\n")
+ s, err := r.ReadContinuedLine()
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-binary-native_1.17.10.bb b/meta/recipes-devtools/go/go-binary-native_1.17.13.bb
index 0f49cebcb7..4ee0148417 100644
--- a/meta/recipes-devtools/go/go-binary-native_1.17.10.bb
+++ b/meta/recipes-devtools/go/go-binary-native_1.17.13.bb
@@ -8,8 +8,8 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707"
PROVIDES = "go-native"
SRC_URI = "https://dl.google.com/go/go${PV}.${BUILD_GOOS}-${BUILD_GOARCH}.tar.gz;name=go_${BUILD_GOTUPLE}"
-SRC_URI[go_linux_amd64.sha256sum] = "87fc728c9c731e2f74e4a999ef53cf07302d7ed3504b0839027bd9c10edaa3fd"
-SRC_URI[go_linux_arm64.sha256sum] = "649141201efa7195403eb1301b95dc79c5b3e65968986a391da1370521701b0c"
+SRC_URI[go_linux_amd64.sha256sum] = "4cdd2bc664724dc7db94ad51b503512c5ae7220951cac568120f64f8e94399fc"
+SRC_URI[go_linux_arm64.sha256sum] = "914daad3f011cc2014dea799bb7490442677e4ad6de0b2ac3ded6cee7e3f493d"
UPSTREAM_CHECK_URI = "https://golang.org/dl/"
UPSTREAM_CHECK_REGEX = "go(?P<pver>\d+(\.\d+)+)\.linux"
diff --git a/meta/recipes-devtools/go/go-cross-canadian_1.17.10.bb b/meta/recipes-devtools/go/go-cross-canadian_1.17.13.bb
index 7ac9449e47..7ac9449e47 100644
--- a/meta/recipes-devtools/go/go-cross-canadian_1.17.10.bb
+++ b/meta/recipes-devtools/go/go-cross-canadian_1.17.13.bb
diff --git a/meta/recipes-devtools/go/go-cross_1.17.10.bb b/meta/recipes-devtools/go/go-cross_1.17.13.bb
index 80b5a03f6c..80b5a03f6c 100644
--- a/meta/recipes-devtools/go/go-cross_1.17.10.bb
+++ b/meta/recipes-devtools/go/go-cross_1.17.13.bb
diff --git a/meta/recipes-devtools/go/go-crosssdk.inc b/meta/recipes-devtools/go/go-crosssdk.inc
index cd23cca2fe..766938670a 100644
--- a/meta/recipes-devtools/go/go-crosssdk.inc
+++ b/meta/recipes-devtools/go/go-crosssdk.inc
@@ -4,6 +4,8 @@ DEPENDS = "go-native virtual/${TARGET_PREFIX}gcc-crosssdk virtual/nativesdk-${TA
PN = "go-crosssdk-${SDK_SYS}"
PROVIDES = "virtual/${TARGET_PREFIX}go-crosssdk"
+export GOCACHE = "${B}/.cache"
+
do_configure[noexec] = "1"
do_compile() {
diff --git a/meta/recipes-devtools/go/go-crosssdk_1.17.10.bb b/meta/recipes-devtools/go/go-crosssdk_1.17.13.bb
index 1857c8a577..1857c8a577 100644
--- a/meta/recipes-devtools/go/go-crosssdk_1.17.10.bb
+++ b/meta/recipes-devtools/go/go-crosssdk_1.17.13.bb
diff --git a/meta/recipes-devtools/go/go-native_1.17.10.bb b/meta/recipes-devtools/go/go-native_1.17.13.bb
index 76c0ab73a6..ddf25b2c9b 100644
--- a/meta/recipes-devtools/go/go-native_1.17.10.bb
+++ b/meta/recipes-devtools/go/go-native_1.17.13.bb
@@ -5,7 +5,7 @@ require go-${PV}.inc
inherit native
-SRC_URI:append = " https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz;name=bootstrap;subdir=go1.4"
+SRC_URI += "https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz;name=bootstrap;subdir=go1.4"
SRC_URI[bootstrap.sha256sum] = "f4ff5b5eb3a3cae1c993723f3eab519c5bae18866b5e5f96fe1102f0cb5c3e52"
export GOOS = "${BUILD_GOOS}"
diff --git a/meta/recipes-devtools/go/go-runtime_1.17.10.bb b/meta/recipes-devtools/go/go-runtime_1.17.13.bb
index 63464a1501..63464a1501 100644
--- a/meta/recipes-devtools/go/go-runtime_1.17.10.bb
+++ b/meta/recipes-devtools/go/go-runtime_1.17.13.bb
diff --git a/meta/recipes-devtools/go/go_1.17.10.bb b/meta/recipes-devtools/go/go_1.17.13.bb
index 34dc89bb0c..bb57c1c48a 100644
--- a/meta/recipes-devtools/go/go_1.17.10.bb
+++ b/meta/recipes-devtools/go/go_1.17.13.bb
@@ -11,7 +11,7 @@ export CXX_FOR_TARGET = "g++"
# mips/rv64 doesn't support -buildmode=pie, so skip the QA checking for mips/riscv32 and its
# variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH',True) or 'riscv32' in d.getVar('TARGET_ARCH',True):
- d.appendVar('INSANE_SKIP:%s' % d.getVar('PN',True), " textrel")
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
+ d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel")
}
diff --git a/meta/recipes-devtools/json-c/json-c/CVE-2021-32292.patch b/meta/recipes-devtools/json-c/json-c/CVE-2021-32292.patch
new file mode 100644
index 0000000000..28da522115
--- /dev/null
+++ b/meta/recipes-devtools/json-c/json-c/CVE-2021-32292.patch
@@ -0,0 +1,30 @@
+From da22ae6541584068f8169315274016920da11d8b Mon Sep 17 00:00:00 2001
+From: Marc <34656315+MarcT512@users.noreply.github.com>
+Date: Fri, 7 Aug 2020 10:49:45 +0100
+Subject: [PATCH] Fix read past end of buffer
+
+Fixes: CVE-2021-32292
+Issue: https://github.com/json-c/json-c/issues/654
+
+Upstream-Status: Backport [4e9e44e5258dee7654f74948b0dd5da39c28beec]
+CVE: CVE-2021-32292
+
+Signed-off-by: Adrian Freihofer <adrian.freihofer@siemens.com>
+---
+ apps/json_parse.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/apps/json_parse.c b/apps/json_parse.c
+index bba4622..72b31a8 100644
+--- a/apps/json_parse.c
++++ b/apps/json_parse.c
+@@ -82,7 +82,8 @@ static int parseit(int fd, int (*callback)(struct json_object *))
+ int parse_end = json_tokener_get_parse_end(tok);
+ if (obj == NULL && jerr != json_tokener_continue)
+ {
+- char *aterr = &buf[start_pos + parse_end];
++ char *aterr = (start_pos + parse_end < sizeof(buf)) ?
++ &buf[start_pos + parse_end] : "";
+ fflush(stdout);
+ int fail_offset = total_read - ret + start_pos + parse_end;
+ fprintf(stderr, "Failed at offset %d: %s %c\n", fail_offset,
diff --git a/meta/recipes-devtools/json-c/json-c/run-ptest b/meta/recipes-devtools/json-c/json-c/run-ptest
new file mode 100644
index 0000000000..9ee6095ea2
--- /dev/null
+++ b/meta/recipes-devtools/json-c/json-c/run-ptest
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# This script is used to run json-c test suites
+cd tests
+
+ret_val=0
+for i in test*.test; do
+ # test_basic is not a standalone test case; it just
+ # contains common code shared by the other tests
+ if [ "$i" != "test_basic.test" ]; then
+ if ./$i > json-c_test.log 2>&1 ; then
+ echo PASS: $i
+ else
+ ret_val=1
+ echo FAIL: $i
+ fi
+ fi
+done
+
+exit $ret_val
diff --git a/meta/recipes-devtools/json-c/json-c_0.15.bb b/meta/recipes-devtools/json-c/json-c_0.15.bb
index a4673a2f0e..b3679e0135 100644
--- a/meta/recipes-devtools/json-c/json-c_0.15.bb
+++ b/meta/recipes-devtools/json-c/json-c_0.15.bb
@@ -4,15 +4,31 @@ HOMEPAGE = "https://github.com/json-c/json-c/wiki"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=de54b60fbbc35123ba193fea8ee216f2"
-SRC_URI = "https://s3.amazonaws.com/json-c_releases/releases/${BP}.tar.gz"
+SRC_URI = " \
+ https://s3.amazonaws.com/json-c_releases/releases/${BP}.tar.gz \
+ file://run-ptest \
+ file://CVE-2021-32292.patch \
+"
SRC_URI[sha256sum] = "b8d80a1ddb718b3ba7492916237bbf86609e9709fb007e7f7d4322f02341a4c6"
+# NVD uses full tag name including date
+CVE_VERSION = "0.15-20200726"
+
UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/tags"
UPSTREAM_CHECK_REGEX = "json-c-(?P<pver>\d+(\.\d+)+)-\d+"
RPROVIDES:${PN} = "libjson"
-inherit cmake
+inherit cmake ptest
+
+do_install_ptest() {
+ install -d ${D}/${PTEST_PATH}/tests
+ install ${B}/tests/test* ${D}/${PTEST_PATH}/tests
+ install ${S}/tests/*.test ${D}/${PTEST_PATH}/tests
+ install ${S}/tests/*.expected ${D}/${PTEST_PATH}/tests
+ install ${S}/tests/test-defs.sh ${D}/${PTEST_PATH}/tests
+ install ${S}/tests/valid*json ${D}/${PTEST_PATH}/tests
+}
BBCLASSEXTEND = "native nativesdk"
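A hedged usage sketch of how the new json-c ptest would typically be run on a target image; the ptest-runner invocation and the /usr/lib/json-c/ptest path are assumptions based on the default PTEST_PATH, not part of this change:

    # assumed: the ptest-runner package is installed in the image
    ptest-runner json-c
    # or run the installed script directly from the assumed default PTEST_PATH
    cd /usr/lib/json-c/ptest && ./run-ptest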
diff --git a/meta/recipes-devtools/libdnf/libdnf/0001-Fix-1558-Don-t-assume-inclusion-of-cstdint.patch b/meta/recipes-devtools/libdnf/libdnf/0001-Fix-1558-Don-t-assume-inclusion-of-cstdint.patch
new file mode 100644
index 0000000000..277fd9fbf6
--- /dev/null
+++ b/meta/recipes-devtools/libdnf/libdnf/0001-Fix-1558-Don-t-assume-inclusion-of-cstdint.patch
@@ -0,0 +1,56 @@
+From 779ea105564b6d717300af2fcb02a399737a536f Mon Sep 17 00:00:00 2001
+From: ctxnop <ctxnop@gmail.com>
+Date: Mon, 15 May 2023 19:30:16 +0200
+Subject: [PATCH] Fix #1558: Don't assume inclusion of cstdint
+
+With recent versions of gcc, some headers no longer include cstdint,
+but some sources assume that they do.
+
+Upstream-Status: Backport [https://github.com/rpm-software-management/libdnf/commit/779ea105564b6d717300af2fcb02a399737a536f]
+Signed-off-by: ctxnop <ctxnop@gmail.com>
+---
+ libdnf/conf/ConfigMain.hpp | 1 +
+ libdnf/conf/ConfigRepo.hpp | 1 +
+ libdnf/conf/OptionSeconds.hpp | 2 ++
+ 3 files changed, 4 insertions(+)
+
+diff --git a/libdnf/conf/ConfigMain.hpp b/libdnf/conf/ConfigMain.hpp
+index 19395c71..59f65c48 100644
+--- a/libdnf/conf/ConfigMain.hpp
++++ b/libdnf/conf/ConfigMain.hpp
+@@ -32,6 +32,7 @@
+ #include "OptionString.hpp"
+ #include "OptionStringList.hpp"
+
++#include <cstdint>
+ #include <memory>
+
+ namespace libdnf {
+diff --git a/libdnf/conf/ConfigRepo.hpp b/libdnf/conf/ConfigRepo.hpp
+index 2b198441..84cafbad 100644
+--- a/libdnf/conf/ConfigRepo.hpp
++++ b/libdnf/conf/ConfigRepo.hpp
+@@ -26,6 +26,7 @@
+ #include "ConfigMain.hpp"
+ #include "OptionChild.hpp"
+
++#include <cstdint>
+ #include <memory>
+
+ namespace libdnf {
+diff --git a/libdnf/conf/OptionSeconds.hpp b/libdnf/conf/OptionSeconds.hpp
+index dc714b23..a80a973f 100644
+--- a/libdnf/conf/OptionSeconds.hpp
++++ b/libdnf/conf/OptionSeconds.hpp
+@@ -25,6 +25,8 @@
+
+ #include "OptionNumber.hpp"
+
++#include <cstdint>
++
+ namespace libdnf {
+
+ /**
+--
+2.42.0
+
diff --git a/meta/recipes-devtools/libdnf/libdnf/0001-libdnf-conf-OptionNumber.hpp-add-missing-cstdint-inc.patch b/meta/recipes-devtools/libdnf/libdnf/0001-libdnf-conf-OptionNumber.hpp-add-missing-cstdint-inc.patch
new file mode 100644
index 0000000000..abb9504e6e
--- /dev/null
+++ b/meta/recipes-devtools/libdnf/libdnf/0001-libdnf-conf-OptionNumber.hpp-add-missing-cstdint-inc.patch
@@ -0,0 +1,33 @@
+From f8af6399c4f6a65a35d33ecc191bb14094dc9e18 Mon Sep 17 00:00:00 2001
+From: Sergei Trofimovich <slyich@gmail.com>
+Date: Fri, 27 May 2022 22:13:48 +0100
+Subject: [PATCH] libdnf/conf/OptionNumber.hpp: add missing <cstdint> include
+
+Without the change libdnf build fails on this week's gcc-13 snapshot as:
+
+ In file included from /build/libdnf/libdnf/conf/ConfigMain.hpp:29,
+ from /build/libdnf/libdnf/conf/ConfigMain.cpp:21:
+ /build/libdnf/libdnf/conf/OptionNumber.hpp:94:41: error: 'int32_t' is not a member of 'std'; did you mean 'int32_t'?
+ 94 | extern template class OptionNumber<std::int32_t>;
+ | ^~~~~~~
+
+Upstream-Status: Backport [https://github.com/rpm-software-management/libdnf/commit/f8af6399c4f6a65a35d33ecc191bb14094dc9e18]
+---
+ libdnf/conf/OptionNumber.hpp | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/libdnf/conf/OptionNumber.hpp b/libdnf/conf/OptionNumber.hpp
+index f7a7b3d6..a3a4dea6 100644
+--- a/libdnf/conf/OptionNumber.hpp
++++ b/libdnf/conf/OptionNumber.hpp
+@@ -25,6 +25,7 @@
+
+ #include "Option.hpp"
+
++#include <cstdint>
+ #include <functional>
+
+ namespace libdnf {
+--
+2.42.0
+
diff --git a/meta/recipes-devtools/libdnf/libdnf/0001-libdnf-utils-sqlite3-Sqlite3.hpp-add-missing-cstdint.patch b/meta/recipes-devtools/libdnf/libdnf/0001-libdnf-utils-sqlite3-Sqlite3.hpp-add-missing-cstdint.patch
new file mode 100644
index 0000000000..adde48ee46
--- /dev/null
+++ b/meta/recipes-devtools/libdnf/libdnf/0001-libdnf-utils-sqlite3-Sqlite3.hpp-add-missing-cstdint.patch
@@ -0,0 +1,36 @@
+From 24b5d7f154cac9e322dd3459f6d0a5016abbbb57 Mon Sep 17 00:00:00 2001
+From: Sergei Trofimovich <slyich@gmail.com>
+Date: Fri, 27 May 2022 22:12:07 +0100
+Subject: [PATCH] libdnf/utils/sqlite3/Sqlite3.hpp: add missing <cstdint>
+ include
+
+Without the change libdnf build fails on this week's gcc-13 snapshot as:
+
+ In file included from /build/libdnf/libdnf/sack/../transaction/Swdb.hpp:38,
+ from /build/libdnf/libdnf/sack/query.hpp:32,
+ from /build/libdnf/libdnf/dnf-sack-private.hpp:31,
+ from /build/libdnf/libdnf/hy-iutil.cpp:60:
+ /build/libdnf/libdnf/sack/../transaction/../utils/sqlite3/Sqlite3.hpp:100:33: error: 'std::int64_t' has not been declared
+ 100 | void bind(int pos, std::int64_t val)
+ | ^~~~~~~
+
+Upstream-Status: Backport [https://github.com/rpm-software-management/libdnf/commit/24b5d7f154cac9e322dd3459f6d0a5016abbbb57]
+---
+ libdnf/utils/sqlite3/Sqlite3.hpp | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/libdnf/utils/sqlite3/Sqlite3.hpp b/libdnf/utils/sqlite3/Sqlite3.hpp
+index 3a7da23c..0403bb33 100644
+--- a/libdnf/utils/sqlite3/Sqlite3.hpp
++++ b/libdnf/utils/sqlite3/Sqlite3.hpp
+@@ -27,6 +27,7 @@
+
+ #include <sqlite3.h>
+
++#include <cstdint>
+ #include <map>
+ #include <memory>
+ #include <stdexcept>
+--
+2.42.0
+
diff --git a/meta/recipes-devtools/libdnf/libdnf_0.66.0.bb b/meta/recipes-devtools/libdnf/libdnf_0.66.0.bb
index 2558f96851..bd06937ed8 100644
--- a/meta/recipes-devtools/libdnf/libdnf_0.66.0.bb
+++ b/meta/recipes-devtools/libdnf/libdnf_0.66.0.bb
@@ -11,6 +11,9 @@ SRC_URI = "git://github.com/rpm-software-management/libdnf;branch=dnf-4-master;p
file://enable_test_data_dir_set.patch \
file://0001-drop-FindPythonInstDir.cmake.patch \
file://0001-libdnf-dnf-context.cpp-do-not-try-to-access-BDB-data.patch \
+ file://0001-Fix-1558-Don-t-assume-inclusion-of-cstdint.patch \
+ file://0001-libdnf-utils-sqlite3-Sqlite3.hpp-add-missing-cstdint.patch \
+ file://0001-libdnf-conf-OptionNumber.hpp-add-missing-cstdint-inc.patch \
"
SRCREV = "add5d5418b140a86d08667dd2b14793093984875"
diff --git a/meta/recipes-devtools/llvm/llvm/0001-Support-Add-missing-cstdint-header-to-Signals.h.patch b/meta/recipes-devtools/llvm/llvm/0001-Support-Add-missing-cstdint-header-to-Signals.h.patch
new file mode 100644
index 0000000000..fdb6307ab5
--- /dev/null
+++ b/meta/recipes-devtools/llvm/llvm/0001-Support-Add-missing-cstdint-header-to-Signals.h.patch
@@ -0,0 +1,31 @@
+From a94bf34221fc4519bd8ec72560c2d363ffe2de4c Mon Sep 17 00:00:00 2001
+From: Sergei Trofimovich <slyich@gmail.com>
+Date: Mon, 23 May 2022 08:03:23 +0100
+Subject: [PATCH] [Support] Add missing <cstdint> header to Signals.h
+
+Without the change llvm build fails on this week's gcc-13 snapshot as:
+
+ [ 0%] Building CXX object lib/Support/CMakeFiles/LLVMSupport.dir/Signals.cpp.o
+ In file included from llvm/lib/Support/Signals.cpp:14:
+ llvm/include/llvm/Support/Signals.h:119:8: error: variable or field 'CleanupOnSignal' declared void
+ 119 | void CleanupOnSignal(uintptr_t Context);
+ | ^~~~~~~~~~~~~~~
+
+Upstream-Status: Backport [llvmorg-15.0.0 ff1681ddb303223973653f7f5f3f3435b48a1983]
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+ llvm/include/llvm/Support/Signals.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/llvm/include/llvm/Support/Signals.h b/llvm/include/llvm/Support/Signals.h
+index 44f5a750ff5c..937e0572d4a7 100644
+--- a/llvm/include/llvm/Support/Signals.h
++++ b/llvm/include/llvm/Support/Signals.h
+@@ -14,6 +14,7 @@
+ #ifndef LLVM_SUPPORT_SIGNALS_H
+ #define LLVM_SUPPORT_SIGNALS_H
+
++#include <cstdint>
+ #include <string>
+
+ namespace llvm {
diff --git a/meta/recipes-devtools/llvm/llvm_git.bb b/meta/recipes-devtools/llvm/llvm_git.bb
index 9400bf0821..cedbfb138e 100644
--- a/meta/recipes-devtools/llvm/llvm_git.bb
+++ b/meta/recipes-devtools/llvm/llvm_git.bb
@@ -32,6 +32,7 @@ SRC_URI = "git://github.com/llvm/llvm-project.git;branch=${BRANCH};protocol=http
file://0006-llvm-TargetLibraryInfo-Undefine-libc-functions-if-th.patch;striplevel=2 \
file://0007-llvm-allow-env-override-of-exe-path.patch;striplevel=2 \
file://0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch;striplevel=2 \
+ file://0001-Support-Add-missing-cstdint-header-to-Signals.h.patch;striplevel=2 \
"
UPSTREAM_CHECK_GITTAGREGEX = "llvmorg-(?P<pver>\d+(\.\d+)+)"
diff --git a/meta/recipes-devtools/log4cplus/log4cplus_2.0.7.bb b/meta/recipes-devtools/log4cplus/log4cplus_2.0.8.bb
index 3798b93f76..bbf4ce6218 100644
--- a/meta/recipes-devtools/log4cplus/log4cplus_2.0.7.bb
+++ b/meta/recipes-devtools/log4cplus/log4cplus_2.0.8.bb
@@ -9,7 +9,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=41e8e060c26822886b592ab4765c756b"
SRC_URI = "${SOURCEFORGE_MIRROR}/project/${BPN}/${BPN}-stable/${PV}/${BP}.tar.gz \
"
-SRC_URI[sha256sum] = "086451c7e7c582862cbd6c60d87bb6d9d63c4b65321dba85fa71766382f7ec6d"
+SRC_URI[sha256sum] = "cdc3c738e00be84d8d03b580816b9f12628ecc1d71e1395080c802615d2d9ced"
UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/log4cplus/files/log4cplus-stable/"
UPSTREAM_CHECK_REGEX = "log4cplus-stable/(?P<pver>\d+(\.\d+)+)/"
diff --git a/meta/recipes-devtools/lua/lua/CVE-2022-33099.patch b/meta/recipes-devtools/lua/lua/CVE-2022-33099.patch
new file mode 100644
index 0000000000..fe7b6065c2
--- /dev/null
+++ b/meta/recipes-devtools/lua/lua/CVE-2022-33099.patch
@@ -0,0 +1,61 @@
+From 42d40581dd919fb134c07027ca1ce0844c670daf Mon Sep 17 00:00:00 2001
+From: Roberto Ierusalimschy <roberto@inf.puc-rio.br>
+Date: Fri, 20 May 2022 13:14:33 -0300
+Subject: [PATCH] Save stack space while handling errors
+
+Because error handling (luaG_errormsg) uses slots from EXTRA_STACK,
+and some errors can recur (e.g., string overflow while creating an
+error message in 'luaG_runerror', or a C-stack overflow before calling
+the message handler), the code should use stack slots with parsimony.
+
+This commit fixes the bug "Lua-stack overflow when C stack overflows
+while handling an error".
+
+CVE: CVE-2022-33099
+
+Upstream-Status: Backport [https://github.com/lua/lua/commit/42d40581dd919fb134c07027ca1ce0844c670daf]
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ ldebug.c | 5 ++++-
+ lvm.c | 6 ++++--
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/src/ldebug.c
++++ b/src/ldebug.c
+@@ -824,8 +824,11 @@ l_noret luaG_runerror (lua_State *L, con
+ va_start(argp, fmt);
+ msg = luaO_pushvfstring(L, fmt, argp); /* format message */
+ va_end(argp);
+- if (isLua(ci)) /* if Lua function, add source:line information */
++ if (isLua(ci)) { /* if Lua function, add source:line information */
+ luaG_addinfo(L, msg, ci_func(ci)->p->source, getcurrentline(ci));
++ setobjs2s(L, L->top - 2, L->top - 1); /* remove 'msg' from the stack */
++ L->top--;
++ }
+ luaG_errormsg(L);
+ }
+
+--- a/src/lvm.c
++++ b/src/lvm.c
+@@ -656,8 +656,10 @@ void luaV_concat (lua_State *L, int tota
+ /* collect total length and number of strings */
+ for (n = 1; n < total && tostring(L, s2v(top - n - 1)); n++) {
+ size_t l = vslen(s2v(top - n - 1));
+- if (l_unlikely(l >= (MAX_SIZE/sizeof(char)) - tl))
++ if (l_unlikely(l >= (MAX_SIZE/sizeof(char)) - tl)) {
++ L->top = top - total; /* pop strings to avoid wasting stack */
+ luaG_runerror(L, "string length overflow");
++ }
+ tl += l;
+ }
+ if (tl <= LUAI_MAXSHORTLEN) { /* is result a short string? */
+@@ -672,7 +674,7 @@ void luaV_concat (lua_State *L, int tota
+ setsvalue2s(L, top - n, ts); /* create result */
+ }
+ total -= n-1; /* got 'n' strings to create 1 new */
+- L->top -= n-1; /* popped 'n' strings and pushed one */
++ L->top = top - (n - 1); /* popped 'n' strings and pushed one */
+ } while (total > 1); /* repeat until only 1 result left */
+ }
+
diff --git a/meta/recipes-devtools/lua/lua/lua.pc.in b/meta/recipes-devtools/lua/lua/lua.pc.in
index c27e86e85d..1fc288c4fe 100644
--- a/meta/recipes-devtools/lua/lua/lua.pc.in
+++ b/meta/recipes-devtools/lua/lua/lua.pc.in
@@ -1,6 +1,5 @@
-prefix=/usr
-libdir=${prefix}/lib
-includedir=${prefix}/include
+libdir=@LIBDIR@
+includedir=@INCLUDEDIR@
Name: Lua
Description: Lua language engine
diff --git a/meta/recipes-devtools/lua/lua_5.4.4.bb b/meta/recipes-devtools/lua/lua_5.4.4.bb
index d704841378..a39d888ec2 100644
--- a/meta/recipes-devtools/lua/lua_5.4.4.bb
+++ b/meta/recipes-devtools/lua/lua_5.4.4.bb
@@ -7,6 +7,7 @@ HOMEPAGE = "http://www.lua.org/"
SRC_URI = "http://www.lua.org/ftp/lua-${PV}.tar.gz;name=tarballsrc \
file://lua.pc.in \
file://CVE-2022-28805.patch \
+ file://CVE-2022-33099.patch \
${@bb.utils.contains('DISTRO_FEATURES', 'ptest', 'http://www.lua.org/tests/lua-${PV_testsuites}-tests.tar.gz;name=tarballtest file://run-ptest ', '', d)} \
"
@@ -45,7 +46,7 @@ do_install () {
install
install -d ${D}${libdir}/pkgconfig
- sed -e s/@VERSION@/${PV}/ ${WORKDIR}/lua.pc.in > ${WORKDIR}/lua.pc
+ sed -e s/@VERSION@/${PV}/ -e s#@LIBDIR@#${libdir}# -e s#@INCLUDEDIR@#${includedir}# ${WORKDIR}/lua.pc.in > ${WORKDIR}/lua.pc
install -m 0644 ${WORKDIR}/lua.pc ${D}${libdir}/pkgconfig/
rmdir ${D}${datadir}/lua/5.4
rmdir ${D}${datadir}/lua
@@ -56,3 +57,6 @@ do_install_ptest () {
}
BBCLASSEXTEND = "native nativesdk"
+
+inherit multilib_script
+MULTILIB_SCRIPTS = "${PN}-dev:${includedir}/luaconf.h"
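A hedged sketch of the lua.pc that the updated sed line would render for a typical target configuration; the /usr/lib and /usr/include values are illustrative assumptions, and the Version line is assumed to come from the @VERSION@ substitution rather than shown in this diff:

    libdir=/usr/lib
    includedir=/usr/include

    Name: Lua
    Description: Lua language engine
    Version: 5.4.4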
diff --git a/meta/recipes-devtools/meson/meson/meson-wrapper b/meta/recipes-devtools/meson/meson/meson-wrapper
index 8fafaad975..71c61db84f 100755
--- a/meta/recipes-devtools/meson/meson/meson-wrapper
+++ b/meta/recipes-devtools/meson/meson/meson-wrapper
@@ -5,7 +5,7 @@ if [ -z "$OECORE_NATIVE_SYSROOT" ]; then
fi
if [ -z "$SSL_CERT_DIR" ]; then
- export SSL_CERT_DIR="${OECORE_NATIVE_SYSROOT}/etc/ssl/certs/"
+ export SSL_CERT_DIR="$OECORE_NATIVE_SYSROOT/etc/ssl/certs/"
fi
# If these are set to a cross-compile path, meson will get confused and try to
@@ -13,7 +13,19 @@ fi
# config is already in meson.cross.
unset CC CXX CPP LD AR NM STRIP
+case "$1" in
+setup|configure|dist|install|introspect|init|test|wrap|subprojects|rewrite|compile|devenv|env2mfile|help) MESON_CMD="$1" ;;
+*) echo meson-wrapper: Implicit setup command assumed; MESON_CMD=setup ;;
+esac
+
+if [ "$MESON_CMD" = "setup" ]; then
+ MESON_SETUP_OPTS=" \
+ --cross-file="$OECORE_NATIVE_SYSROOT/usr/share/meson/${TARGET_PREFIX}meson.cross" \
+ --native-file="$OECORE_NATIVE_SYSROOT/usr/share/meson/meson.native" \
+ "
+ echo meson-wrapper: Running meson with setup options: \"$MESON_SETUP_OPTS\"
+fi
+
exec "$OECORE_NATIVE_SYSROOT/usr/bin/meson.real" \
- --cross-file "${OECORE_NATIVE_SYSROOT}/usr/share/meson/${TARGET_PREFIX}meson.cross" \
- --native-file "${OECORE_NATIVE_SYSROOT}/usr/share/meson/meson.native" \
- "$@"
+ "$@" \
+ $MESON_SETUP_OPTS
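A hedged sketch of how the rewritten wrapper dispatches a few illustrative invocations inside an SDK environment; the commands below are examples, not part of the patch:

    meson setup build          # recognized setup command: --cross-file/--native-file are appended
    meson compile -C build     # recognized non-setup subcommand: no setup options are added
    meson build                # unrecognized first argument: setup is assumed and the options are appended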
diff --git a/meta/recipes-devtools/mtd/mtd-utils_git.bb b/meta/recipes-devtools/mtd/mtd-utils_git.bb
index 3318277477..6a4f7b0688 100644
--- a/meta/recipes-devtools/mtd/mtd-utils_git.bb
+++ b/meta/recipes-devtools/mtd/mtd-utils_git.bb
@@ -11,9 +11,9 @@ inherit autotools pkgconfig update-alternatives
DEPENDS = "zlib e2fsprogs util-linux"
RDEPENDS:mtd-utils-tests += "bash"
-PV = "2.1.4"
+PV = "2.1.5"
-SRCREV = "c7f1bfa44a84d02061787e2f6093df5cc40b9f5c"
+SRCREV = "3f3b4cc6c3120107e7aaa21c6415772a255ac49c"
SRC_URI = "git://git.infradead.org/mtd-utils.git;branch=master \
file://add-exclusion-to-mkfs-jffs2-git-2.patch \
"
diff --git a/meta/recipes-devtools/nasm/nasm/CVE-2020-21528.patch b/meta/recipes-devtools/nasm/nasm/CVE-2020-21528.patch
new file mode 100644
index 0000000000..2303744540
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/CVE-2020-21528.patch
@@ -0,0 +1,47 @@
+From 93c774d482694643cafbc82578ac8b729fb5bc8b Mon Sep 17 00:00:00 2001
+From: Cyrill Gorcunov <gorcunov@gmail.com>
+Date: Wed, 4 Nov 2020 13:08:06 +0300
+Subject: [PATCH] BR3392637: output/outieee: Fix nil dereference
+
+The handling has been broken since commit 98578071.
+
+Upstream-Status: Backport [https://github.com/netwide-assembler/nasm/commit/93c774d482694643cafbc82578ac8b729fb5bc8b]
+
+CVE: CVE-2020-21528
+
+Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ output/outieee.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/output/outieee.c b/output/outieee.c
+index bff2f085..b3ccc5f6 100644
+--- a/output/outieee.c
++++ b/output/outieee.c
+@@ -795,6 +795,23 @@ static int32_t ieee_segment(char *name, int *bits)
+ define_label(name, seg->index + 1, 0L, false);
+ ieee_seg_needs_update = NULL;
+
++ /*
++ * In commit 98578071b9d71ecaa2344dd9c185237c1765041e
++ * we reworked labels significantly which in turn led
++ * to the case where seg->name = NULL here and we get
++ * nil dereference in next segments definitions.
++ *
++ * Let's placate this case with explicit name setting
++ * if labels engine didn't set it yet.
++ *
++ * FIXME: Need to revisit this moment if such fix doesn't
++ * break anything but since IEEE 695 format is veeery
++ * old I don't expect there are many users left. In worst
++ * case this should only lead to a memory leak.
++ */
++ if (!seg->name)
++ seg->name = nasm_strdup(name);
++
+ if (seg->use32)
+ *bits = 32;
+ else
+--
+2.40.0
diff --git a/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch b/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch
new file mode 100644
index 0000000000..1bd49c9fd9
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch
@@ -0,0 +1,104 @@
+From b37677f7e40276bd8f504584bcba2c092f1146a8 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Mon, 7 Nov 2022 10:26:03 -0800
+Subject: [PATCH] quote_for_pmake: fix counter underrun resulting in segfault
+
+while (nbs--) { ... } ends with nbs == -1. Rather than a minimal fix,
+introduce mempset() to make these kinds of errors less likely in the
+future.
+
+Fixes: https://bugzilla.nasm.us/show_bug.cgi?id=3392815
+Reported-by: <13579and24680@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-44370
+
+Reference to upstream patch:
+[https://github.com/netwide-assembler/nasm/commit/2d4e6952417ec6f08b6f135d2b5d0e19b7dae30d]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ asm/nasm.c | 12 +++++-------
+ configure.ac | 1 +
+ include/compiler.h | 7 +++++++
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/asm/nasm.c b/asm/nasm.c
+index 7a7f8b4..675cff4 100644
+--- a/asm/nasm.c
++++ b/asm/nasm.c
+@@ -1,6 +1,6 @@
+ /* ----------------------------------------------------------------------- *
+ *
+- * Copyright 1996-2020 The NASM Authors - All Rights Reserved
++ * Copyright 1996-2022 The NASM Authors - All Rights Reserved
+ * See the file AUTHORS included with the NASM distribution for
+ * the specific copyright holders.
+ *
+@@ -814,8 +814,7 @@ static char *quote_for_pmake(const char *str)
+ }
+
+ /* Convert N backslashes at the end of filename to 2N backslashes */
+- if (nbs)
+- n += nbs;
++ n += nbs;
+
+ os = q = nasm_malloc(n);
+
+@@ -824,10 +823,10 @@ static char *quote_for_pmake(const char *str)
+ switch (*p) {
+ case ' ':
+ case '\t':
+- while (nbs--)
+- *q++ = '\\';
++ q = mempset(q, '\\', nbs);
+ *q++ = '\\';
+ *q++ = *p;
++ nbs = 0;
+ break;
+ case '$':
+ *q++ = *p;
+@@ -849,9 +848,8 @@ static char *quote_for_pmake(const char *str)
+ break;
+ }
+ }
+- while (nbs--)
+- *q++ = '\\';
+
++ q = mempset(q, '\\', nbs);
+ *q = '\0';
+
+ return os;
+diff --git a/configure.ac b/configure.ac
+index 39680b1..940ebe2 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -199,6 +199,7 @@ AC_CHECK_FUNCS(strrchrnul)
+ AC_CHECK_FUNCS(iscntrl)
+ AC_CHECK_FUNCS(isascii)
+ AC_CHECK_FUNCS(mempcpy)
++AC_CHECK_FUNCS(mempset)
+
+ AC_CHECK_FUNCS(getuid)
+ AC_CHECK_FUNCS(getgid)
+diff --git a/include/compiler.h b/include/compiler.h
+index db3d6d6..b64da6a 100644
+--- a/include/compiler.h
++++ b/include/compiler.h
+@@ -256,6 +256,13 @@ static inline void *mempcpy(void *dst, const void *src, size_t n)
+ }
+ #endif
+
++#ifndef HAVE_MEMPSET
++static inline void *mempset(void *dst, int c, size_t n)
++{
++ return (char *)memset(dst, c, n) + n;
++}
++#endif
++
+ /*
+ * Hack to support external-linkage inline functions
+ */
+--
+2.40.0
diff --git a/meta/recipes-devtools/nasm/nasm/CVE-2022-46457.patch b/meta/recipes-devtools/nasm/nasm/CVE-2022-46457.patch
new file mode 100644
index 0000000000..3502d572cd
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/CVE-2022-46457.patch
@@ -0,0 +1,50 @@
+From c8af73112027fad0ecbb277e9cba257678c405af Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Wed, 7 Dec 2022 10:23:46 -0800
+Subject: [PATCH] outieee: fix segfault on empty input
+
+Fix the IEEE backend crashing if the input file is empty.
+
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+
+Upstream-Status: Backport [https://github.com/netwide-assembler/nasm/commit/c8af73112027fad0ecbb277e9cba257678c405af]
+CVE: CVE-2022-46457
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ output/outieee.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/output/outieee.c b/output/outieee.c
+index cdb8333..8bc5eaa 100644
+--- a/output/outieee.c
++++ b/output/outieee.c
+@@ -919,7 +919,7 @@ static void ieee_write_file(void)
+ * Write the section headers
+ */
+ seg = seghead;
+- if (!debuginfo && !strcmp(seg->name, "??LINE"))
++ if (!debuginfo && seg && !strcmp(seg->name, "??LINE"))
+ seg = seg->next;
+ while (seg) {
+ char buf[256];
+@@ -954,7 +954,7 @@ static void ieee_write_file(void)
+ /*
+ * write the start address if there is one
+ */
+- if (ieee_entry_seg) {
++ if (ieee_entry_seg && seghead) {
+ for (seg = seghead; seg; seg = seg->next)
+ if (seg->index == ieee_entry_seg)
+ break;
+@@ -1067,7 +1067,7 @@ static void ieee_write_file(void)
+ * put out section data;
+ */
+ seg = seghead;
+- if (!debuginfo && !strcmp(seg->name, "??LINE"))
++ if (!debuginfo && seg && !strcmp(seg->name, "??LINE"))
+ seg = seg->next;
+ while (seg) {
+ if (seg->currentpos) {
+--
+2.40.0
diff --git a/meta/recipes-devtools/nasm/nasm_2.15.05.bb b/meta/recipes-devtools/nasm/nasm_2.15.05.bb
index edc17aeebf..aba061f56f 100644
--- a/meta/recipes-devtools/nasm/nasm_2.15.05.bb
+++ b/meta/recipes-devtools/nasm/nasm_2.15.05.bb
@@ -8,6 +8,9 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe"
SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \
file://0001-stdlib-Add-strlcat.patch \
file://0002-Add-debug-prefix-map-option.patch \
+ file://CVE-2022-44370.patch \
+ file://CVE-2022-46457.patch \
+ file://CVE-2020-21528.patch \
"
SRC_URI[sha256sum] = "3c4b8339e5ab54b1bcb2316101f8985a5da50a3f9e504d43fa6f35668bee2fd0"
diff --git a/meta/recipes-devtools/ninja/ninja_1.10.2.bb b/meta/recipes-devtools/ninja/ninja_1.10.2.bb
index 7270321d6e..1509a54c9e 100644
--- a/meta/recipes-devtools/ninja/ninja_1.10.2.bb
+++ b/meta/recipes-devtools/ninja/ninja_1.10.2.bb
@@ -29,3 +29,6 @@ do_install() {
}
BBCLASSEXTEND = "native nativesdk"
+
+# This is a different Ninja
+CVE_CHECK_IGNORE += "CVE-2021-4336"
diff --git a/meta/recipes-devtools/opkg-utils/opkg-utils_0.5.0.bb b/meta/recipes-devtools/opkg-utils/opkg-utils_0.5.0.bb
index e72c171b92..b27e3ded33 100644
--- a/meta/recipes-devtools/opkg-utils/opkg-utils_0.5.0.bb
+++ b/meta/recipes-devtools/opkg-utils/opkg-utils_0.5.0.bb
@@ -7,12 +7,12 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \
file://opkg.py;beginline=2;endline=18;md5=ffa11ff3c15eb31c6a7ceaa00cc9f986"
PROVIDES += "${@bb.utils.contains('PACKAGECONFIG', 'update-alternatives', 'virtual/update-alternatives', '', d)}"
-SRC_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/${BPN}/snapshot/${BPN}-${PV}.tar.gz \
+SRC_URI = "git://git.yoctoproject.org/opkg-utils;protocol=https;branch=master \
file://0001-update-alternatives-correctly-match-priority.patch \
"
-UPSTREAM_CHECK_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils/refs/"
+SRCREV = "9239541f14a2529b9d01c0a253ab11afa2822dab"
-SRC_URI[sha256sum] = "55733c0f8ffde2bb4f9593cfd66a1f68e6a2f814e8e62f6fd78472911c818c32"
+S = "${WORKDIR}/git"
TARGET_CC_ARCH += "${LDFLAGS}"
diff --git a/meta/recipes-devtools/opkg/opkg_0.5.0.bb b/meta/recipes-devtools/opkg/opkg_0.5.0.bb
index e91d7250bc..7bddaa3016 100644
--- a/meta/recipes-devtools/opkg/opkg_0.5.0.bb
+++ b/meta/recipes-devtools/opkg/opkg_0.5.0.bb
@@ -46,7 +46,9 @@ EXTRA_OECONF:class-native = "--localstatedir=/${@os.path.relpath('${localstatedi
do_install:append () {
install -d ${D}${sysconfdir}/opkg
install -m 0644 ${WORKDIR}/opkg.conf ${D}${sysconfdir}/opkg/opkg.conf
- echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option info_dir ${OPKGLIBDIR}/opkg/info" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option status_file ${OPKGLIBDIR}/opkg/status" >>${D}${sysconfdir}/opkg/opkg.conf
# We need to create the lock directory
install -d ${D}${OPKGLIBDIR}/opkg
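A hedged sketch of the extra lines these echo commands leave in ${sysconfdir}/opkg/opkg.conf, assuming the default OPKGLIBDIR of /var/lib:

    option lists_dir /var/lib/opkg/lists
    option info_dir /var/lib/opkg/info
    option status_file /var/lib/opkg/status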
diff --git a/meta/recipes-devtools/patchelf/patchelf/handle-read-only-files.patch b/meta/recipes-devtools/patchelf/patchelf/handle-read-only-files.patch
deleted file mode 100644
index b755a263a4..0000000000
--- a/meta/recipes-devtools/patchelf/patchelf/handle-read-only-files.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From 682fb48c137b687477008b68863c2a0b73ed47d1 Mon Sep 17 00:00:00 2001
-From: Fabio Berton <fabio.berton@ossystems.com.br>
-Date: Fri, 9 Sep 2016 16:00:42 -0300
-Subject: [PATCH] handle read-only files
-
-Patch from:
-https://github.com/darealshinji/patchelf/commit/40e66392bc4b96e9b4eda496827d26348a503509
-
-Upstream-Status: Denied [https://github.com/NixOS/patchelf/pull/89]
-
-Signed-off-by: Fabio Berton <fabio.berton@ossystems.com.br>
-
----
- src/patchelf.cc | 16 +++++++++++++++-
- 1 file changed, 15 insertions(+), 1 deletion(-)
-
-Index: git/src/patchelf.cc
-===================================================================
---- git.orig/src/patchelf.cc
-+++ git/src/patchelf.cc
-@@ -534,9 +534,19 @@ void ElfFile<ElfFileParamNames>::sortShd
-
- static void writeFile(const std::string & fileName, const FileContents & contents)
- {
-+ struct stat st;
-+ int fd;
-+
- debug("writing %s\n", fileName.c_str());
-
-- int fd = open(fileName.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0777);
-+ if (stat(fileName.c_str(), &st) != 0)
-+ error("stat");
-+
-+ if (chmod(fileName.c_str(), 0600) != 0)
-+ error("chmod");
-+
-+ fd = open(fileName.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0777);
-+
- if (fd == -1)
- error("open");
-
-@@ -551,8 +561,6 @@ static void writeFile(const std::string
- bytesWritten += portion;
- }
-
-- if (close(fd) >= 0)
-- return;
- /*
- * Just ignore EINTR; a retry loop is the wrong thing to do.
- *
-@@ -561,9 +569,11 @@ static void writeFile(const std::string
- * http://utcc.utoronto.ca/~cks/space/blog/unix/CloseEINTR
- * https://sites.google.com/site/michaelsafyan/software-engineering/checkforeintrwheninvokingclosethinkagain
- */
-- if (errno == EINTR)
-- return;
-- error("close");
-+ if ((close(fd) < 0) && errno != EINTR)
-+ error("close");
-+
-+ if (chmod(fileName.c_str(), st.st_mode) != 0)
-+ error("chmod");
- }
-
-
diff --git a/meta/recipes-devtools/patchelf/patchelf_0.14.5.bb b/meta/recipes-devtools/patchelf/patchelf_0.14.5.bb
index 0fa2c00f1d..82c7e807ac 100644
--- a/meta/recipes-devtools/patchelf/patchelf_0.14.5.bb
+++ b/meta/recipes-devtools/patchelf/patchelf_0.14.5.bb
@@ -5,7 +5,6 @@ HOMEPAGE = "https://github.com/NixOS/patchelf"
LICENSE = "GPL-3.0-only"
SRC_URI = "git://github.com/NixOS/patchelf;protocol=https;branch=master \
- file://handle-read-only-files.patch \
"
SRCREV = "a35054504293f9ff64539850d1ed0bfd2f5399f2"
diff --git a/meta/recipes-devtools/perl-cross/files/0001-Makefile-check-the-file-if-patched-or-not.patch b/meta/recipes-devtools/perl-cross/files/0001-Makefile-check-the-file-if-patched-or-not.patch
index 8c8f3b717c..0ef9b27439 100644
--- a/meta/recipes-devtools/perl-cross/files/0001-Makefile-check-the-file-if-patched-or-not.patch
+++ b/meta/recipes-devtools/perl-cross/files/0001-Makefile-check-the-file-if-patched-or-not.patch
@@ -21,8 +21,8 @@ index f4a26f5..7bc748e 100644
# Original versions are not saved anymore; patch generally takes care of this,
# and if that fails, reaching for the source tarball is the safest option.
$(CROSSPATCHED): %.applied: %.patch
-- patch -p1 -i $< && touch $@
-+ test ! -f $@ && (patch -p1 -i $< && touch $@) || echo "$@ exist"
+- $(cpatch) -p1 -i $< && touch $@
++ test ! -f $@ && ($(cpatch) -p1 -i $< && touch $@) || echo "$@ exist"
# ---[ common ]-----------------------------------------------------------------
diff --git a/meta/recipes-devtools/perl-cross/perlcross_1.3.7.bb b/meta/recipes-devtools/perl-cross/perlcross_1.5.2.bb
index 99a9ca1027..ac4dff33bb 100644
--- a/meta/recipes-devtools/perl-cross/perlcross_1.3.7.bb
+++ b/meta/recipes-devtools/perl-cross/perlcross_1.5.2.bb
@@ -18,7 +18,7 @@ SRC_URI = "https://github.com/arsv/perl-cross/releases/download/${PV}/perl-cross
"
UPSTREAM_CHECK_URI = "https://github.com/arsv/perl-cross/releases/"
-SRC_URI[perl-cross.sha256sum] = "77f13ca84a63025053852331b72d4046c1f90ded98bd45ccedea738621907335"
+SRC_URI[perl-cross.sha256sum] = "584dc54c48dca25e032b676a15bef377c1fed9de318b4fc140292a5dbf326e90"
S = "${WORKDIR}/perl-cross-${PV}"
diff --git a/meta/recipes-devtools/perl/files/CVE-2023-31484.patch b/meta/recipes-devtools/perl/files/CVE-2023-31484.patch
new file mode 100644
index 0000000000..1f7cbd0da1
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2023-31484.patch
@@ -0,0 +1,29 @@
+From a625ec2cc3a0b6116c1f8b831d3480deb621c245 Mon Sep 17 00:00:00 2001
+From: Stig Palmquist <git@stig.io>
+Date: Tue, 28 Feb 2023 11:54:06 +0100
+Subject: [PATCH] Add verify_SSL=>1 to HTTP::Tiny to verify https server
+ identity
+
+Upstream-Status: Backport [https://github.com/andk/cpanpm/commit/9c98370287f4e709924aee7c58ef21c85289a7f0]
+
+CVE: CVE-2023-31484
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ cpan/CPAN/lib/CPAN/HTTP/Client.pm | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/cpan/CPAN/lib/CPAN/HTTP/Client.pm b/cpan/CPAN/lib/CPAN/HTTP/Client.pm
+index 4fc792c..a616fee 100644
+--- a/cpan/CPAN/lib/CPAN/HTTP/Client.pm
++++ b/cpan/CPAN/lib/CPAN/HTTP/Client.pm
+@@ -32,6 +32,7 @@ sub mirror {
+
+ my $want_proxy = $self->_want_proxy($uri);
+ my $http = HTTP::Tiny->new(
++ verify_SSL => 1,
+ $want_proxy ? (proxy => $self->{proxy}) : ()
+ );
+
+--
+2.40.0
diff --git a/meta/recipes-devtools/perl/files/CVE-2023-31486-0001.patch b/meta/recipes-devtools/perl/files/CVE-2023-31486-0001.patch
new file mode 100644
index 0000000000..d29996ddcb
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2023-31486-0001.patch
@@ -0,0 +1,215 @@
+From 77f557ef84698efeb6eed04e4a9704eaf85b741d
+From: Stig Palmquist <git@stig.io>
+Date: Mon Jun 5 16:46:22 2023 +0200
+Subject: [PATCH] Change verify_SSL default to 1, add ENV var to enable
+ insecure default - Changes the `verify_SSL` default parameter from `0` to `1`
+
+ Based on patch by Dominic Hargreaves:
+ https://salsa.debian.org/perl-team/interpreter/perl/-/commit/1490431e40e22052f75a0b3449f1f53cbd27ba92
+
+ CVE: CVE-2023-31486
+
+- Add check for `$ENV{PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT}` that
+ enables the previous insecure default behaviour if set to `1`.
+
+ This provides a workaround for users who encounter problems with the
+ new `verify_SSL` default.
+
+ Example to disable certificate checks:
+ ```
+ $ PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT=1 ./script.pl
+ ```
+
+- Updates to documentation:
+ - Describe changing the verify_SSL value
+ - Describe the escape-hatch environment variable
+ - Remove rationale for not enabling verify_SSL
+ - Add missing certificate search paths
+ - Replace "SSL" with "TLS/SSL" where appropriate
+ - Use "machine-in-the-middle" instead of "man-in-the-middle"
+
+Upstream-Status: Backport [https://github.com/chansen/p5-http-tiny/commit/77f557ef84698efeb6eed04e4a9704eaf85b741d]
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ cpan/HTTP-Tiny/lib/HTTP/Tiny.pm | 86 ++++++++++++++++++++++-----------
+ 1 file changed, 57 insertions(+), 29 deletions(-)
+
+diff --git a/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm b/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm
+index 5803e45..1808c41 100644
+--- a/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm
++++ b/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm
+@@ -39,10 +39,14 @@ sub _croak { require Carp; Carp::croak(@_) }
+ #pod C<$ENV{no_proxy}> —)
+ #pod * C<timeout> — Request timeout in seconds (default is 60) If a socket open,
+ #pod read or write takes longer than the timeout, an exception is thrown.
+-#pod * C<verify_SSL> — A boolean that indicates whether to validate the SSL
+-#pod certificate of an C<https> — connection (default is false)
++#pod * C<verify_SSL> — A boolean that indicates whether to validate the TLS/SSL
++#pod certificate of an C<https> — connection (default is true). Changed from false
++#pod to true in version 0.083.
+ #pod * C<SSL_options> — A hashref of C<SSL_*> — options to pass through to
+ #pod L<IO::Socket::SSL>
++#pod * C<$ENV{PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT}> - Changes the default
++#pod certificate verification behavior to not check server identity if set to 1.
++#pod Only effective if C<verify_SSL> is not set. Added in version 0.083.
+ #pod
+ #pod Passing an explicit C<undef> for C<proxy>, C<http_proxy> or C<https_proxy> will
+ #pod prevent getting the corresponding proxies from the environment.
+@@ -108,11 +112,17 @@ sub timeout {
+ sub new {
+ my($class, %args) = @_;
+
++ # Support lower case verify_ssl argument, but only if verify_SSL is not
++ # true.
++ if ( exists $args{verify_ssl} ) {
++ $args{verify_SSL} ||= $args{verify_ssl};
++ }
++
+ my $self = {
+ max_redirect => 5,
+ timeout => defined $args{timeout} ? $args{timeout} : 60,
+ keep_alive => 1,
+- verify_SSL => $args{verify_SSL} || $args{verify_ssl} || 0, # no verification by default
++ verify_SSL => defined $args{verify_SSL} ? $args{verify_SSL} : _verify_SSL_default(),
+ no_proxy => $ENV{no_proxy},
+ };
+
+@@ -131,6 +141,13 @@ sub new {
+ return $self;
+ }
+
++sub _verify_SSL_default {
++ my ($self) = @_;
++ # Check if insecure default certificate verification behaviour has been
++ # changed by the user by setting PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT=1
++ return (($ENV{PERL_HTTP_TINY_INSECURE_BY_DEFAULT} || '') eq '1') ? 0 : 1;
++}
++
+ sub _set_proxies {
+ my ($self) = @_;
+
+@@ -1038,7 +1055,7 @@ sub new {
+ timeout => 60,
+ max_line_size => 16384,
+ max_header_lines => 64,
+- verify_SSL => 0,
++ verify_SSL => HTTP::Tiny::_verify_SSL_default(),
+ SSL_options => {},
+ %args
+ }, $class;
+@@ -2009,11 +2026,11 @@ proxy
+ timeout
+ verify_SSL
+
+-=head1 SSL SUPPORT
++=head1 TLS/SSL SUPPORT
+
+ Direct C<https> connections are supported only if L<IO::Socket::SSL> 1.56 or
+ greater and L<Net::SSLeay> 1.49 or greater are installed. An exception will be
+-thrown if new enough versions of these modules are not installed or if the SSL
++thrown if new enough versions of these modules are not installed or if the TLS
+ encryption fails. You can also use C<HTTP::Tiny::can_ssl()> utility function
+ that returns boolean to see if the required modules are installed.
+
+@@ -2021,7 +2038,7 @@ An C<https> connection may be made via an C<http> proxy that supports the CONNEC
+ command (i.e. RFC 2817). You may not proxy C<https> via a proxy that itself
+ requires C<https> to communicate.
+
+-SSL provides two distinct capabilities:
++TLS/SSL provides two distinct capabilities:
+
+ =over 4
+
+@@ -2035,24 +2052,17 @@ Verification of server identity
+
+ =back
+
+-B<By default, HTTP::Tiny does not verify server identity>.
+-
+-Server identity verification is controversial and potentially tricky because it
+-depends on a (usually paid) third-party Certificate Authority (CA) trust model
+-to validate a certificate as legitimate. This discriminates against servers
+-with self-signed certificates or certificates signed by free, community-driven
+-CA's such as L<CAcert.org|http://cacert.org>.
++B<By default, HTTP::Tiny verifies server identity>.
+
+-By default, HTTP::Tiny does not make any assumptions about your trust model,
+-threat level or risk tolerance. It just aims to give you an encrypted channel
+-when you need one.
++This was changed in version 0.083 due to security concerns. The previous default
++behavior can be enabled by setting C<$ENV{PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT}>
++to 1.
+
+-Setting the C<verify_SSL> attribute to a true value will make HTTP::Tiny verify
+-that an SSL connection has a valid SSL certificate corresponding to the host
+-name of the connection and that the SSL certificate has been verified by a CA.
+-Assuming you trust the CA, this will protect against a L<man-in-the-middle
+-attack|http://en.wikipedia.org/wiki/Man-in-the-middle_attack>. If you are
+-concerned about security, you should enable this option.
++Verification is done by checking that the TLS/SSL connection has a valid
++certificate corresponding to the host name of the connection and that the
++certificate has been verified by a CA. Assuming you trust the CA, this will
++protect against L<machine-in-the-middle
++attacks|http://en.wikipedia.org/wiki/Machine-in-the-middle_attack>.
+
+ Certificate verification requires a file containing trusted CA certificates.
+
+@@ -2060,9 +2070,7 @@ If the environment variable C<SSL_CERT_FILE> is present, HTTP::Tiny
+ will try to find a CA certificate file in that location.
+
+ If the L<Mozilla::CA> module is installed, HTTP::Tiny will use the CA file
+-included with it as a source of trusted CA's. (This means you trust Mozilla,
+-the author of Mozilla::CA, the CPAN mirror where you got Mozilla::CA, the
+-toolchain used to install it, and your operating system security, right?)
++included with it as a source of trusted CA's.
+
+ If that module is not available, then HTTP::Tiny will search several
+ system-specific default locations for a CA certificate file:
+@@ -2081,13 +2089,33 @@ system-specific default locations for a CA certificate file:
+
+ /etc/ssl/ca-bundle.pem
+
++=item *
++
++/etc/openssl/certs/ca-certificates.crt
++
++=item *
++
++/etc/ssl/cert.pem
++
++=item *
++
++/usr/local/share/certs/ca-root-nss.crt
++
++=item *
++
++/etc/pki/tls/cacert.pem
++
++=item *
++
++/etc/certs/ca-certificates.crt
++
+ =back
+
+ An exception will be raised if C<verify_SSL> is true and no CA certificate file
+ is available.
+
+-If you desire complete control over SSL connections, the C<SSL_options> attribute
+-lets you provide a hash reference that will be passed through to
++If you desire complete control over TLS/SSL connections, the C<SSL_options>
++attribute lets you provide a hash reference that will be passed through to
+ C<IO::Socket::SSL::start_SSL()>, overriding any options set by HTTP::Tiny. For
+ example, to provide your own trusted CA file:
+
+@@ -2097,7 +2125,7 @@ example, to provide your own trusted CA file:
+
+ The C<SSL_options> attribute could also be used for such things as providing a
+ client certificate for authentication to a server or controlling the choice of
+-cipher used for the SSL connection. See L<IO::Socket::SSL> documentation for
++cipher used for the TLS/SSL connection. See L<IO::Socket::SSL> documentation for
+ details.
+
+ =head1 PROXY SUPPORT
+--
+2.40.0
diff --git a/meta/recipes-devtools/perl/files/CVE-2023-31486-0002.patch b/meta/recipes-devtools/perl/files/CVE-2023-31486-0002.patch
new file mode 100644
index 0000000000..45452be389
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2023-31486-0002.patch
@@ -0,0 +1,36 @@
+From a22785783b17cbaa28afaee4a024d81a1903701d
+From: Stig Palmquist <git@stig.io>
+Date: Sun Jun 18 11:36:05 2023 +0200
+Subject: [PATCH] Fix incorrect env var name for verify_SSL default
+
+The variable to override the verify_SSL default differed slightly in the
+documentation from what was checked for in the code.
+
+This commit makes the code use `PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT`
+as documented, instead of `PERL_HTTP_TINY_INSECURE_BY_DEFAULT` which was
+missing `SSL_`
+
+CVE: CVE-2023-31486
+
+Upstream-Status: Backport [https://github.com/chansen/p5-http-tiny/commit/a22785783b17cbaa28afaee4a024d81a1903701d]
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ cpan/HTTP-Tiny/lib/HTTP/Tiny.pm | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm b/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm
+index ebc34a1..65ac8ff 100644
+--- a/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm
++++ b/cpan/HTTP-Tiny/lib/HTTP/Tiny.pm
+@@ -148,7 +148,7 @@ sub _verify_SSL_default {
+ my ($self) = @_;
+ # Check if insecure default certificate verification behaviour has been
+ # changed by the user by setting PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT=1
+- return (($ENV{PERL_HTTP_TINY_INSECURE_BY_DEFAULT} || '') eq '1') ? 0 : 1;
++ return (($ENV{PERL_HTTP_TINY_SSL_INSECURE_BY_DEFAULT} || '') eq '1') ? 0 : 1;
+ }
+
+ sub _set_proxies {
+--
+2.40.0
diff --git a/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb b/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
index e2c79d962b..881d5e672e 100644
--- a/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
+++ b/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
@@ -37,6 +37,7 @@ EXTRA_CPAN_BUILD_FLAGS = "--create_packlist=0"
do_install:append () {
rm -rf ${D}${docdir}/perl/html
+ sed -i "s:^#!.*:#!/usr/bin/env perl:" ${D}${bindir}/config_data
}
do_install_ptest() {
diff --git a/meta/recipes-devtools/perl/perl-ptest.inc b/meta/recipes-devtools/perl/perl-ptest.inc
index 54c7807571..c233fab545 100644
--- a/meta/recipes-devtools/perl/perl-ptest.inc
+++ b/meta/recipes-devtools/perl/perl-ptest.inc
@@ -10,12 +10,12 @@ do_install_ptest () {
sed -e "s:\/usr\/local:${bindir}:g" -i cpan/version/t/*
sed -e "s:\/opt:\/usr:" -i Porting/add-package.pl
sed -e "s:\/local\/gnu\/:\/:" -i hints/cxux.sh
- tar -c --exclude=try --exclude=a.out --exclude='*.o' --exclude=libperl.so* --exclude=Makefile --exclude=makefile --exclude=hostperl \
+ tar -c --exclude=try --exclude=a.out --exclude='*.o' --exclude=libperl.so* --exclude=[Mm]akefile --exclude=hostperl \
--exclude=cygwin --exclude=os2 --exclude=djgpp --exclude=qnx --exclude=symbian --exclude=haiku \
--exclude=vms --exclude=vos --exclude=NetWare --exclude=amigaos4 --exclude=buildcustomize.pl \
--exclude='win32/config.*' --exclude=plan9 --exclude=README.plan9 --exclude=perlplan9.pod --exclude=Configure \
--exclude=veryclean.sh --exclude=realclean.sh --exclude=getioctlsizes \
- --exclude=dl_aix.xs --exclude=sdbm.3 --exclude='cflags.SH' --exclude=makefile.old \
+ --exclude=dl_aix.xs --exclude=sdbm.3 --exclude='cflags.SH' --exclude=[Mm]akefile.old \
--exclude=miniperl --exclude=generate_uudmap --exclude=patches --exclude='config.log' * | ( cd ${D}${PTEST_PATH} && tar -x )
ln -sf ${bindir}/perl ${D}${PTEST_PATH}/t/perl
diff --git a/meta/recipes-devtools/perl/perl_5.34.1.bb b/meta/recipes-devtools/perl/perl_5.34.3.bb
index 42bcb8b1bc..215990c8fa 100644
--- a/meta/recipes-devtools/perl/perl_5.34.1.bb
+++ b/meta/recipes-devtools/perl/perl_5.34.3.bb
@@ -18,6 +18,9 @@ SRC_URI = "https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \
file://determinism.patch \
file://0001-cpan-Sys-Syslog-Makefile.PL-Fix-_PATH_LOG-for-determ.patch \
file://0001-Fix-build-with-gcc-12.patch \
+ file://CVE-2023-31484.patch \
+ file://CVE-2023-31486-0001.patch \
+ file://CVE-2023-31486-0002.patch \
"
SRC_URI:append:class-native = " \
file://perl-configpm-switch.patch \
@@ -26,7 +29,7 @@ SRC_URI:append:class-target = " \
file://encodefix.patch \
"
-SRC_URI[perl.sha256sum] = "357951a491b0ba1ce3611263922feec78ccd581dddc24a446b033e25acf242a1"
+SRC_URI[perl.sha256sum] = "5b12f62863332b2a5f54102af9cdf8c010877e4bf3294911edbd594b2a1e8ede"
S = "${WORKDIR}/perl-${PV}"
@@ -45,6 +48,9 @@ PACKAGECONFIG[gdbm] = ",-Ui_gdbm,gdbm"
# Don't generate comments in enc2xs output files. They are not reproducible
export ENC2XS_NO_COMMENTS = "1"
+# Duplicate of CVE-2023-47038, which has already been patched as of perl_5.34.3
+CVE_CHECK_IGNORE:append = " CVE-2023-47100"
+
do_configure:prepend() {
cp -rfp ${STAGING_DATADIR_NATIVE}/perl-cross/* ${S}
}
diff --git a/meta/recipes-devtools/pkgconf/pkgconf/0001-tuple-test-for-and-stop-string-processing-on-truncat.patch b/meta/recipes-devtools/pkgconf/pkgconf/0001-tuple-test-for-and-stop-string-processing-on-truncat.patch
new file mode 100644
index 0000000000..c6ec7c94e1
--- /dev/null
+++ b/meta/recipes-devtools/pkgconf/pkgconf/0001-tuple-test-for-and-stop-string-processing-on-truncat.patch
@@ -0,0 +1,75 @@
+From 9368831d360c0e47df55d1bb25c3517269320c5f Mon Sep 17 00:00:00 2001
+From: Ariadne Conill <ariadne@dereferenced.org>
+Date: Wed, 15 Mar 2023 16:12:43 +0800
+Subject: [PATCH] tuple: test for, and stop string processing, on truncation
+
+otherwise a buffer overflow occurs.
+this has been a bug in pkgconf since the beginning, it seems.
+instead of disclosing the bug correctly, a "hotshot" developer
+decided to blog about it instead. sigh.
+
+https://nullprogram.com/blog/2023/01/18/
+
+Upstream-Status: Backport [https://gitea.treehouse.systems/ariadne/pkgconf/commit/628b2b2bafa5d3a2017193ddf375093e70666059]
+CVE: CVE-2023-24056
+Signed-off-by: Hongxu Jia <hongxu.jia@eng.windriver.com>
+---
+ libpkgconf/tuple.c | 28 +++++++++++++++++++++++-----
+ 1 file changed, 23 insertions(+), 5 deletions(-)
+
+diff --git a/libpkgconf/tuple.c b/libpkgconf/tuple.c
+index 2d550d8..b831070 100644
+--- a/libpkgconf/tuple.c
++++ b/libpkgconf/tuple.c
+@@ -293,12 +293,21 @@ pkgconf_tuple_parse(const pkgconf_client_t *client, pkgconf_list_t *vars, const
+ }
+ }
+
++ size_t remain = PKGCONF_BUFSIZE - (bptr - buf);
+ ptr += (pptr - ptr);
+ kv = pkgconf_tuple_find_global(client, varname);
+ if (kv != NULL)
+ {
+- strncpy(bptr, kv, PKGCONF_BUFSIZE - (bptr - buf));
+- bptr += strlen(kv);
++ size_t nlen = pkgconf_strlcpy(bptr, kv, remain);
++ if (nlen > remain)
++ {
++ pkgconf_warn(client, "warning: truncating very long variable to 64KB\n");
++
++ bptr = buf + (PKGCONF_BUFSIZE - 1);
++ break;
++ }
++
++ bptr += nlen;
+ }
+ else
+ {
+@@ -306,12 +315,21 @@ pkgconf_tuple_parse(const pkgconf_client_t *client, pkgconf_list_t *vars, const
+
+ if (kv != NULL)
+ {
++ size_t nlen;
++
+ parsekv = pkgconf_tuple_parse(client, vars, kv);
++ nlen = pkgconf_strlcpy(bptr, parsekv, remain);
++ free(parsekv);
+
+- strncpy(bptr, parsekv, PKGCONF_BUFSIZE - (bptr - buf));
+- bptr += strlen(parsekv);
++ if (nlen > remain)
++ {
++ pkgconf_warn(client, "warning: truncating very long variable to 64KB\n");
+
+- free(parsekv);
++ bptr = buf + (PKGCONF_BUFSIZE - 1);
++ break;
++ }
++
++ bptr += nlen;
+ }
+ }
+ }
+--
+2.27.0
+
diff --git a/meta/recipes-devtools/pkgconf/pkgconf_1.8.0.bb b/meta/recipes-devtools/pkgconf/pkgconf_1.8.0.bb
index 887e15e28c..cad0a0fa4f 100644
--- a/meta/recipes-devtools/pkgconf/pkgconf_1.8.0.bb
+++ b/meta/recipes-devtools/pkgconf/pkgconf_1.8.0.bb
@@ -16,6 +16,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=2214222ec1a820bd6cc75167a56925e0"
SRC_URI = "\
https://distfiles.dereferenced.org/pkgconf/pkgconf-${PV}.tar.xz \
+ file://0001-tuple-test-for-and-stop-string-processing-on-truncat.patch \
file://pkg-config-wrapper \
file://pkg-config-native.in \
file://pkg-config-esdk.in \
diff --git a/meta/recipes-devtools/pseudo/files/glibc238.patch b/meta/recipes-devtools/pseudo/files/glibc238.patch
new file mode 100644
index 0000000000..76ca8c11eb
--- /dev/null
+++ b/meta/recipes-devtools/pseudo/files/glibc238.patch
@@ -0,0 +1,72 @@
+glibc 2.38 would include __isoc23_strtol and similar symbols. This is triggered by
+_GNU_SOURCE but we have to set that for other definitions. Therefore play with defines
+to turn this off within pseudo_wrappers.c. Elsewhere we can switch to _DEFAULT_SOURCE
+rather than _GNU_SOURCE.
+
+Upstream-Status: Pending
+
+Index: git/pseudo_wrappers.c
+===================================================================
+--- git.orig/pseudo_wrappers.c
++++ git/pseudo_wrappers.c
+@@ -6,6 +6,15 @@
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ */
++/* glibc 2.38 would include __isoc23_strtol and similar symbols. This is triggered by
++ * _GNU_SOURCE but we have to set that for other definitions. Therefore play with defines
++ * to turn this off.
++ */
++#include <features.h>
++#undef __GLIBC_USE_ISOC2X
++#undef __GLIBC_USE_C2X_STRTOL
++#define __GLIBC_USE_C2X_STRTOL 0
++
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <limits.h>
+Index: git/pseudo_util.c
+===================================================================
+--- git.orig/pseudo_util.c
++++ git/pseudo_util.c
+@@ -8,6 +8,14 @@
+ */
+ /* we need access to RTLD_NEXT for a horrible workaround */
+ #define _GNU_SOURCE
++/* glibc 2.38 would include __isoc23_strtol and similar symbols. This is triggered by
++ * _GNU_SOURCE but we have to set that for other definitions. Therefore play with defines
++ * to turn this off.
++ */
++#include <features.h>
++#undef __GLIBC_USE_ISOC2X
++#undef __GLIBC_USE_C2X_STRTOL
++#define __GLIBC_USE_C2X_STRTOL 0
+
+ #include <ctype.h>
+ #include <errno.h>
+Index: git/pseudolog.c
+===================================================================
+--- git.orig/pseudolog.c
++++ git/pseudolog.c
+@@ -8,7 +8,7 @@
+ */
+ /* We need _XOPEN_SOURCE for strptime(), but if we define that,
+ * we then don't get S_IFSOCK... _GNU_SOURCE turns on everything. */
+-#define _GNU_SOURCE
++#define _DEFAULT_SOURCE
+
+ #include <ctype.h>
+ #include <limits.h>
+Index: git/pseudo_client.c
+===================================================================
+--- git.orig/pseudo_client.c
++++ git/pseudo_client.c
+@@ -6,7 +6,7 @@
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ */
+-#define _GNU_SOURCE
++#define _DEFAULT_SOURCE
+
+ #include <stdio.h>
+ #include <signal.h>
diff --git a/meta/recipes-devtools/pseudo/pseudo_git.bb b/meta/recipes-devtools/pseudo/pseudo_git.bb
index e7ef6a730c..4dd9156238 100644
--- a/meta/recipes-devtools/pseudo/pseudo_git.bb
+++ b/meta/recipes-devtools/pseudo/pseudo_git.bb
@@ -2,6 +2,7 @@ require pseudo.inc
SRC_URI = "git://git.yoctoproject.org/pseudo;branch=oe-core \
file://0001-configure-Prune-PIE-flags.patch \
+ file://glibc238.patch \
file://fallback-passwd \
file://fallback-group \
"
@@ -13,7 +14,7 @@ SRC_URI:append:class-nativesdk = " \
file://older-glibc-symbols.patch"
SRC_URI[prebuilt.sha256sum] = "ed9f456856e9d86359f169f46a70ad7be4190d6040282b84c8d97b99072485aa"
-SRCREV = "2b4b88eb513335b0ece55fe51854693d9b20de35"
+SRCREV = "c9670c27ff67ab899007ce749254b16091577e55"
S = "${WORKDIR}/git"
PV = "1.9.0+git${SRCPV}"
diff --git a/meta/recipes-devtools/python/python3-certifi/CVE-2022-23491.patch b/meta/recipes-devtools/python/python3-certifi/CVE-2022-23491.patch
new file mode 100644
index 0000000000..94ca254549
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-certifi/CVE-2022-23491.patch
@@ -0,0 +1,230 @@
+From 167413eefa9482a7777b3ccdcc70e511ef5fcc2b Mon Sep 17 00:00:00 2001
+From: Narpat Mali <narpat.mali@windriver.com>
+Date: Thu, 2 Feb 2023 12:57:06 +0000
+Subject: [PATCH] Certifi is a curated collection of Root Certificates for
+ validating the trustworthiness of SSL certificates while verifying the
+ identity of TLS hosts. Certifi 2022.12.07 removes root certificates from
+ "TrustCor" from the root store. These are in the process of being removed
+ from Mozilla's trust store. TrustCor's root certificates are being removed
+ pursuant to an investigation prompted by media reporting that TrustCor's
+ ownership also operated a business that produced spyware. Conclusions of
+ Mozilla's investigation can be found in the linked google group discussion.
+
+CVE: CVE-2022-23491
+
+Upstream-Status: Backport [https://github.com/certifi/python-certifi/commit/9e9e840925d7b8e76c76fdac1fab7e6e88c1c3b8]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ certifi/cacert.pem | 181 ---------------------------------------------
+ 1 file changed, 181 deletions(-)
+
+diff --git a/certifi/cacert.pem b/certifi/cacert.pem
+index 6d0ccc0..6bae3e4 100644
+--- a/certifi/cacert.pem
++++ b/certifi/cacert.pem
+@@ -694,37 +694,6 @@ BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ ZQ==
+ -----END CERTIFICATE-----
+
+-# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+-# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+-# Label: "Network Solutions Certificate Authority"
+-# Serial: 116697915152937497490437556386812487904
+-# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
+-# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
+-# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
+------BEGIN CERTIFICATE-----
+-MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+-MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+-MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+-dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+-UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+-ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+-c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+-OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+-mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+-BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+-qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+-gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+-BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+-bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+-dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+-6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+-h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+-/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+-wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+-pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+------END CERTIFICATE-----
+-
+ # Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+ # Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+ # Label: "COMODO ECC Certification Authority"
+@@ -2385,46 +2354,6 @@ KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+ xwy8p2Fp8fc74SrL+SvzZpA3
+ -----END CERTIFICATE-----
+
+-# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+-# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+-# Label: "Staat der Nederlanden EV Root CA"
+-# Serial: 10000013
+-# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba
+-# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb
+-# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a
+------BEGIN CERTIFICATE-----
+-MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
+-TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
+-dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
+-MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
+-TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
+-b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
+-M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
+-UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
+-Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
+-rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
+-pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
+-j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
+-KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
+-/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
+-cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
+-1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
+-px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
+-/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
+-MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
+-eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
+-2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
+-v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
+-wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
+-CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
+-vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
+-Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
+-Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
+-eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
+-FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
+-7uzXLg==
+------END CERTIFICATE-----
+-
+ # Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+ # Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+ # Label: "IdenTrust Commercial Root CA 1"
+@@ -3032,116 +2961,6 @@ T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+ MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+ -----END CERTIFICATE-----
+
+-# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+-# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+-# Label: "TrustCor RootCert CA-1"
+-# Serial: 15752444095811006489
+-# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45
+-# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a
+-# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c
+------BEGIN CERTIFICATE-----
+-MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD
+-VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+-MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+-cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y
+-IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB
+-pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h
+-IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG
+-A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU
+-cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+-CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid
+-RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V
+-seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme
+-9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV
+-EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW
+-hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/
+-DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw
+-DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD
+-ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I
+-/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf
+-ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ
+-yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts
+-L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN
+-zl/HHk484IkzlQsPpTLWPFp5LBk=
+------END CERTIFICATE-----
+-
+-# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+-# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+-# Label: "TrustCor RootCert CA-2"
+-# Serial: 2711694510199101698
+-# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64
+-# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0
+-# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65
+------BEGIN CERTIFICATE-----
+-MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV
+-BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+-IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+-dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig
+-Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk
+-MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg
+-Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD
+-VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy
+-dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+-AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+
+-QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq
+-1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp
+-2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK
+-DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape
+-az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF
+-3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88
+-oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM
+-g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3
+-mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh
+-8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd
+-BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U
+-nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw
+-DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX
+-dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+
+-MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL
+-/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX
+-CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa
+-ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW
+-2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7
+-N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3
+-Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB
+-As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp
+-5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu
+-1uwJ
+------END CERTIFICATE-----
+-
+-# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+-# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+-# Label: "TrustCor ECA-1"
+-# Serial: 9548242946988625984
+-# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c
+-# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd
+-# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c
+------BEGIN CERTIFICATE-----
+-MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD
+-VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+-MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+-cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y
+-IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV
+-BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+-IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+-dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig
+-RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb
+-3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA
+-BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5
+-3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou
+-owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/
+-wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF
+-ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf
+-BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/
+-MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv
+-civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2
+-AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F
+-hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50
+-soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI
+-WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi
+-tJ/X5g==
+------END CERTIFICATE-----
+-
+ # Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+ # Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+ # Label: "SSL.com Root Certification Authority RSA"
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/python/python3-certifi/CVE-2023-37920.patch b/meta/recipes-devtools/python/python3-certifi/CVE-2023-37920.patch
new file mode 100644
index 0000000000..62187ec469
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-certifi/CVE-2023-37920.patch
@@ -0,0 +1,301 @@
+From 2dfddd74a75e4a1fa9bb901ba31a96e13b98a4e2 Mon Sep 17 00:00:00 2001
+From: Narpat Mali <narpat.mali@windriver.com>
+Date: Wed, 2 Aug 2023 16:05:04 +0000
+Subject: [PATCH] Certifi is a curated collection of Root Certificates for
+ validating the trustworthiness of SSL certificates while verifying the
+ identity of TLS hosts. Certifi prior to version 2023.07.22 recognizes
+ "e-Tugra" root certificates. e-Tugra's root certificates were subject to an
+ investigation prompted by reporting of security issues in their systems.
+ Certifi 2023.07.22 removes root certificates from "e-Tugra" from the root
+ store.
+
+CVE: CVE-2023-37920
+
+Upstream-Status: Backport [https://github.com/certifi/python-certifi/commit/8fb96ed81f71e7097ed11bc4d9b19afd7ea5c909]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ certifi/cacert.pem | 257 ++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 185 insertions(+), 72 deletions(-)
+
+diff --git a/certifi/cacert.pem b/certifi/cacert.pem
+index 6bae3e4..1bec256 100644
+--- a/certifi/cacert.pem
++++ b/certifi/cacert.pem
+@@ -879,34 +879,6 @@ uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+ XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+ -----END CERTIFICATE-----
+
+-# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
+-# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
+-# Label: "Hongkong Post Root CA 1"
+-# Serial: 1000
+-# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
+-# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
+-# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
+------BEGIN CERTIFICATE-----
+-MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+-FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+-Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+-A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+-b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+-AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+-jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+-PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+-ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+-nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+-q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+-MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+-mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+-7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+-oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+-EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+-fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+-AmvZWg==
+------END CERTIFICATE-----
+-
+ # Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+ # Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+ # Label: "SecureSign RootCA11"
+@@ -1836,50 +1808,6 @@ HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+ SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+ -----END CERTIFICATE-----
+
+-# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+-# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+-# Label: "E-Tugra Certification Authority"
+-# Serial: 7667447206703254355
+-# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
+-# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
+-# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
+------BEGIN CERTIFICATE-----
+-MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+-BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+-aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+-BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+-Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+-MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+-BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+-em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+-ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+-MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+-B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+-D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+-Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+-q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+-k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+-fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+-dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+-ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+-zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+-rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+-U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+-Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+-XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+-Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+-HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+-GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+-77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
+-+GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+-vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+-FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+-yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+-AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+-y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+-NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+------END CERTIFICATE-----
+-
+ # Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+ # Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+ # Label: "T-TeleSec GlobalRoot Class 2"
+@@ -4179,3 +4107,188 @@ AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw
+ SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN
+ nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps
+ -----END CERTIFICATE-----
++
++# Issuer: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited
++# Subject: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited
++# Label: "Sectigo Public Server Authentication Root E46"
++# Serial: 88989738453351742415770396670917916916
++# MD5 Fingerprint: 28:23:f8:b2:98:5c:37:16:3b:3e:46:13:4e:b0:b3:01
++# SHA1 Fingerprint: ec:8a:39:6c:40:f0:2e:bc:42:75:d4:9f:ab:1c:1a:5b:67:be:d2:9a
++# SHA256 Fingerprint: c9:0f:26:f0:fb:1b:40:18:b2:22:27:51:9b:5c:a2:b5:3e:2c:a5:b3:be:5c:f1:8e:fe:1b:ef:47:38:0c:53:83
++-----BEGIN CERTIFICATE-----
++MIICOjCCAcGgAwIBAgIQQvLM2htpN0RfFf51KBC49DAKBggqhkjOPQQDAzBfMQsw
++CQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1T
++ZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwHhcN
++MjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEYMBYG
++A1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBT
++ZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA
++IgNiAAR2+pmpbiDt+dd34wc7qNs9Xzjoq1WmVk/WSOrsfy2qw7LFeeyZYX8QeccC
++WvkEN/U0NSt3zn8gj1KjAIns1aeibVvjS5KToID1AZTc8GgHHs3u/iVStSBDHBv+
++6xnOQ6OjQjBAMB0GA1UdDgQWBBTRItpMWfFLXyY4qp3W7usNw/upYTAOBgNVHQ8B
++Af8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNnADBkAjAn7qRa
++qCG76UeXlImldCBteU/IvZNeWBj7LRoAasm4PdCkT0RHlAFWovgzJQxC36oCMB3q
++4S6ILuH5px0CMk7yn2xVdOOurvulGu7t0vzCAxHrRVxgED1cf5kDW21USAGKcw==
++-----END CERTIFICATE-----
++
++# Issuer: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited
++# Subject: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited
++# Label: "Sectigo Public Server Authentication Root R46"
++# Serial: 156256931880233212765902055439220583700
++# MD5 Fingerprint: 32:10:09:52:00:d5:7e:6c:43:df:15:c0:b1:16:93:e5
++# SHA1 Fingerprint: ad:98:f9:f3:e4:7d:75:3b:65:d4:82:b3:a4:52:17:bb:6e:f5:e4:38
++# SHA256 Fingerprint: 7b:b6:47:a6:2a:ee:ac:88:bf:25:7a:a5:22:d0:1f:fe:a3:95:e0:ab:45:c7:3f:93:f6:56:54:ec:38:f2:5a:06
++-----BEGIN CERTIFICATE-----
++MIIFijCCA3KgAwIBAgIQdY39i658BwD6qSWn4cetFDANBgkqhkiG9w0BAQwFADBf
++MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQD
++Ey1TZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYw
++HhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEY
++MBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1Ymxp
++YyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB
++AQUAA4ICDwAwggIKAoICAQCTvtU2UnXYASOgHEdCSe5jtrch/cSV1UgrJnwUUxDa
++ef0rty2k1Cz66jLdScK5vQ9IPXtamFSvnl0xdE8H/FAh3aTPaE8bEmNtJZlMKpnz
++SDBh+oF8HqcIStw+KxwfGExxqjWMrfhu6DtK2eWUAtaJhBOqbchPM8xQljeSM9xf
++iOefVNlI8JhD1mb9nxc4Q8UBUQvX4yMPFF1bFOdLvt30yNoDN9HWOaEhUTCDsG3X
++ME6WW5HwcCSrv0WBZEMNvSE6Lzzpng3LILVCJ8zab5vuZDCQOc2TZYEhMbUjUDM3
++IuM47fgxMMxF/mL50V0yeUKH32rMVhlATc6qu/m1dkmU8Sf4kaWD5QazYw6A3OAS
++VYCmO2a0OYctyPDQ0RTp5A1NDvZdV3LFOxxHVp3i1fuBYYzMTYCQNFu31xR13NgE
++SJ/AwSiItOkcyqex8Va3e0lMWeUgFaiEAin6OJRpmkkGj80feRQXEgyDet4fsZfu
+++Zd4KKTIRJLpfSYFplhym3kT2BFfrsU4YjRosoYwjviQYZ4ybPUHNs2iTG7sijbt
++8uaZFURww3y8nDnAtOFr94MlI1fZEoDlSfB1D++N6xybVCi0ITz8fAr/73trdf+L
++HaAZBav6+CuBQug4urv7qv094PPK306Xlynt8xhW6aWWrL3DkJiy4Pmi1KZHQ3xt
++zwIDAQABo0IwQDAdBgNVHQ4EFgQUVnNYZJX5khqwEioEYnmhQBWIIUkwDgYDVR0P
++AQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAC9c
++mTz8Bl6MlC5w6tIyMY208FHVvArzZJ8HXtXBc2hkeqK5Duj5XYUtqDdFqij0lgVQ
++YKlJfp/imTYpE0RHap1VIDzYm/EDMrraQKFz6oOht0SmDpkBm+S8f74TlH7Kph52
++gDY9hAaLMyZlbcp+nv4fjFg4exqDsQ+8FxG75gbMY/qB8oFM2gsQa6H61SilzwZA
++Fv97fRheORKkU55+MkIQpiGRqRxOF3yEvJ+M0ejf5lG5Nkc/kLnHvALcWxxPDkjB
++JYOcCj+esQMzEhonrPcibCTRAUH4WAP+JWgiH5paPHxsnnVI84HxZmduTILA7rpX
++DhjvLpr3Etiga+kFpaHpaPi8TD8SHkXoUsCjvxInebnMMTzD9joiFgOgyY9mpFui
++TdaBJQbpdqQACj7LzTWb4OE4y2BThihCQRxEV+ioratF4yUQvNs+ZUH7G6aXD+u5
++dHn5HrwdVw1Hr8Mvn4dGp+smWg9WY7ViYG4A++MnESLn/pmPNPW56MORcr3Ywx65
++LvKRRFHQV80MNNVIIb/bE/FmJUNS0nAiNs2fxBx1IK1jcmMGDw4nztJqDby1ORrp
++0XZ60Vzk50lJLVU3aPAaOpg+VBeHVOmmJ1CJeyAvP/+/oYtKR5j/K3tJPsMpRmAY
++QqszKbrAKbkTidOIijlBO8n9pu0f9GBj39ItVQGL
++-----END CERTIFICATE-----
++
++# Issuer: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation
++# Subject: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation
++# Label: "SSL.com TLS RSA Root CA 2022"
++# Serial: 148535279242832292258835760425842727825
++# MD5 Fingerprint: d8:4e:c6:59:30:d8:fe:a0:d6:7a:5a:2c:2c:69:78:da
++# SHA1 Fingerprint: ec:2c:83:40:72:af:26:95:10:ff:0e:f2:03:ee:31:70:f6:78:9d:ca
++# SHA256 Fingerprint: 8f:af:7d:2e:2c:b4:70:9b:b8:e0:b3:36:66:bf:75:a5:dd:45:b5:de:48:0f:8e:a8:d4:bf:e6:be:bc:17:f2:ed
++-----BEGIN CERTIFICATE-----
++MIIFiTCCA3GgAwIBAgIQb77arXO9CEDii02+1PdbkTANBgkqhkiG9w0BAQsFADBO
++MQswCQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQD
++DBxTU0wuY29tIFRMUyBSU0EgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzQyMloX
++DTQ2MDgxOTE2MzQyMVowTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jw
++b3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgUlNBIFJvb3QgQ0EgMjAyMjCC
++AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANCkCXJPQIgSYT41I57u9nTP
++L3tYPc48DRAokC+X94xI2KDYJbFMsBFMF3NQ0CJKY7uB0ylu1bUJPiYYf7ISf5OY
++t6/wNr/y7hienDtSxUcZXXTzZGbVXcdotL8bHAajvI9AI7YexoS9UcQbOcGV0ins
++S657Lb85/bRi3pZ7QcacoOAGcvvwB5cJOYF0r/c0WRFXCsJbwST0MXMwgsadugL3
++PnxEX4MN8/HdIGkWCVDi1FW24IBydm5MR7d1VVm0U3TZlMZBrViKMWYPHqIbKUBO
++L9975hYsLfy/7PO0+r4Y9ptJ1O4Fbtk085zx7AGL0SDGD6C1vBdOSHtRwvzpXGk3
++R2azaPgVKPC506QVzFpPulJwoxJF3ca6TvvC0PeoUidtbnm1jPx7jMEWTO6Af77w
++dr5BUxIzrlo4QqvXDz5BjXYHMtWrifZOZ9mxQnUjbvPNQrL8VfVThxc7wDNY8VLS
+++YCk8OjwO4s4zKTGkH8PnP2L0aPP2oOnaclQNtVcBdIKQXTbYxE3waWglksejBYS
++d66UNHsef8JmAOSqg+qKkK3ONkRN0VHpvB/zagX9wHQfJRlAUW7qglFA35u5CCoG
++AtUjHBPW6dvbxrB6y3snm/vg1UYk7RBLY0ulBY+6uB0rpvqR4pJSvezrZ5dtmi2f
++gTIFZzL7SAg/2SW4BCUvAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j
++BBgwFoAU+y437uOEeicuzRk1sTN8/9REQrkwHQYDVR0OBBYEFPsuN+7jhHonLs0Z
++NbEzfP/UREK5MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAjYlt
++hEUY8U+zoO9opMAdrDC8Z2awms22qyIZZtM7QbUQnRC6cm4pJCAcAZli05bg4vsM
++QtfhWsSWTVTNj8pDU/0quOr4ZcoBwq1gaAafORpR2eCNJvkLTqVTJXojpBzOCBvf
++R4iyrT7gJ4eLSYwfqUdYe5byiB0YrrPRpgqU+tvT5TgKa3kSM/tKWTcWQA673vWJ
++DPFs0/dRa1419dvAJuoSc06pkZCmF8NsLzjUo3KUQyxi4U5cMj29TH0ZR6LDSeeW
++P4+a0zvkEdiLA9z2tmBVGKaBUfPhqBVq6+AL8BQx1rmMRTqoENjwuSfr98t67wVy
++lrXEj5ZzxOhWc5y8aVFjvO9nHEMaX3cZHxj4HCUp+UmZKbaSPaKDN7EgkaibMOlq
++bLQjk2UEqxHzDh1TJElTHaE/nUiSEeJ9DU/1172iWD54nR4fK/4huxoTtrEoZP2w
++AgDHbICivRZQIA9ygV/MlP+7mea6kMvq+cYMwq7FGc4zoWtcu358NFcXrfA/rs3q
++r5nsLFR+jM4uElZI7xc7P0peYNLcdDa8pUNjyw9bowJWCZ4kLOGGgYz+qxcs+sji
++Mho6/4UIyYOf8kpIEFR3N+2ivEC+5BB09+Rbu7nzifmPQdjH5FCQNYA+HLhNkNPU
++98OwoX6EyneSMSy4kLGCenROmxMmtNVQZlR4rmA=
++-----END CERTIFICATE-----
++
++# Issuer: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation
++# Subject: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation
++# Label: "SSL.com TLS ECC Root CA 2022"
++# Serial: 26605119622390491762507526719404364228
++# MD5 Fingerprint: 99:d7:5c:f1:51:36:cc:e9:ce:d9:19:2e:77:71:56:c5
++# SHA1 Fingerprint: 9f:5f:d9:1a:54:6d:f5:0c:71:f0:ee:7a:bd:17:49:98:84:73:e2:39
++# SHA256 Fingerprint: c3:2f:fd:9f:46:f9:36:d1:6c:36:73:99:09:59:43:4b:9a:d6:0a:af:bb:9e:7c:f3:36:54:f1:44:cc:1b:a1:43
++-----BEGIN CERTIFICATE-----
++MIICOjCCAcCgAwIBAgIQFAP1q/s3ixdAW+JDsqXRxDAKBggqhkjOPQQDAzBOMQsw
++CQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxT
++U0wuY29tIFRMUyBFQ0MgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzM0OFoXDTQ2
++MDgxOTE2MzM0N1owTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jwb3Jh
++dGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgRUNDIFJvb3QgQ0EgMjAyMjB2MBAG
++ByqGSM49AgEGBSuBBAAiA2IABEUpNXP6wrgjzhR9qLFNoFs27iosU8NgCTWyJGYm
++acCzldZdkkAZDsalE3D07xJRKF3nzL35PIXBz5SQySvOkkJYWWf9lCcQZIxPBLFN
++SeR7T5v15wj4A4j3p8OSSxlUgaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSME
++GDAWgBSJjy+j6CugFFR781a4Jl9nOAuc0DAdBgNVHQ4EFgQUiY8vo+groBRUe/NW
++uCZfZzgLnNAwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2gAMGUCMFXjIlbp
++15IkWE8elDIPDAI2wv2sdDJO4fscgIijzPvX6yv/N33w7deedWo1dlJF4AIxAMeN
++b0Igj762TVntd00pxCAgRWSGOlDGxK0tk/UYfXLtqc/ErFc2KAhl3zx5Zn6g6g==
++-----END CERTIFICATE-----
++
++# Issuer: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos
++# Subject: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos
++# Label: "Atos TrustedRoot Root CA ECC TLS 2021"
++# Serial: 81873346711060652204712539181482831616
++# MD5 Fingerprint: 16:9f:ad:f1:70:ad:79:d6:ed:29:b4:d1:c5:79:70:a8
++# SHA1 Fingerprint: 9e:bc:75:10:42:b3:02:f3:81:f4:f7:30:62:d4:8f:c3:a7:51:b2:dd
++# SHA256 Fingerprint: b2:fa:e5:3e:14:cc:d7:ab:92:12:06:47:01:ae:27:9c:1d:89:88:fa:cb:77:5f:a8:a0:08:91:4e:66:39:88:a8
++-----BEGIN CERTIFICATE-----
++MIICFTCCAZugAwIBAgIQPZg7pmY9kGP3fiZXOATvADAKBggqhkjOPQQDAzBMMS4w
++LAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgRUNDIFRMUyAyMDIxMQ0w
++CwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTI2MjNaFw00MTA0
++MTcwOTI2MjJaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBDQSBF
++Q0MgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMHYwEAYHKoZI
++zj0CAQYFK4EEACIDYgAEloZYKDcKZ9Cg3iQZGeHkBQcfl+3oZIK59sRxUM6KDP/X
++tXa7oWyTbIOiaG6l2b4siJVBzV3dscqDY4PMwL502eCdpO5KTlbgmClBk1IQ1SQ4
++AjJn8ZQSb+/Xxd4u/RmAo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR2
++KCXWfeBmmnoJsmo7jjPXNtNPojAOBgNVHQ8BAf8EBAMCAYYwCgYIKoZIzj0EAwMD
++aAAwZQIwW5kp85wxtolrbNa9d+F851F+uDrNozZffPc8dz7kUK2o59JZDCaOMDtu
++CCrCp1rIAjEAmeMM56PDr9NJLkaCI2ZdyQAUEv049OGYa3cpetskz2VAv9LcjBHo
++9H1/IISpQuQo
++-----END CERTIFICATE-----
++
++# Issuer: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos
++# Subject: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos
++# Label: "Atos TrustedRoot Root CA RSA TLS 2021"
++# Serial: 111436099570196163832749341232207667876
++# MD5 Fingerprint: d4:d3:46:b8:9a:c0:9c:76:5d:9e:3a:c3:b9:99:31:d2
++# SHA1 Fingerprint: 18:52:3b:0d:06:37:e4:d6:3a:df:23:e4:98:fb:5b:16:fb:86:74:48
++# SHA256 Fingerprint: 81:a9:08:8e:a5:9f:b3:64:c5:48:a6:f8:55:59:09:9b:6f:04:05:ef:bf:18:e5:32:4e:c9:f4:57:ba:00:11:2f
++-----BEGIN CERTIFICATE-----
++MIIFZDCCA0ygAwIBAgIQU9XP5hmTC/srBRLYwiqipDANBgkqhkiG9w0BAQwFADBM
++MS4wLAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgUlNBIFRMUyAyMDIx
++MQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTIxMTBaFw00
++MTA0MTcwOTIxMDlaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBD
++QSBSU0EgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMIICIjAN
++BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtoAOxHm9BYx9sKOdTSJNy/BBl01Z
++4NH+VoyX8te9j2y3I49f1cTYQcvyAh5x5en2XssIKl4w8i1mx4QbZFc4nXUtVsYv
++Ye+W/CBGvevUez8/fEc4BKkbqlLfEzfTFRVOvV98r61jx3ncCHvVoOX3W3WsgFWZ
++kmGbzSoXfduP9LVq6hdKZChmFSlsAvFr1bqjM9xaZ6cF4r9lthawEO3NUDPJcFDs
++GY6wx/J0W2tExn2WuZgIWWbeKQGb9Cpt0xU6kGpn8bRrZtkh68rZYnxGEFzedUln
++nkL5/nWpo63/dgpnQOPF943HhZpZnmKaau1Fh5hnstVKPNe0OwANwI8f4UDErmwh
++3El+fsqyjW22v5MvoVw+j8rtgI5Y4dtXz4U2OLJxpAmMkokIiEjxQGMYsluMWuPD
++0xeqqxmjLBvk1cbiZnrXghmmOxYsL3GHX0WelXOTwkKBIROW1527k2gV+p2kHYzy
++geBYBr3JtuP2iV2J+axEoctr+hbxx1A9JNr3w+SH1VbxT5Aw+kUJWdo0zuATHAR8
++ANSbhqRAvNncTFd+rrcztl524WWLZt+NyteYr842mIycg5kDcPOvdO3GDjbnvezB
++c6eUWsuSZIKmAMFwoW4sKeFYV+xafJlrJaSQOoD0IJ2azsct+bJLKZWD6TWNp0lI
++pw9MGZHQ9b8Q4HECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
++dEmZ0f+0emhFdcN+tNzMzjkz2ggwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB
++DAUAA4ICAQAjQ1MkYlxt/T7Cz1UAbMVWiLkO3TriJQ2VSpfKgInuKs1l+NsW4AmS
++4BjHeJi78+xCUvuppILXTdiK/ORO/auQxDh1MoSf/7OwKwIzNsAQkG8dnK/haZPs
++o0UvFJ/1TCplQ3IM98P4lYsU84UgYt1UU90s3BiVaU+DR3BAM1h3Egyi61IxHkzJ
++qM7F78PRreBrAwA0JrRUITWXAdxfG/F851X6LWh3e9NpzNMOa7pNdkTWwhWaJuyw
++xfW70Xp0wmzNxbVe9kzmWy2B27O3Opee7c9GslA9hGCZcbUztVdF5kJHdWoOsAgM
++rr3e97sPWD2PAzHoPYJQyi9eDF20l74gNAf0xBLh7tew2VktafcxBPTy+av5EzH4
++AXcOPUIjJsyacmdRIXrMPIWo6iFqO9taPKU0nprALN+AnCng33eU0aKAQv9qTFsR
++0PXNor6uzFFcw9VUewyu1rkGd4Di7wcaaMxZUa1+XGdrudviB0JbuAEFWDlN5LuY
++o7Ey7Nmj1m+UI/87tyll5gfp77YZ6ufCOB0yiJA8EytuzO+rdwY0d4RPcuSBhPm5
++dDTedk+SKlOxJTnbPP/lPqYO5Wue/9vsL3SD3460s6neFE3/MaNFcyT6lSnMEpcE
++oji2jbDwN/zIIX8/syQbPYtuzE2wFg2WHYMfRsCbvUOZ58SWLs5fyQ==
++-----END CERTIFICATE-----
+--
+2.40.0
diff --git a/meta/recipes-devtools/python/python3-certifi_2021.10.8.bb b/meta/recipes-devtools/python/python3-certifi_2021.10.8.bb
index 4c376da897..eb1574adf6 100644
--- a/meta/recipes-devtools/python/python3-certifi_2021.10.8.bb
+++ b/meta/recipes-devtools/python/python3-certifi_2021.10.8.bb
@@ -7,6 +7,10 @@ HOMEPAGE = " http://certifi.io/"
LICENSE = "ISC"
LIC_FILES_CHKSUM = "file://LICENSE;md5=67da0714c3f9471067b729eca6c9fbe8"
+SRC_URI += "file://CVE-2022-23491.patch \
+ file://CVE-2023-37920.patch \
+ "
+
SRC_URI[sha256sum] = "78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"
inherit pypi setuptools3
diff --git a/meta/recipes-devtools/python/python3-cryptography/CVE-2023-23931.patch b/meta/recipes-devtools/python/python3-cryptography/CVE-2023-23931.patch
new file mode 100644
index 0000000000..5fc4878978
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-cryptography/CVE-2023-23931.patch
@@ -0,0 +1,49 @@
+From 9fbf84efc861668755ab645530ec7be9cf3c6696 Mon Sep 17 00:00:00 2001
+From: Alex Gaynor <alex.gaynor@gmail.com>
+Date: Tue, 7 Feb 2023 11:34:18 -0500
+Subject: [PATCH] Don't allow update_into to mutate immutable objects (#8230)
+
+CVE: CVE-2023-23931
+
+Upstream-Status: Backport [https://github.com/pyca/cryptography/commit/9fbf84efc861668755ab645530ec7be9cf3c6696]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ src/cryptography/hazmat/backends/openssl/ciphers.py | 2 +-
+ tests/hazmat/primitives/test_ciphers.py | 8 ++++++++
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/src/cryptography/hazmat/backends/openssl/ciphers.py b/src/cryptography/hazmat/backends/openssl/ciphers.py
+index 286583f93..075d68fb9 100644
+--- a/src/cryptography/hazmat/backends/openssl/ciphers.py
++++ b/src/cryptography/hazmat/backends/openssl/ciphers.py
+@@ -156,7 +156,7 @@ class _CipherContext:
+ data_processed = 0
+ total_out = 0
+ outlen = self._backend._ffi.new("int *")
+- baseoutbuf = self._backend._ffi.from_buffer(buf)
++ baseoutbuf = self._backend._ffi.from_buffer(buf, require_writable=True)
+ baseinbuf = self._backend._ffi.from_buffer(data)
+
+ while data_processed != total_data_len:
+diff --git a/tests/hazmat/primitives/test_ciphers.py b/tests/hazmat/primitives/test_ciphers.py
+index 02127dd9c..bf3b047de 100644
+--- a/tests/hazmat/primitives/test_ciphers.py
++++ b/tests/hazmat/primitives/test_ciphers.py
+@@ -318,6 +318,14 @@ class TestCipherUpdateInto:
+ with pytest.raises(ValueError):
+ encryptor.update_into(b"testing", buf)
+
++ def test_update_into_immutable(self, backend):
++ key = b"\x00" * 16
++ c = ciphers.Cipher(AES(key), modes.ECB(), backend)
++ encryptor = c.encryptor()
++ buf = b"\x00" * 32
++ with pytest.raises((TypeError, BufferError)):
++ encryptor.update_into(b"testing", buf)
++
+ @pytest.mark.supported(
+ only_if=lambda backend: backend.cipher_supported(
+ AES(b"\x00" * 16), modes.GCM(b"\x00" * 12)
+--
+2.40.0
diff --git a/meta/recipes-devtools/python/python3-cryptography/CVE-2023-49083.patch b/meta/recipes-devtools/python/python3-cryptography/CVE-2023-49083.patch
new file mode 100644
index 0000000000..d398eea1d9
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-cryptography/CVE-2023-49083.patch
@@ -0,0 +1,53 @@
+From 627ac5e314303acc00a19d58f09eb1eabd029fd1 Mon Sep 17 00:00:00 2001
+From: Alex Gaynor <alex.gaynor@gmail.com>
+Date: Wed, 6 Dec 2023 08:04:53 +0000
+Subject: [PATCH] Fixed crash when loading a PKCS#7 bundle with no certificates
+ (#9926)
+
+CVE: CVE-2023-49083
+
+Upstream-Status: Backport [https://github.com/pyca/cryptography/commit/1e7b4d074e14c4e694d3ce69ad6754a6039fd6ff]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ src/cryptography/hazmat/backends/openssl/backend.py | 5 ++++-
+ tests/hazmat/primitives/test_pkcs7.py | 6 ++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/src/cryptography/hazmat/backends/openssl/backend.py b/src/cryptography/hazmat/backends/openssl/backend.py
+index 5606fe6..c43fea0 100644
+--- a/src/cryptography/hazmat/backends/openssl/backend.py
++++ b/src/cryptography/hazmat/backends/openssl/backend.py
+@@ -2189,9 +2189,12 @@ class Backend(BackendInterface):
+ _Reasons.UNSUPPORTED_SERIALIZATION,
+ )
+
++ certs: list[x509.Certificate] = []
++ if p7.d.sign == self._ffi.NULL:
++ return certs
++
+ sk_x509 = p7.d.sign.cert
+ num = self._lib.sk_X509_num(sk_x509)
+- certs = []
+ for i in range(num):
+ x509 = self._lib.sk_X509_value(sk_x509, i)
+ self.openssl_assert(x509 != self._ffi.NULL)
+diff --git a/tests/hazmat/primitives/test_pkcs7.py b/tests/hazmat/primitives/test_pkcs7.py
+index 91ac842..b98a9f1 100644
+--- a/tests/hazmat/primitives/test_pkcs7.py
++++ b/tests/hazmat/primitives/test_pkcs7.py
+@@ -81,6 +81,12 @@ class TestPKCS7Loading(object):
+ mode="rb",
+ )
+
++ def test_load_pkcs7_empty_certificates(self):
++ der = b"\x30\x0B\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x07\x02"
++
++ certificates = pkcs7.load_der_pkcs7_certificates(der)
++ assert certificates == []
++
+
+ # We have no public verification API and won't be adding one until we get
+ # some requirements from users so this function exists to give us basic
+--
+2.40.0
diff --git a/meta/recipes-devtools/python/python3-cryptography/CVE-2024-26130.patch b/meta/recipes-devtools/python/python3-cryptography/CVE-2024-26130.patch
new file mode 100644
index 0000000000..ff113e8cc7
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-cryptography/CVE-2024-26130.patch
@@ -0,0 +1,66 @@
+From 97d231672763cdb5959a3b191e692a362f1b9e55 Mon Sep 17 00:00:00 2001
+From: Alex Gaynor <alex.gaynor@gmail.com>
+Date: Mon, 19 Feb 2024 11:50:28 -0500
+Subject: [PATCH] Fixes #10422 -- don't crash when a PKCS#12 key and cert don't
+ match (#10423)
+
+Upstream-Status: Backport [https://github.com/pyca/cryptography/commit/97d231672763cdb5959a3b191e692a362f1b9e55]
+CVE: CVE-2024-26130
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ .../hazmat/backends/openssl/backend.py | 9 +++++++++
+ tests/hazmat/primitives/test_pkcs12.py | 18 ++++++++++++++++++
+ 2 files changed, 27 insertions(+)
+
+diff --git a/src/cryptography/hazmat/backends/openssl/backend.py b/src/cryptography/hazmat/backends/openssl/backend.py
+index c43fea0..d687931 100644
+--- a/src/cryptography/hazmat/backends/openssl/backend.py
++++ b/src/cryptography/hazmat/backends/openssl/backend.py
+@@ -2131,6 +2131,15 @@ class Backend(BackendInterface):
+ mac_iter,
+ 0,
+ )
++ if p12 == self._ffi.NULL:
++ errors = self._consume_errors()
++ raise ValueError(
++ (
++ "Failed to create PKCS12 (does the key match the "
++ "certificate?)"
++ ),
++ errors,
++ )
+
+ self.openssl_assert(p12 != self._ffi.NULL)
+ p12 = self._ffi.gc(p12, self._lib.PKCS12_free)
+diff --git a/tests/hazmat/primitives/test_pkcs12.py b/tests/hazmat/primitives/test_pkcs12.py
+index c5cfbc0..8af4c93 100644
+--- a/tests/hazmat/primitives/test_pkcs12.py
++++ b/tests/hazmat/primitives/test_pkcs12.py
+@@ -25,6 +25,24 @@ from ...doubles import DummyKeySerializationEncryption
+ from ...utils import load_vectors_from_file
+
+
++ @pytest.mark.supported(
++ only_if=lambda backend: backend._lib.Cryptography_HAS_PKCS12_SET_MAC,
++ skip_message="Requires OpenSSL with PKCS12_set_mac",
++ )
++ def test_set_mac_key_certificate_mismatch(self, backend):
++ cacert, _ = _load_ca(backend)
++ key = ec.generate_private_key(ec.SECP256R1())
++ encryption = (
++ serialization.PrivateFormat.PKCS12.encryption_builder()
++ .hmac_hash(hashes.SHA256())
++ .build(b"password")
++ )
++
++ with pytest.raises(ValueError):
++ serialize_key_and_certificates(
++ b"name", key, cacert, [], encryption
++ )
++
+ @pytest.mark.skip_fips(
+ reason="PKCS12 unsupported in FIPS mode. So much bad crypto in it."
+ )
+--
+2.35.7
+
diff --git a/meta/recipes-devtools/python/python3-cryptography_36.0.2.bb b/meta/recipes-devtools/python/python3-cryptography_36.0.2.bb
index 9ef5ff39c8..83381f225c 100644
--- a/meta/recipes-devtools/python/python3-cryptography_36.0.2.bb
+++ b/meta/recipes-devtools/python/python3-cryptography_36.0.2.bb
@@ -17,6 +17,9 @@ SRC_URI += " \
file://0001-Cargo.toml-specify-pem-version.patch \
file://0002-Cargo.toml-edition-2018-2021.patch \
file://fix-leak-metric.patch \
+ file://CVE-2023-23931.patch \
+ file://CVE-2023-49083.patch \
+ file://CVE-2024-26130.patch \
"
inherit pypi python_setuptools3_rust
diff --git a/meta/recipes-devtools/python/python3-git_3.1.27.bb b/meta/recipes-devtools/python/python3-git_3.1.37.bb
index fb1bae8f8e..56a335a79e 100644
--- a/meta/recipes-devtools/python/python3-git_3.1.27.bb
+++ b/meta/recipes-devtools/python/python3-git_3.1.37.bb
@@ -6,13 +6,13 @@ access with big-files support."
HOMEPAGE = "http://github.com/gitpython-developers/GitPython"
SECTION = "devel/python"
LICENSE = "BSD-3-Clause"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=8b8d26c37c1d5a04f9b0186edbebc183"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5279a7ab369ba336989dcf2a107e5c8e"
PYPI_PACKAGE = "GitPython"
inherit pypi python_setuptools_build_meta
-SRC_URI[sha256sum] = "1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"
+SRC_URI[sha256sum] = "f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54"
DEPENDS += " ${PYTHON_PN}-gitdb"
diff --git a/meta/recipes-devtools/python/python3-jinja2/run-ptest b/meta/recipes-devtools/python/python3-jinja2/run-ptest
index 5cec711696..5817735a63 100644
--- a/meta/recipes-devtools/python/python3-jinja2/run-ptest
+++ b/meta/recipes-devtools/python/python3-jinja2/run-ptest
@@ -1,3 +1,3 @@
#!/bin/sh
-pytest
+pytest -o log_cli=true -o log_cli_level=INFO | sed -e 's/\[...%\]//g'| sed -e 's/PASSED/PASS/g'| sed -e 's/FAILED/FAIL/g'| sed -e 's/SKIPPED/SKIP/g'| awk '{if ($NF=="PASS" || $NF=="FAIL" || $NF=="SKIP" || $NF=="XFAIL" || $NF=="XPASS"){printf "%s: %s\n", $NF, $0}else{print}}'| awk '{if ($NF=="PASS" || $NF=="FAIL" || $NF=="SKIP" || $NF=="XFAIL" || $NF=="XPASS") {$NF="";print $0}else{print}}'
diff --git a/meta/recipes-devtools/python/python3-jinja2_3.1.1.bb b/meta/recipes-devtools/python/python3-jinja2_3.1.3.bb
index c38686a5c2..068e21bf5f 100644
--- a/meta/recipes-devtools/python/python3-jinja2_3.1.1.bb
+++ b/meta/recipes-devtools/python/python3-jinja2_3.1.3.bb
@@ -4,7 +4,7 @@ HOMEPAGE = "https://pypi.org/project/Jinja2/"
LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE.rst;md5=5dc88300786f1c214c1e9827a5229462"
-SRC_URI[sha256sum] = "640bed4bb501cbd17194b3cace1dc2126f5b619cf068a726b98192a0fde74ae9"
+SRC_URI[sha256sum] = "ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"
PYPI_PACKAGE = "Jinja2"
diff --git a/meta/recipes-devtools/python/python3-mako/CVE-2022-40023.patch b/meta/recipes-devtools/python/python3-mako/CVE-2022-40023.patch
new file mode 100644
index 0000000000..66690e74b4
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-mako/CVE-2022-40023.patch
@@ -0,0 +1,119 @@
+From 925760291d6efec64fda6e9dd1fd9cfbd5be068c Mon Sep 17 00:00:00 2001
+From: Mike Bayer <mike_mp@zzzcomputing.com>
+Date: Mon, 29 Aug 2022 12:28:52 -0400
+Subject: [PATCH] fix tag regexp to match quoted groups correctly
+
+Fixed issue in lexer where the regexp used to match tags would not
+correctly interpret quoted sections individually. While this parsing issue
+still produced the same expected tag structure later on, the mis-handling
+of quoted sections was also subject to a regexp crash if a tag had a large
+number of quotes within its quoted sections.
+
+Fixes: #366
+Change-Id: I74e0d71ff7f419970711a7cd51adcf1bb90a44c0
+
+Upstream-Status: Backport [https://github.com/sqlalchemy/mako/commit/925760291d6efec64fda6e9dd1fd9cfbd5be068c]
+
+Signed-off-by: <narpat.mali@windriver.com>
+
+---
+ doc/build/unreleased/366.rst | 9 +++++++++
+ mako/lexer.py | 12 ++++++++----
+ test/test_lexer.py | 21 +++++++++++++++++----
+ 3 files changed, 34 insertions(+), 8 deletions(-)
+ create mode 100644 doc/build/unreleased/366.rst
+
+--- /dev/null
++++ Mako-1.1.6/doc/build/unreleased/366.rst
+@@ -0,0 +1,9 @@
++.. change::
++ :tags: bug, lexer
++ :tickets: 366
++
++ Fixed issue in lexer where the regexp used to match tags would not
++ correctly interpret quoted sections individually. While this parsing issue
++ still produced the same expected tag structure later on, the mis-handling
++ of quoted sections was also subject to a regexp crash if a tag had a large
++ number of quotes within its quoted sections.
+\ No newline at end of file
+--- Mako-1.1.6.orig/mako/lexer.py
++++ Mako-1.1.6/mako/lexer.py
+@@ -295,20 +295,24 @@ class Lexer(object):
+ return self.template
+
+ def match_tag_start(self):
+- match = self.match(
+- r"""
++ reg = r"""
+ \<% # opening tag
+
+ ([\w\.\:]+) # keyword
+
+- ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \
++ ((?:\s+\w+|\s*=\s*|"[^"]*?"|'[^']*?'|\s*,\s*)*) # attrname, = \
+ # sign, string expression
++ # comma is for backwards compat
++ # identified in #366
+
+ \s* # more whitespace
+
+ (/)?> # closing
+
+- """,
++ """
++
++ match = self.match(
++ reg,
+ re.I | re.S | re.X,
+ )
+
+--- Mako-1.1.6.orig/test/test_lexer.py
++++ Mako-1.1.6/test/test_lexer.py
+@@ -1,5 +1,7 @@
+ import re
+
++import pytest
++
+ from mako import compat
+ from mako import exceptions
+ from mako import parsetree
+@@ -146,6 +148,10 @@ class LexerTest(TemplateTest):
+ """
+ self.assertRaises(exceptions.CompileException, Lexer(template).parse)
+
++ def test_tag_many_quotes(self):
++ template = "<%0" + '"' * 3000
++ assert_raises(exceptions.SyntaxException, Lexer(template).parse)
++
+ def test_unmatched_tag(self):
+ template = """
+ <%namespace name="bar">
+@@ -432,9 +438,16 @@ class LexerTest(TemplateTest):
+ ),
+ )
+
+- def test_pagetag(self):
+- template = """
+- <%page cached="True", args="a, b"/>
++ @pytest.mark.parametrize("comma,numchars", [(",", 48), ("", 47)])
++ def test_pagetag(self, comma, numchars):
++ # note that the comma here looks like:
++ # <%page cached="True", args="a, b"/>
++ # that's what this test has looked like for decades, however, the
++ # comma there is not actually the right syntax. When issue #366
++ # was fixed, the reg was altered to accommodate for this comma to allow
++ # backwards compat
++ template = f"""
++ <%page cached="True"{comma} args="a, b"/>
+
+ some template
+ """
+@@ -453,7 +466,7 @@ class LexerTest(TemplateTest):
+
+ some template
+ """,
+- (2, 48),
++ (2, numchars),
+ ),
+ ],
+ ),
diff --git a/meta/recipes-devtools/python/python3-mako_1.1.6.bb b/meta/recipes-devtools/python/python3-mako_1.1.6.bb
index 71e5d96ba1..4e4f33f5dc 100644
--- a/meta/recipes-devtools/python/python3-mako_1.1.6.bb
+++ b/meta/recipes-devtools/python/python3-mako_1.1.6.bb
@@ -6,6 +6,8 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=943eb67718222db21d44a4ef1836675f"
PYPI_PACKAGE = "Mako"
+SRC_URI += "file://CVE-2022-40023.patch"
+
inherit pypi python_setuptools_build_meta
SRC_URI[sha256sum] = "4e9e345a41924a954251b95b4b28e14a301145b544901332e658907a7464b6b2"
diff --git a/meta/recipes-devtools/python/python3-pip_22.0.3.bb b/meta/recipes-devtools/python/python3-pip_22.0.3.bb
index 09a305edf8..6e28b87ba3 100644
--- a/meta/recipes-devtools/python/python3-pip_22.0.3.bb
+++ b/meta/recipes-devtools/python/python3-pip_22.0.3.bb
@@ -55,6 +55,8 @@ RDEPENDS:${PN} = "\
python3-unixadmin \
python3-xmlrpc \
python3-pickle \
+ python3-distutils \
+ python3-image \
"
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-devtools/python/python3-pycryptodome/CVE-2023-52323.patch b/meta/recipes-devtools/python/python3-pycryptodome/CVE-2023-52323.patch
new file mode 100644
index 0000000000..be3090eb8d
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-pycryptodome/CVE-2023-52323.patch
@@ -0,0 +1,436 @@
+From 73bbed822fadddf3c0ab4a945ee6ab16bbca6961 Mon Sep 17 00:00:00 2001
+From: Helder Eijs <helderijs@gmail.com>
+Date: Thu, 1 Feb 2024 13:43:44 +0000
+Subject: [PATCH] Use constant-time (faster) padding decoding also for OAEP
+
+CVE: CVE-2023-52323
+
+Upstream-Status: Backport [https://github.com/Legrandin/pycryptodome/commit/0deea1bfe1489e8c80d2053bbb06a1aa0b181ebd]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ lib/Crypto/Cipher/PKCS1_OAEP.py | 38 +++++-------
+ lib/Crypto/Cipher/PKCS1_v1_5.py | 31 +---------
+ lib/Crypto/Cipher/_pkcs1_oaep_decode.py | 41 +++++++++++++
+ src/pkcs1_decode.c | 79 +++++++++++++++++++++++--
+ src/test/test_pkcs1.c | 22 +++----
+ 5 files changed, 145 insertions(+), 66 deletions(-)
+ create mode 100644 lib/Crypto/Cipher/_pkcs1_oaep_decode.py
+
+diff --git a/lib/Crypto/Cipher/PKCS1_OAEP.py b/lib/Crypto/Cipher/PKCS1_OAEP.py
+index 57a982b..6974584 100644
+--- a/lib/Crypto/Cipher/PKCS1_OAEP.py
++++ b/lib/Crypto/Cipher/PKCS1_OAEP.py
+@@ -23,11 +23,13 @@
+ from Crypto.Signature.pss import MGF1
+ import Crypto.Hash.SHA1
+
+-from Crypto.Util.py3compat import bord, _copy_bytes
++from Crypto.Util.py3compat import _copy_bytes
+ import Crypto.Util.number
+-from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes
+-from Crypto.Util.strxor import strxor
++from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes
++from Crypto.Util.strxor import strxor
+ from Crypto import Random
++from ._pkcs1_oaep_decode import oaep_decode
++
+
+ class PKCS1OAEP_Cipher:
+ """Cipher object for PKCS#1 v1.5 OAEP.
+@@ -68,7 +70,7 @@ class PKCS1OAEP_Cipher:
+ if mgfunc:
+ self._mgf = mgfunc
+ else:
+- self._mgf = lambda x,y: MGF1(x,y,self._hashObj)
++ self._mgf = lambda x, y: MGF1(x, y, self._hashObj)
+
+ self._label = _copy_bytes(None, None, label)
+ self._randfunc = randfunc
+@@ -105,7 +107,7 @@ class PKCS1OAEP_Cipher:
+
+ # See 7.1.1 in RFC3447
+ modBits = Crypto.Util.number.size(self._key.n)
+- k = ceil_div(modBits, 8) # Convert from bits to bytes
++ k = ceil_div(modBits, 8) # Convert from bits to bytes
+ hLen = self._hashObj.digest_size
+ mLen = len(message)
+
+@@ -159,11 +161,11 @@ class PKCS1OAEP_Cipher:
+
+ # See 7.1.2 in RFC3447
+ modBits = Crypto.Util.number.size(self._key.n)
+- k = ceil_div(modBits,8) # Convert from bits to bytes
++ k = ceil_div(modBits, 8) # Convert from bits to bytes
+ hLen = self._hashObj.digest_size
+
+ # Step 1b and 1c
+- if len(ciphertext) != k or k<hLen+2:
++ if len(ciphertext) != k or k < hLen+2:
+ raise ValueError("Ciphertext with incorrect length.")
+ # Step 2a (O2SIP)
+ ct_int = bytes_to_long(ciphertext)
+@@ -173,8 +175,6 @@ class PKCS1OAEP_Cipher:
+ em = long_to_bytes(m_int, k)
+ # Step 3a
+ lHash = self._hashObj.new(self._label).digest()
+- # Step 3b
+- y = em[0]
+ # y must be 0, but we MUST NOT check it here in order not to
+ # allow attacks like Manger's (http://dl.acm.org/citation.cfm?id=704143)
+ maskedSeed = em[1:hLen+1]
+@@ -187,22 +187,17 @@ class PKCS1OAEP_Cipher:
+ dbMask = self._mgf(seed, k-hLen-1)
+ # Step 3f
+ db = strxor(maskedDB, dbMask)
+- # Step 3g
+- one_pos = hLen + db[hLen:].find(b'\x01')
+- lHash1 = db[:hLen]
+- invalid = bord(y) | int(one_pos < hLen)
+- hash_compare = strxor(lHash1, lHash)
+- for x in hash_compare:
+- invalid |= bord(x)
+- for x in db[hLen:one_pos]:
+- invalid |= bord(x)
+- if invalid != 0:
++ # Step 3b + 3g
++ res = oaep_decode(em, lHash, db)
++ if res <= 0:
+ raise ValueError("Incorrect decryption.")
+ # Step 4
+- return db[one_pos + 1:]
++ return db[res:]
++
+
+ def new(key, hashAlgo=None, mgfunc=None, label=b'', randfunc=None):
+- """Return a cipher object :class:`PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption.
++ """Return a cipher object :class:`PKCS1OAEP_Cipher`
++ that can be used to perform PKCS#1 OAEP encryption or decryption.
+
+ :param key:
+ The key object to use to encrypt or decrypt the message.
+@@ -236,4 +231,3 @@ def new(key, hashAlgo=None, mgfunc=None, label=b'', randfunc=None):
+ if randfunc is None:
+ randfunc = Random.get_random_bytes
+ return PKCS1OAEP_Cipher(key, hashAlgo, mgfunc, label, randfunc)
+-
+diff --git a/lib/Crypto/Cipher/PKCS1_v1_5.py b/lib/Crypto/Cipher/PKCS1_v1_5.py
+index d0d474a..94e99cf 100644
+--- a/lib/Crypto/Cipher/PKCS1_v1_5.py
++++ b/lib/Crypto/Cipher/PKCS1_v1_5.py
+@@ -25,31 +25,7 @@ __all__ = ['new', 'PKCS115_Cipher']
+ from Crypto import Random
+ from Crypto.Util.number import bytes_to_long, long_to_bytes
+ from Crypto.Util.py3compat import bord, is_bytes, _copy_bytes
+-
+-from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, c_size_t,
+- c_uint8_ptr)
+-
+-
+-_raw_pkcs1_decode = load_pycryptodome_raw_lib("Crypto.Cipher._pkcs1_decode",
+- """
+- int pkcs1_decode(const uint8_t *em, size_t len_em,
+- const uint8_t *sentinel, size_t len_sentinel,
+- size_t expected_pt_len,
+- uint8_t *output);
+- """)
+-
+-
+-def _pkcs1_decode(em, sentinel, expected_pt_len, output):
+- if len(em) != len(output):
+- raise ValueError("Incorrect output length")
+-
+- ret = _raw_pkcs1_decode.pkcs1_decode(c_uint8_ptr(em),
+- c_size_t(len(em)),
+- c_uint8_ptr(sentinel),
+- c_size_t(len(sentinel)),
+- c_size_t(expected_pt_len),
+- c_uint8_ptr(output))
+- return ret
++from ._pkcs1_oaep_decode import pkcs1_decode
+
+
+ class PKCS115_Cipher:
+@@ -113,7 +89,6 @@ class PKCS115_Cipher:
+ continue
+ ps.append(new_byte)
+ ps = b"".join(ps)
+- assert(len(ps) == k - mLen - 3)
+ # Step 2b
+ em = b'\x00\x02' + ps + b'\x00' + _copy_bytes(None, None, message)
+ # Step 3a (OS2IP)
+@@ -185,14 +160,14 @@ class PKCS115_Cipher:
+ # Step 3 (not constant time when the sentinel is not a byte string)
+ output = bytes(bytearray(k))
+ if not is_bytes(sentinel) or len(sentinel) > k:
+- size = _pkcs1_decode(em, b'', expected_pt_len, output)
++ size = pkcs1_decode(em, b'', expected_pt_len, output)
+ if size < 0:
+ return sentinel
+ else:
+ return output[size:]
+
+ # Step 3 (somewhat constant time)
+- size = _pkcs1_decode(em, sentinel, expected_pt_len, output)
++ size = pkcs1_decode(em, sentinel, expected_pt_len, output)
+ return output[size:]
+
+
+diff --git a/lib/Crypto/Cipher/_pkcs1_oaep_decode.py b/lib/Crypto/Cipher/_pkcs1_oaep_decode.py
+new file mode 100644
+index 0000000..fc07528
+--- /dev/null
++++ b/lib/Crypto/Cipher/_pkcs1_oaep_decode.py
+@@ -0,0 +1,41 @@
++from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, c_size_t,
++ c_uint8_ptr)
++
++
++_raw_pkcs1_decode = load_pycryptodome_raw_lib("Crypto.Cipher._pkcs1_decode",
++ """
++ int pkcs1_decode(const uint8_t *em, size_t len_em,
++ const uint8_t *sentinel, size_t len_sentinel,
++ size_t expected_pt_len,
++ uint8_t *output);
++
++ int oaep_decode(const uint8_t *em,
++ size_t em_len,
++ const uint8_t *lHash,
++ size_t hLen,
++ const uint8_t *db,
++ size_t db_len);
++ """)
++
++
++def pkcs1_decode(em, sentinel, expected_pt_len, output):
++ if len(em) != len(output):
++ raise ValueError("Incorrect output length")
++
++ ret = _raw_pkcs1_decode.pkcs1_decode(c_uint8_ptr(em),
++ c_size_t(len(em)),
++ c_uint8_ptr(sentinel),
++ c_size_t(len(sentinel)),
++ c_size_t(expected_pt_len),
++ c_uint8_ptr(output))
++ return ret
++
++
++def oaep_decode(em, lHash, db):
++ ret = _raw_pkcs1_decode.oaep_decode(c_uint8_ptr(em),
++ c_size_t(len(em)),
++ c_uint8_ptr(lHash),
++ c_size_t(len(lHash)),
++ c_uint8_ptr(db),
++ c_size_t(len(db)))
++ return ret
+diff --git a/src/pkcs1_decode.c b/src/pkcs1_decode.c
+index 207b198..74cb4a2 100644
+--- a/src/pkcs1_decode.c
++++ b/src/pkcs1_decode.c
+@@ -130,7 +130,7 @@ STATIC size_t safe_select_idx(size_t in1, size_t in2, uint8_t choice)
+ * - in1[] is NOT equal to in2[] where neq_mask[] is 0xFF.
+ * Return non-zero otherwise.
+ */
+-STATIC uint8_t safe_cmp(const uint8_t *in1, const uint8_t *in2,
++STATIC uint8_t safe_cmp_masks(const uint8_t *in1, const uint8_t *in2,
+ const uint8_t *eq_mask, const uint8_t *neq_mask,
+ size_t len)
+ {
+@@ -187,7 +187,7 @@ STATIC size_t safe_search(const uint8_t *in1, uint8_t c, size_t len)
+ return result;
+ }
+
+-#define EM_PREFIX_LEN 10
++#define PKCS1_PREFIX_LEN 10
+
+ /*
+ * Decode and verify the PKCS#1 padding, then put either the plaintext
+@@ -222,13 +222,13 @@ EXPORT_SYM int pkcs1_decode(const uint8_t *em, size_t len_em_output,
+ if (NULL == em || NULL == output || NULL == sentinel) {
+ return -1;
+ }
+- if (len_em_output < (EM_PREFIX_LEN + 2)) {
++ if (len_em_output < (PKCS1_PREFIX_LEN + 2)) {
+ return -1;
+ }
+ if (len_sentinel > len_em_output) {
+ return -1;
+ }
+- if (expected_pt_len > 0 && expected_pt_len > (len_em_output - EM_PREFIX_LEN - 1)) {
++ if (expected_pt_len > 0 && expected_pt_len > (len_em_output - PKCS1_PREFIX_LEN - 1)) {
+ return -1;
+ }
+
+@@ -240,7 +240,7 @@ EXPORT_SYM int pkcs1_decode(const uint8_t *em, size_t len_em_output,
+ memcpy(padded_sentinel + (len_em_output - len_sentinel), sentinel, len_sentinel);
+
+ /** The first 10 bytes must follow the pattern **/
+- match = safe_cmp(em,
++ match = safe_cmp_masks(em,
+ (const uint8_t*)"\x00\x02" "\x00\x00\x00\x00\x00\x00\x00\x00",
+ (const uint8_t*)"\xFF\xFF" "\x00\x00\x00\x00\x00\x00\x00\x00",
+ (const uint8_t*)"\x00\x00" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
+@@ -283,3 +283,72 @@ end:
+ free(padded_sentinel);
+ return result;
+ }
++
++/*
++ * Decode and verify the OAEP padding in constant time.
++ *
++ * The function returns the number of bytes to ignore at the beginning
++ * of db (the rest is the plaintext), or -1 in case of problems.
++ */
++
++EXPORT_SYM int oaep_decode(const uint8_t *em,
++ size_t em_len,
++ const uint8_t *lHash,
++ size_t hLen,
++ const uint8_t *db,
++ size_t db_len) /* em_len - 1 - hLen */
++{
++ int result;
++ size_t one_pos, search_len, i;
++ uint8_t wrong_padding;
++ uint8_t *eq_mask = NULL;
++ uint8_t *neq_mask = NULL;
++ uint8_t *target_db = NULL;
++
++ if (NULL == em || NULL == lHash || NULL == db) {
++ return -1;
++ }
++
++ if (em_len < 2*hLen+2 || db_len != em_len-1-hLen) {
++ return -1;
++ }
++
++ /* Allocate */
++ eq_mask = (uint8_t*) calloc(1, db_len);
++ neq_mask = (uint8_t*) calloc(1, db_len);
++ target_db = (uint8_t*) calloc(1, db_len);
++ if (NULL == eq_mask || NULL == neq_mask || NULL == target_db) {
++ result = -1;
++ goto cleanup;
++ }
++
++ /* Step 3g */
++ search_len = db_len - hLen;
++
++ one_pos = safe_search(db + hLen, 0x01, search_len);
++ if (SIZE_T_MAX == one_pos) {
++ result = -1;
++ goto cleanup;
++ }
++
++ memset(eq_mask, 0xAA, db_len);
++ memcpy(target_db, lHash, hLen);
++ memset(eq_mask, 0xFF, hLen);
++
++ for (i=0; i<search_len; i++) {
++ eq_mask[hLen + i] = propagate_ones(i < one_pos);
++ }
++
++ wrong_padding = em[0];
++ wrong_padding |= safe_cmp_masks(db, target_db, eq_mask, neq_mask, db_len);
++ set_if_match(&wrong_padding, one_pos, search_len);
++
++ result = wrong_padding ? -1 : (int)(hLen + 1 + one_pos);
++
++cleanup:
++ free(eq_mask);
++ free(neq_mask);
++ free(target_db);
++
++ return result;
++}
+diff --git a/src/test/test_pkcs1.c b/src/test/test_pkcs1.c
+index 6ef63cb..69aaac5 100644
+--- a/src/test/test_pkcs1.c
++++ b/src/test/test_pkcs1.c
+@@ -5,7 +5,7 @@ void set_if_match(uint8_t *flag, size_t term1, size_t term2);
+ void set_if_no_match(uint8_t *flag, size_t term1, size_t term2);
+ void safe_select(const uint8_t *in1, const uint8_t *in2, uint8_t *out, uint8_t choice, size_t len);
+ size_t safe_select_idx(size_t in1, size_t in2, uint8_t choice);
+-uint8_t safe_cmp(const uint8_t *in1, const uint8_t *in2,
++uint8_t safe_cmp_masks(const uint8_t *in1, const uint8_t *in2,
+ const uint8_t *eq_mask, const uint8_t *neq_mask,
+ size_t len);
+ size_t safe_search(const uint8_t *in1, uint8_t c, size_t len);
+@@ -80,29 +80,29 @@ void test_safe_select_idx()
+ assert(safe_select_idx(0x100004, 0x223344, 1) == 0x223344);
+ }
+
+-void test_safe_cmp()
++void test_safe_cmp_masks(void)
+ {
+ uint8_t res;
+
+- res = safe_cmp(onezero, onezero,
++ res = safe_cmp_masks(onezero, onezero,
+ (uint8_t*)"\xFF\xFF",
+ (uint8_t*)"\x00\x00",
+ 2);
+ assert(res == 0);
+
+- res = safe_cmp(onezero, zerozero,
++ res = safe_cmp_masks(onezero, zerozero,
+ (uint8_t*)"\xFF\xFF",
+ (uint8_t*)"\x00\x00",
+ 2);
+ assert(res != 0);
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\xFF\xFF",
+ (uint8_t*)"\x00\x00",
+ 2);
+ assert(res != 0);
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\xFF\x00",
+ (uint8_t*)"\x00\x00",
+ 2);
+@@ -110,19 +110,19 @@ void test_safe_cmp()
+
+ /** -- **/
+
+- res = safe_cmp(onezero, onezero,
++ res = safe_cmp_masks(onezero, onezero,
+ (uint8_t*)"\x00\x00",
+ (uint8_t*)"\xFF\xFF",
+ 2);
+ assert(res != 0);
+
+- res = safe_cmp(oneone, zerozero,
++ res = safe_cmp_masks(oneone, zerozero,
+ (uint8_t*)"\x00\x00",
+ (uint8_t*)"\xFF\xFF",
+ 2);
+ assert(res == 0);
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\x00\x00",
+ (uint8_t*)"\x00\xFF",
+ 2);
+@@ -130,7 +130,7 @@ void test_safe_cmp()
+
+ /** -- **/
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\xFF\x00",
+ (uint8_t*)"\x00\xFF",
+ 2);
+@@ -158,7 +158,7 @@ int main(void)
+ test_set_if_no_match();
+ test_safe_select();
+ test_safe_select_idx();
+- test_safe_cmp();
++ test_safe_cmp_masks();
+ test_safe_search();
+ return 0;
+ }
+--
+2.40.0
diff --git a/meta/recipes-devtools/python/python3-pycryptodome_3.14.1.bb b/meta/recipes-devtools/python/python3-pycryptodome_3.14.1.bb
index c0324590c2..1e6c514224 100644
--- a/meta/recipes-devtools/python/python3-pycryptodome_3.14.1.bb
+++ b/meta/recipes-devtools/python/python3-pycryptodome_3.14.1.bb
@@ -3,3 +3,4 @@ inherit setuptools3
SRC_URI[sha256sum] = "e04e40a7f8c1669195536a37979dd87da2c32dbdc73d6fe35f0077b0c17c803b"
+SRC_URI += "file://CVE-2023-52323.patch"
diff --git a/meta/recipes-devtools/python/python3-pycryptodomex/CVE-2023-52323.patch b/meta/recipes-devtools/python/python3-pycryptodomex/CVE-2023-52323.patch
new file mode 100644
index 0000000000..56000b996e
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-pycryptodomex/CVE-2023-52323.patch
@@ -0,0 +1,436 @@
+From 8ed5cf533be298d40ec9f75a188738ad4c3a8417 Mon Sep 17 00:00:00 2001
+From: Narpat Mali <narpat.mali@windriver.com>
+Date: Thu, 8 Feb 2024 09:09:35 +0000
+Subject: [PATCH] Use constant-time (faster) padding decoding also for OAEP
+
+CVE: CVE-2023-52323
+
+Upstream-Status: Backport [https://github.com/Legrandin/pycryptodome/commit/0deea1bfe1489e8c80d2053bbb06a1aa0b181ebd]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ lib/Cryptodome/Cipher/PKCS1_OAEP.py | 38 +++++-----
+ lib/Cryptodome/Cipher/PKCS1_v1_5.py | 31 +-------
+ lib/Cryptodome/Cipher/_pkcs1_oaep_decode.py | 41 +++++++++++
+ src/pkcs1_decode.c | 79 +++++++++++++++++++--
+ src/test/test_pkcs1.c | 22 +++---
+ 5 files changed, 145 insertions(+), 66 deletions(-)
+ create mode 100644 lib/Cryptodome/Cipher/_pkcs1_oaep_decode.py
+
+diff --git a/lib/Cryptodome/Cipher/PKCS1_OAEP.py b/lib/Cryptodome/Cipher/PKCS1_OAEP.py
+index 7525c5d..653df04 100644
+--- a/lib/Cryptodome/Cipher/PKCS1_OAEP.py
++++ b/lib/Cryptodome/Cipher/PKCS1_OAEP.py
+@@ -23,11 +23,13 @@
+ from Cryptodome.Signature.pss import MGF1
+ import Cryptodome.Hash.SHA1
+
+-from Cryptodome.Util.py3compat import bord, _copy_bytes
++from Crypto.Util.py3compat import _copy_bytes
+ import Cryptodome.Util.number
+-from Cryptodome.Util.number import ceil_div, bytes_to_long, long_to_bytes
+-from Cryptodome.Util.strxor import strxor
++from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes
++from Crypto.Util.strxor import strxor
+ from Cryptodome import Random
++from ._pkcs1_oaep_decode import oaep_decode
++
+
+ class PKCS1OAEP_Cipher:
+ """Cipher object for PKCS#1 v1.5 OAEP.
+@@ -68,7 +70,7 @@ class PKCS1OAEP_Cipher:
+ if mgfunc:
+ self._mgf = mgfunc
+ else:
+- self._mgf = lambda x,y: MGF1(x,y,self._hashObj)
++ self._mgf = lambda x, y: MGF1(x, y, self._hashObj)
+
+ self._label = _copy_bytes(None, None, label)
+ self._randfunc = randfunc
+@@ -105,7 +107,7 @@ class PKCS1OAEP_Cipher:
+
+ # See 7.1.1 in RFC3447
+ modBits = Cryptodome.Util.number.size(self._key.n)
+- k = ceil_div(modBits, 8) # Convert from bits to bytes
++ k = ceil_div(modBits, 8) # Convert from bits to bytes
+ hLen = self._hashObj.digest_size
+ mLen = len(message)
+
+@@ -159,11 +161,11 @@ class PKCS1OAEP_Cipher:
+
+ # See 7.1.2 in RFC3447
+ modBits = Cryptodome.Util.number.size(self._key.n)
+- k = ceil_div(modBits,8) # Convert from bits to bytes
++ k = ceil_div(modBits, 8) # Convert from bits to bytes
+ hLen = self._hashObj.digest_size
+
+ # Step 1b and 1c
+- if len(ciphertext) != k or k<hLen+2:
++ if len(ciphertext) != k or k < hLen+2:
+ raise ValueError("Ciphertext with incorrect length.")
+ # Step 2a (O2SIP)
+ ct_int = bytes_to_long(ciphertext)
+@@ -173,8 +175,6 @@ class PKCS1OAEP_Cipher:
+ em = long_to_bytes(m_int, k)
+ # Step 3a
+ lHash = self._hashObj.new(self._label).digest()
+- # Step 3b
+- y = em[0]
+ # y must be 0, but we MUST NOT check it here in order not to
+ # allow attacks like Manger's (http://dl.acm.org/citation.cfm?id=704143)
+ maskedSeed = em[1:hLen+1]
+@@ -187,22 +187,17 @@ class PKCS1OAEP_Cipher:
+ dbMask = self._mgf(seed, k-hLen-1)
+ # Step 3f
+ db = strxor(maskedDB, dbMask)
+- # Step 3g
+- one_pos = hLen + db[hLen:].find(b'\x01')
+- lHash1 = db[:hLen]
+- invalid = bord(y) | int(one_pos < hLen)
+- hash_compare = strxor(lHash1, lHash)
+- for x in hash_compare:
+- invalid |= bord(x)
+- for x in db[hLen:one_pos]:
+- invalid |= bord(x)
+- if invalid != 0:
++ # Step 3b + 3g
++ res = oaep_decode(em, lHash, db)
++ if res <= 0:
+ raise ValueError("Incorrect decryption.")
+ # Step 4
+- return db[one_pos + 1:]
++ return db[res:]
++
+
+ def new(key, hashAlgo=None, mgfunc=None, label=b'', randfunc=None):
+- """Return a cipher object :class:`PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption.
++ """Return a cipher object :class:`PKCS1OAEP_Cipher`
++ that can be used to perform PKCS#1 OAEP encryption or decryption.
+
+ :param key:
+ The key object to use to encrypt or decrypt the message.
+@@ -236,4 +231,3 @@ def new(key, hashAlgo=None, mgfunc=None, label=b'', randfunc=None):
+ if randfunc is None:
+ randfunc = Random.get_random_bytes
+ return PKCS1OAEP_Cipher(key, hashAlgo, mgfunc, label, randfunc)
+-
+diff --git a/lib/Cryptodome/Cipher/PKCS1_v1_5.py b/lib/Cryptodome/Cipher/PKCS1_v1_5.py
+index 17ef9eb..f20a7ce 100644
+--- a/lib/Cryptodome/Cipher/PKCS1_v1_5.py
++++ b/lib/Cryptodome/Cipher/PKCS1_v1_5.py
+@@ -25,31 +25,7 @@ __all__ = ['new', 'PKCS115_Cipher']
+ from Cryptodome import Random
+ from Cryptodome.Util.number import bytes_to_long, long_to_bytes
+ from Cryptodome.Util.py3compat import bord, is_bytes, _copy_bytes
+-
+-from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib, c_size_t,
+- c_uint8_ptr)
+-
+-
+-_raw_pkcs1_decode = load_pycryptodome_raw_lib("Cryptodome.Cipher._pkcs1_decode",
+- """
+- int pkcs1_decode(const uint8_t *em, size_t len_em,
+- const uint8_t *sentinel, size_t len_sentinel,
+- size_t expected_pt_len,
+- uint8_t *output);
+- """)
+-
+-
+-def _pkcs1_decode(em, sentinel, expected_pt_len, output):
+- if len(em) != len(output):
+- raise ValueError("Incorrect output length")
+-
+- ret = _raw_pkcs1_decode.pkcs1_decode(c_uint8_ptr(em),
+- c_size_t(len(em)),
+- c_uint8_ptr(sentinel),
+- c_size_t(len(sentinel)),
+- c_size_t(expected_pt_len),
+- c_uint8_ptr(output))
+- return ret
++from ._pkcs1_oaep_decode import pkcs1_decode
+
+
+ class PKCS115_Cipher:
+@@ -113,7 +89,6 @@ class PKCS115_Cipher:
+ continue
+ ps.append(new_byte)
+ ps = b"".join(ps)
+- assert(len(ps) == k - mLen - 3)
+ # Step 2b
+ em = b'\x00\x02' + ps + b'\x00' + _copy_bytes(None, None, message)
+ # Step 3a (OS2IP)
+@@ -185,14 +160,14 @@ class PKCS115_Cipher:
+ # Step 3 (not constant time when the sentinel is not a byte string)
+ output = bytes(bytearray(k))
+ if not is_bytes(sentinel) or len(sentinel) > k:
+- size = _pkcs1_decode(em, b'', expected_pt_len, output)
++ size = pkcs1_decode(em, b'', expected_pt_len, output)
+ if size < 0:
+ return sentinel
+ else:
+ return output[size:]
+
+ # Step 3 (somewhat constant time)
+- size = _pkcs1_decode(em, sentinel, expected_pt_len, output)
++ size = pkcs1_decode(em, sentinel, expected_pt_len, output)
+ return output[size:]
+
+
+diff --git a/lib/Cryptodome/Cipher/_pkcs1_oaep_decode.py b/lib/Cryptodome/Cipher/_pkcs1_oaep_decode.py
+new file mode 100644
+index 0000000..fc07528
+--- /dev/null
++++ b/lib/Cryptodome/Cipher/_pkcs1_oaep_decode.py
+@@ -0,0 +1,41 @@
++from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, c_size_t,
++ c_uint8_ptr)
++
++
++_raw_pkcs1_decode = load_pycryptodome_raw_lib("Crypto.Cipher._pkcs1_decode",
++ """
++ int pkcs1_decode(const uint8_t *em, size_t len_em,
++ const uint8_t *sentinel, size_t len_sentinel,
++ size_t expected_pt_len,
++ uint8_t *output);
++
++ int oaep_decode(const uint8_t *em,
++ size_t em_len,
++ const uint8_t *lHash,
++ size_t hLen,
++ const uint8_t *db,
++ size_t db_len);
++ """)
++
++
++def pkcs1_decode(em, sentinel, expected_pt_len, output):
++ if len(em) != len(output):
++ raise ValueError("Incorrect output length")
++
++ ret = _raw_pkcs1_decode.pkcs1_decode(c_uint8_ptr(em),
++ c_size_t(len(em)),
++ c_uint8_ptr(sentinel),
++ c_size_t(len(sentinel)),
++ c_size_t(expected_pt_len),
++ c_uint8_ptr(output))
++ return ret
++
++
++def oaep_decode(em, lHash, db):
++ ret = _raw_pkcs1_decode.oaep_decode(c_uint8_ptr(em),
++ c_size_t(len(em)),
++ c_uint8_ptr(lHash),
++ c_size_t(len(lHash)),
++ c_uint8_ptr(db),
++ c_size_t(len(db)))
++ return ret
+diff --git a/src/pkcs1_decode.c b/src/pkcs1_decode.c
+index 207b198..74cb4a2 100644
+--- a/src/pkcs1_decode.c
++++ b/src/pkcs1_decode.c
+@@ -130,7 +130,7 @@ STATIC size_t safe_select_idx(size_t in1, size_t in2, uint8_t choice)
+ * - in1[] is NOT equal to in2[] where neq_mask[] is 0xFF.
+ * Return non-zero otherwise.
+ */
+-STATIC uint8_t safe_cmp(const uint8_t *in1, const uint8_t *in2,
++STATIC uint8_t safe_cmp_masks(const uint8_t *in1, const uint8_t *in2,
+ const uint8_t *eq_mask, const uint8_t *neq_mask,
+ size_t len)
+ {
+@@ -187,7 +187,7 @@ STATIC size_t safe_search(const uint8_t *in1, uint8_t c, size_t len)
+ return result;
+ }
+
+-#define EM_PREFIX_LEN 10
++#define PKCS1_PREFIX_LEN 10
+
+ /*
+ * Decode and verify the PKCS#1 padding, then put either the plaintext
+@@ -222,13 +222,13 @@ EXPORT_SYM int pkcs1_decode(const uint8_t *em, size_t len_em_output,
+ if (NULL == em || NULL == output || NULL == sentinel) {
+ return -1;
+ }
+- if (len_em_output < (EM_PREFIX_LEN + 2)) {
++ if (len_em_output < (PKCS1_PREFIX_LEN + 2)) {
+ return -1;
+ }
+ if (len_sentinel > len_em_output) {
+ return -1;
+ }
+- if (expected_pt_len > 0 && expected_pt_len > (len_em_output - EM_PREFIX_LEN - 1)) {
++ if (expected_pt_len > 0 && expected_pt_len > (len_em_output - PKCS1_PREFIX_LEN - 1)) {
+ return -1;
+ }
+
+@@ -240,7 +240,7 @@ EXPORT_SYM int pkcs1_decode(const uint8_t *em, size_t len_em_output,
+ memcpy(padded_sentinel + (len_em_output - len_sentinel), sentinel, len_sentinel);
+
+ /** The first 10 bytes must follow the pattern **/
+- match = safe_cmp(em,
++ match = safe_cmp_masks(em,
+ (const uint8_t*)"\x00\x02" "\x00\x00\x00\x00\x00\x00\x00\x00",
+ (const uint8_t*)"\xFF\xFF" "\x00\x00\x00\x00\x00\x00\x00\x00",
+ (const uint8_t*)"\x00\x00" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
+@@ -283,3 +283,72 @@ end:
+ free(padded_sentinel);
+ return result;
+ }
++
++/*
++ * Decode and verify the OAEP padding in constant time.
++ *
++ * The function returns the number of bytes to ignore at the beginning
++ * of db (the rest is the plaintext), or -1 in case of problems.
++ */
++
++EXPORT_SYM int oaep_decode(const uint8_t *em,
++ size_t em_len,
++ const uint8_t *lHash,
++ size_t hLen,
++ const uint8_t *db,
++ size_t db_len) /* em_len - 1 - hLen */
++{
++ int result;
++ size_t one_pos, search_len, i;
++ uint8_t wrong_padding;
++ uint8_t *eq_mask = NULL;
++ uint8_t *neq_mask = NULL;
++ uint8_t *target_db = NULL;
++
++ if (NULL == em || NULL == lHash || NULL == db) {
++ return -1;
++ }
++
++ if (em_len < 2*hLen+2 || db_len != em_len-1-hLen) {
++ return -1;
++ }
++
++ /* Allocate */
++ eq_mask = (uint8_t*) calloc(1, db_len);
++ neq_mask = (uint8_t*) calloc(1, db_len);
++ target_db = (uint8_t*) calloc(1, db_len);
++ if (NULL == eq_mask || NULL == neq_mask || NULL == target_db) {
++ result = -1;
++ goto cleanup;
++ }
++
++ /* Step 3g */
++ search_len = db_len - hLen;
++
++ one_pos = safe_search(db + hLen, 0x01, search_len);
++ if (SIZE_T_MAX == one_pos) {
++ result = -1;
++ goto cleanup;
++ }
++
++ memset(eq_mask, 0xAA, db_len);
++ memcpy(target_db, lHash, hLen);
++ memset(eq_mask, 0xFF, hLen);
++
++ for (i=0; i<search_len; i++) {
++ eq_mask[hLen + i] = propagate_ones(i < one_pos);
++ }
++
++ wrong_padding = em[0];
++ wrong_padding |= safe_cmp_masks(db, target_db, eq_mask, neq_mask, db_len);
++ set_if_match(&wrong_padding, one_pos, search_len);
++
++ result = wrong_padding ? -1 : (int)(hLen + 1 + one_pos);
++
++cleanup:
++ free(eq_mask);
++ free(neq_mask);
++ free(target_db);
++
++ return result;
++}
+diff --git a/src/test/test_pkcs1.c b/src/test/test_pkcs1.c
+index 6ef63cb..69aaac5 100644
+--- a/src/test/test_pkcs1.c
++++ b/src/test/test_pkcs1.c
+@@ -5,7 +5,7 @@ void set_if_match(uint8_t *flag, size_t term1, size_t term2);
+ void set_if_no_match(uint8_t *flag, size_t term1, size_t term2);
+ void safe_select(const uint8_t *in1, const uint8_t *in2, uint8_t *out, uint8_t choice, size_t len);
+ size_t safe_select_idx(size_t in1, size_t in2, uint8_t choice);
+-uint8_t safe_cmp(const uint8_t *in1, const uint8_t *in2,
++uint8_t safe_cmp_masks(const uint8_t *in1, const uint8_t *in2,
+ const uint8_t *eq_mask, const uint8_t *neq_mask,
+ size_t len);
+ size_t safe_search(const uint8_t *in1, uint8_t c, size_t len);
+@@ -80,29 +80,29 @@ void test_safe_select_idx()
+ assert(safe_select_idx(0x100004, 0x223344, 1) == 0x223344);
+ }
+
+-void test_safe_cmp()
++void test_safe_cmp_masks(void)
+ {
+ uint8_t res;
+
+- res = safe_cmp(onezero, onezero,
++ res = safe_cmp_masks(onezero, onezero,
+ (uint8_t*)"\xFF\xFF",
+ (uint8_t*)"\x00\x00",
+ 2);
+ assert(res == 0);
+
+- res = safe_cmp(onezero, zerozero,
++ res = safe_cmp_masks(onezero, zerozero,
+ (uint8_t*)"\xFF\xFF",
+ (uint8_t*)"\x00\x00",
+ 2);
+ assert(res != 0);
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\xFF\xFF",
+ (uint8_t*)"\x00\x00",
+ 2);
+ assert(res != 0);
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\xFF\x00",
+ (uint8_t*)"\x00\x00",
+ 2);
+@@ -110,19 +110,19 @@ void test_safe_cmp()
+
+ /** -- **/
+
+- res = safe_cmp(onezero, onezero,
++ res = safe_cmp_masks(onezero, onezero,
+ (uint8_t*)"\x00\x00",
+ (uint8_t*)"\xFF\xFF",
+ 2);
+ assert(res != 0);
+
+- res = safe_cmp(oneone, zerozero,
++ res = safe_cmp_masks(oneone, zerozero,
+ (uint8_t*)"\x00\x00",
+ (uint8_t*)"\xFF\xFF",
+ 2);
+ assert(res == 0);
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\x00\x00",
+ (uint8_t*)"\x00\xFF",
+ 2);
+@@ -130,7 +130,7 @@ void test_safe_cmp()
+
+ /** -- **/
+
+- res = safe_cmp(onezero, oneone,
++ res = safe_cmp_masks(onezero, oneone,
+ (uint8_t*)"\xFF\x00",
+ (uint8_t*)"\x00\xFF",
+ 2);
+@@ -158,7 +158,7 @@ int main(void)
+ test_set_if_no_match();
+ test_safe_select();
+ test_safe_select_idx();
+- test_safe_cmp();
++ test_safe_cmp_masks();
+ test_safe_search();
+ return 0;
+ }
+--
+2.40.0
diff --git a/meta/recipes-devtools/python/python3-pycryptodomex_3.14.1.bb b/meta/recipes-devtools/python/python3-pycryptodomex_3.14.1.bb
index 79a3fee19c..31ad3fda5e 100644
--- a/meta/recipes-devtools/python/python3-pycryptodomex_3.14.1.bb
+++ b/meta/recipes-devtools/python/python3-pycryptodomex_3.14.1.bb
@@ -3,6 +3,8 @@ inherit setuptools3
SRC_URI[sha256sum] = "2ce76ed0081fd6ac8c74edc75b9d14eca2064173af79843c24fa62573263c1f2"
+SRC_URI += "file://CVE-2023-52323.patch"
+
FILES:${PN}-tests = " \
${PYTHON_SITEPACKAGES_DIR}/Cryptodome/SelfTest/ \
${PYTHON_SITEPACKAGES_DIR}/Cryptodome/SelfTest/__pycache__/ \
diff --git a/meta/recipes-devtools/python/python3-pygments/CVE-2022-40896.patch b/meta/recipes-devtools/python/python3-pygments/CVE-2022-40896.patch
new file mode 100644
index 0000000000..9848072a94
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-pygments/CVE-2022-40896.patch
@@ -0,0 +1,124 @@
+From ed61747f328ff6aa343881b269600308ab8eac93 Mon Sep 17 00:00:00 2001
+From: Narpat Mali <narpat.mali@windriver.com>
+Date: Wed, 6 Sep 2023 10:32:38 +0000
+Subject: [PATCH] Improve the Smithy metadata matcher.
+
+Previously, metadata foo bar baz = 23 was accepted, but according to
+the definition https://smithy.io/2.0/spec/idl.html#grammar-token-smithy-MetadataSection
+it should be "metadata"<whitespace>Identifier/String<optional whitespace>.
+
+CVE: CVE-2022-40896
+
+Upstream-Status: Backport [https://github.com/pygments/pygments/commit/dd52102c38ebe78cd57748e09f38929fd283ad04]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ pygments/lexers/smithy.py | 5 +-
+ tests/examplefiles/smithy/test.smithy | 12 +++++
+ tests/examplefiles/smithy/test.smithy.output | 52 ++++++++++++++++++++
+ 3 files changed, 67 insertions(+), 2 deletions(-)
+
+diff --git a/pygments/lexers/smithy.py b/pygments/lexers/smithy.py
+index 0f0a912..c5e25cd 100644
+--- a/pygments/lexers/smithy.py
++++ b/pygments/lexers/smithy.py
+@@ -58,8 +58,9 @@ class SmithyLexer(RegexLexer):
+ (words(aggregate_shapes,
+ prefix=r'^', suffix=r'(\s+' + identifier + r')'),
+ bygroups(Keyword.Declaration, Name.Class)),
+- (r'^(metadata)(\s+.+)(\s*)(=)',
+- bygroups(Keyword.Declaration, Name.Class, Whitespace, Name.Decorator)),
++ (r'^(metadata)(\s+)((?:\S+)|(?:\"[^"]+\"))(\s*)(=)',
++ bygroups(Keyword.Declaration, Whitespace, Name.Class,
++ Whitespace, Name.Decorator)),
+ (r"(true|false|null)", Keyword.Constant),
+ (r"(-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)", Number),
+ (identifier + ":", Name.Label),
+diff --git a/tests/examplefiles/smithy/test.smithy b/tests/examplefiles/smithy/test.smithy
+index 3d20f06..9317fee 100644
+--- a/tests/examplefiles/smithy/test.smithy
++++ b/tests/examplefiles/smithy/test.smithy
+@@ -2,6 +2,18 @@ $version: "1.0"
+
+ namespace test
+
++metadata "foo" = ["bar", "baz"]
++metadata validators = [
++ {
++ name: "ValidatorName"
++ id: "ValidatorId"
++ message: "Some string"
++ configuration: {
++ selector: "operation"
++ }
++ }
++]
++
+ /// Define how an HTTP request is serialized given a specific protocol,
+ /// authentication scheme, and set of input parameters.
+ @trait(selector: "operation")
+diff --git a/tests/examplefiles/smithy/test.smithy.output b/tests/examplefiles/smithy/test.smithy.output
+index 1f22489..db44a38 100644
+--- a/tests/examplefiles/smithy/test.smithy.output
++++ b/tests/examplefiles/smithy/test.smithy.output
+@@ -7,6 +7,58 @@
+ ' test' Name.Class
+ '\n\n' Text.Whitespace
+
++'metadata' Keyword.Declaration
++' ' Text.Whitespace
++'"foo"' Name.Class
++' ' Text.Whitespace
++'=' Name.Decorator
++' ' Text.Whitespace
++'[' Text
++'"bar"' Literal.String.Double
++',' Punctuation
++' ' Text.Whitespace
++'"baz"' Literal.String.Double
++']' Text
++'\n' Text.Whitespace
++
++'metadata' Keyword.Declaration
++' ' Text.Whitespace
++'validators' Name.Class
++' ' Text.Whitespace
++'=' Name.Decorator
++' ' Text.Whitespace
++'[' Text
++'\n ' Text.Whitespace
++'{' Text
++'\n ' Text.Whitespace
++'name:' Name.Label
++' ' Text.Whitespace
++'"ValidatorName"' Literal.String.Double
++'\n ' Text.Whitespace
++'id:' Name.Label
++' ' Text.Whitespace
++'"ValidatorId"' Literal.String.Double
++'\n ' Text.Whitespace
++'message:' Name.Label
++' ' Text.Whitespace
++'"Some string"' Literal.String.Double
++'\n ' Text.Whitespace
++'configuration:' Name.Label
++' ' Text.Whitespace
++'{' Text
++'\n ' Text.Whitespace
++'selector:' Name.Label
++' ' Text.Whitespace
++'"operation"' Literal.String.Double
++'\n ' Text.Whitespace
++'}' Text
++'\n ' Text.Whitespace
++'}' Text
++'\n' Text.Whitespace
++
++']' Text
++'\n\n' Text.Whitespace
++
+ '/// Define how an HTTP request is serialized given a specific protocol,' Comment.Multiline
+ '\n' Text.Whitespace
+
+--
+2.40.0
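For context: the fix above replaces the permissive Smithy metadata rule with one that only accepts a single identifier or a quoted string before the '='. A rough, self-contained check of the two expressions outside the lexer (purely illustrative):

    import re

    OLD = re.compile(r'^(metadata)(\s+.+)(\s*)(=)')
    NEW = re.compile(r'^(metadata)(\s+)((?:\S+)|(?:\"[^"]+\"))(\s*)(=)')

    assert NEW.match('metadata "foo" =')              # quoted key: accepted
    assert NEW.match('metadata validators =')         # bare identifier: accepted
    assert OLD.match('metadata foo bar baz = 23')     # old rule wrongly matched this
    assert not NEW.match('metadata foo bar baz = 23') # new rule rejects it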
diff --git a/meta/recipes-devtools/python/python3-pygments_2.11.2.bb b/meta/recipes-devtools/python/python3-pygments_2.11.2.bb
index 35d288c89e..6e787f23d2 100644
--- a/meta/recipes-devtools/python/python3-pygments_2.11.2.bb
+++ b/meta/recipes-devtools/python/python3-pygments_2.11.2.bb
@@ -7,6 +7,8 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=98419e351433ac106a24e3ad435930bc"
inherit setuptools3
SRC_URI[sha256sum] = "4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"
+SRC_URI += "file://CVE-2022-40896.patch"
+
DEPENDS += "\
${PYTHON_PN} \
"
diff --git a/meta/recipes-devtools/python/python3-pytest_7.1.1.bb b/meta/recipes-devtools/python/python3-pytest_7.1.1.bb
index 1cb2fb01c0..90a4787c17 100644
--- a/meta/recipes-devtools/python/python3-pytest_7.1.1.bb
+++ b/meta/recipes-devtools/python/python3-pytest_7.1.1.bb
@@ -26,7 +26,7 @@ RDEPENDS:${PN}:class-target += " \
${PYTHON_PN}-py \
${PYTHON_PN}-setuptools \
${PYTHON_PN}-six \
- ${PYTHON_PN}-toml \
+ ${PYTHON_PN}-tomli \
${PYTHON_PN}-wcwidth \
"
diff --git a/meta/recipes-devtools/python/python3-requests/CVE-2023-32681.patch b/meta/recipes-devtools/python/python3-requests/CVE-2023-32681.patch
new file mode 100644
index 0000000000..35b4241bde
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-requests/CVE-2023-32681.patch
@@ -0,0 +1,63 @@
+From cd0128c0becd8729d0f8733bf42fbd333d51f833 Mon Sep 17 00:00:00 2001
+From: Nate Prewitt <nate.prewitt@gmail.com>
+Date: Mon, 5 Jun 2023 09:31:36 +0000
+Subject: [PATCH] Merge pull request from GHSA-j8r2-6x86-q33q
+
+CVE: CVE-2023-32681
+
+Upstream-Status: Backport [https://github.com/psf/requests/commit/74ea7cf7a6a27a4eeb2ae24e162bcc942a6706d5]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ requests/sessions.py | 4 +++-
+ tests/test_requests.py | 20 ++++++++++++++++++++
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/requests/sessions.py b/requests/sessions.py
+index 3f59cab..648cffa 100644
+--- a/requests/sessions.py
++++ b/requests/sessions.py
+@@ -293,7 +293,9 @@ class SessionRedirectMixin(object):
+ except KeyError:
+ username, password = None, None
+
+- if username and password:
++ # urllib3 handles proxy authorization for us in the standard adapter.
++ # Avoid appending this to TLS tunneled requests where it may be leaked.
++ if not scheme.startswith('https') and username and password:
+ headers['Proxy-Authorization'] = _basic_auth_str(username, password)
+
+ return new_proxies
+diff --git a/tests/test_requests.py b/tests/test_requests.py
+index 29b3aca..6a37777 100644
+--- a/tests/test_requests.py
++++ b/tests/test_requests.py
+@@ -601,6 +601,26 @@ class TestRequests:
+
+ assert sent_headers.get("Proxy-Authorization") == proxy_auth_value
+
++
++ @pytest.mark.parametrize(
++ "url,has_proxy_auth",
++ (
++ ('http://example.com', True),
++ ('https://example.com', False),
++ ),
++ )
++ def test_proxy_authorization_not_appended_to_https_request(self, url, has_proxy_auth):
++ session = requests.Session()
++ proxies = {
++ 'http': 'http://test:pass@localhost:8080',
++ 'https': 'http://test:pass@localhost:8090',
++ }
++ req = requests.Request('GET', url)
++ prep = req.prepare()
++ session.rebuild_proxies(prep, proxies)
++
++ assert ('Proxy-Authorization' in prep.headers) is has_proxy_auth
++
+ def test_basicauth_with_netrc(self, httpbin):
+ auth = ('user', 'pass')
+ wrong_auth = ('wronguser', 'wrongpass')
+--
+2.40.0
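For context: the change above makes Session.rebuild_proxies() skip the Proxy-Authorization header for TLS-tunneled requests. A minimal sketch of the same condition in isolation (the helper function is made up for illustration; _basic_auth_str is requests' internal helper):

    from requests.auth import _basic_auth_str

    def proxy_headers_for(scheme, username, password):
        headers = {}
        # Mirrors the patched check: never attach proxy credentials when the
        # request is tunneled over TLS, where the header could be leaked.
        if not scheme.startswith('https') and username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)
        return headers

    assert 'Proxy-Authorization' in proxy_headers_for('http', 'test', 'pass')
    assert 'Proxy-Authorization' not in proxy_headers_for('https', 'test', 'pass')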
diff --git a/meta/recipes-devtools/python/python3-requests_2.27.1.bb b/meta/recipes-devtools/python/python3-requests_2.27.1.bb
index af52b7caf5..635a6af31f 100644
--- a/meta/recipes-devtools/python/python3-requests_2.27.1.bb
+++ b/meta/recipes-devtools/python/python3-requests_2.27.1.bb
@@ -3,6 +3,8 @@ HOMEPAGE = "http://python-requests.org"
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+SRC_URI += "file://CVE-2023-32681.patch"
+
SRC_URI[sha256sum] = "68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"
inherit pypi setuptools3
diff --git a/meta/recipes-devtools/python/python3-rfc3986-validator_0.1.1.bb b/meta/recipes-devtools/python/python3-rfc3986-validator_0.1.1.bb
index 4abd181acf..e374979cb4 100644
--- a/meta/recipes-devtools/python/python3-rfc3986-validator_0.1.1.bb
+++ b/meta/recipes-devtools/python/python3-rfc3986-validator_0.1.1.bb
@@ -13,7 +13,7 @@ UPSTREAM_CHECK_REGEX = "/rfc3986-validator/(?P<pver>(\d+[\.\-_]*)+)/"
inherit pypi setuptools3
-SRC_URI:append = " \
+SRC_URI += "\
file://0001-setup.py-move-pytest-runner-to-test_requirements.patch \
"
diff --git a/meta/recipes-devtools/python/python3-setuptools-rust-native_1.1.2.bb b/meta/recipes-devtools/python/python3-setuptools-rust-native_1.1.2.bb
index 8ec9a86f00..c11116a1f4 100644
--- a/meta/recipes-devtools/python/python3-setuptools-rust-native_1.1.2.bb
+++ b/meta/recipes-devtools/python/python3-setuptools-rust-native_1.1.2.bb
@@ -14,9 +14,7 @@ SRC_URI[sha256sum] = "a0adb9b503c0ffc4e8fe80b7c617898cefa78049983aaaea7f747e153a
inherit cargo pypi python_setuptools_build_meta native
-DEPENDS += "python3-setuptools-scm-native python3-wheel-native"
-
-RDEPENDS:${PN}:class-native += " \
+DEPENDS += " \
python3-semantic-version-native \
python3-setuptools-native \
python3-setuptools-scm-native \
diff --git a/meta/recipes-devtools/python/python3-setuptools/0001-Limit-the-amount-of-whitespace-to-search-backtrack.-.patch b/meta/recipes-devtools/python/python3-setuptools/0001-Limit-the-amount-of-whitespace-to-search-backtrack.-.patch
new file mode 100644
index 0000000000..20a13da7bc
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-setuptools/0001-Limit-the-amount-of-whitespace-to-search-backtrack.-.patch
@@ -0,0 +1,31 @@
+From 9e9f617a83f6593b476669030b0347d48e831c3f Mon Sep 17 00:00:00 2001
+From: Narpat Mali <narpat.mali@windriver.com>
+Date: Mon, 9 Jan 2023 14:45:05 +0000
+Subject: [PATCH] Limit the amount of whitespace to search/backtrack. Fixes
+ #3659.
+
+CVE: CVE-2022-40897
+
+Upstream-Status: Backport [https://github.com/pypa/setuptools/commit/43a9c9bfa6aa626ec2a22540bea28d2ca77964be]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ setuptools/package_index.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/setuptools/package_index.py b/setuptools/package_index.py
+index 270e7f3..e93fcc6 100644
+--- a/setuptools/package_index.py
++++ b/setuptools/package_index.py
+@@ -197,7 +197,7 @@ def unique_values(func):
+ return wrapper
+
+
+-REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
++REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I)
+ # this line is here to fix emacs' cruddy broken syntax highlighting
+
+
+--
+2.34.1
+
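For context: CVE-2022-40897 is a ReDoS in setuptools' package_index; the fix bounds the whitespace around '=' so the expression cannot backtrack catastrophically on crafted HTML. A quick, illustrative sanity check of the new pattern:

    import re

    REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I)

    m = REL.search('<a rel="homepage" href="https://example.com/">')
    assert m is not None and m.group(2) == 'homepage'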
diff --git a/meta/recipes-devtools/python/python3-setuptools_59.5.0.bb b/meta/recipes-devtools/python/python3-setuptools_59.5.0.bb
index f2810e18d3..5f2676a04a 100644
--- a/meta/recipes-devtools/python/python3-setuptools_59.5.0.bb
+++ b/meta/recipes-devtools/python/python3-setuptools_59.5.0.bb
@@ -11,6 +11,7 @@ SRC_URI:append:class-native = " file://0001-conditionally-do-not-fetch-code-by-e
SRC_URI += "\
file://0001-change-shebang-to-python3.patch \
file://0001-_distutils-sysconfig-append-STAGING_LIBDIR-python-sy.patch \
+ file://0001-Limit-the-amount-of-whitespace-to-search-backtrack.-.patch \
"
SRC_URI[sha256sum] = "d144f85102f999444d06f9c0e8c737fd0194f10f2f7e5fdb77573f6e2fa4fad0"
diff --git a/meta/recipes-devtools/python/python3-urllib3_1.26.9.bb b/meta/recipes-devtools/python/python3-urllib3_1.26.18.bb
index 95ae4a54a4..d384b5eb2f 100644
--- a/meta/recipes-devtools/python/python3-urllib3_1.26.9.bb
+++ b/meta/recipes-devtools/python/python3-urllib3_1.26.18.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "https://github.com/shazow/urllib3"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=c2823cb995439c984fd62a973d79815c"
-SRC_URI[sha256sum] = "aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e"
+SRC_URI[sha256sum] = "f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"
inherit pypi setuptools3
@@ -15,6 +15,7 @@ RDEPENDS:${PN} += "\
${PYTHON_PN}-netclient \
${PYTHON_PN}-pyopenssl \
${PYTHON_PN}-threading \
+ ${PYTHON_PN}-logging \
"
CVE_PRODUCT = "urllib3"
diff --git a/meta/recipes-devtools/python/python3-wheel/0001-Fixed-potential-DoS-attack-via-WHEEL_INFO_RE.patch b/meta/recipes-devtools/python/python3-wheel/0001-Fixed-potential-DoS-attack-via-WHEEL_INFO_RE.patch
new file mode 100644
index 0000000000..bdaae7dd10
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-wheel/0001-Fixed-potential-DoS-attack-via-WHEEL_INFO_RE.patch
@@ -0,0 +1,32 @@
+From a9a0d67a663f20b69903751c23851dd4cd6b49d4 Mon Sep 17 00:00:00 2001
+From: Narpat Mali <narpat.mali@windriver.com>
+Date: Wed, 11 Jan 2023 07:45:57 +0000
+Subject: [PATCH] Fixed potential DoS attack via WHEEL_INFO_RE
+
+CVE: CVE-2022-40898
+
+Upstream-Status: Backport [https://github.com/pypa/wheel/commit/88f02bc335d5404991e532e7f3b0fc80437bf4e0]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ src/wheel/wheelfile.py | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/wheel/wheelfile.py b/src/wheel/wheelfile.py
+index 21e7361..ff06edf 100644
+--- a/src/wheel/wheelfile.py
++++ b/src/wheel/wheelfile.py
+@@ -27,8 +27,8 @@ else:
+ # Non-greedy matching of an optional build number may be too clever (more
+ # invalid wheel filenames will match). Separate regex for .dist-info?
+ WHEEL_INFO_RE = re.compile(
+- r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.+?))(-(?P<build>\d[^-]*))?
+- -(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\.whl$""",
++ r"""^(?P<namever>(?P<name>[^-]+?)-(?P<ver>[^-]+?))(-(?P<build>\d[^-]*))?
++ -(?P<pyver>[^-]+?)-(?P<abi>[^-]+?)-(?P<plat>[^.]+?)\.whl$""",
+ re.VERBOSE)
+
+
+--
+2.32.0
+
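For context: CVE-2022-40898 hardens WHEEL_INFO_RE by forbidding '-' inside the name, version, python, and ABI fields (and '.' inside the platform tag), removing the ambiguity that allowed pathological backtracking. An illustrative parse with the tightened pattern (re.VERBOSE ignores the whitespace inside the expression; the filename is arbitrary):

    import re

    WHEEL_INFO_RE = re.compile(
        r"""^(?P<namever>(?P<name>[^-]+?)-(?P<ver>[^-]+?))(-(?P<build>\d[^-]*))?
         -(?P<pyver>[^-]+?)-(?P<abi>[^-]+?)-(?P<plat>[^.]+?)\.whl$""",
        re.VERBOSE)

    m = WHEEL_INFO_RE.match("wheel-0.37.1-py2.py3-none-any.whl")
    assert m and m.group("name") == "wheel" and m.group("ver") == "0.37.1"
    assert m.group("pyver") == "py2.py3" and m.group("plat") == "any"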
diff --git a/meta/recipes-devtools/python/python3-wheel_0.37.1.bb b/meta/recipes-devtools/python/python3-wheel_0.37.1.bb
index 2f7dd122ba..3ee03ddd36 100644
--- a/meta/recipes-devtools/python/python3-wheel_0.37.1.bb
+++ b/meta/recipes-devtools/python/python3-wheel_0.37.1.bb
@@ -8,7 +8,9 @@ SRC_URI[sha256sum] = "e9a504e793efbca1b8e0e9cb979a249cf4a0a7b5b8c9e8b65a5e39d495
inherit python_flit_core pypi
-SRC_URI += " file://0001-Backport-pyproject.toml-from-flit-backend-branch.patch"
+SRC_URI += "file://0001-Backport-pyproject.toml-from-flit-backend-branch.patch \
+ file://0001-Fixed-potential-DoS-attack-via-WHEEL_INFO_RE.patch \
+ "
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-devtools/python/python3/0001-test_storlines-skip-due-to-load-variability.patch b/meta/recipes-devtools/python/python3/0001-test_storlines-skip-due-to-load-variability.patch
new file mode 100644
index 0000000000..199031d42a
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/0001-test_storlines-skip-due-to-load-variability.patch
@@ -0,0 +1,32 @@
+From 013ff01fdf2aa6ca69a7c80a2a2996630877e4ea Mon Sep 17 00:00:00 2001
+From: Trevor Gamblin <tgamblin@baylibre.com>
+Date: Fri, 6 Oct 2023 10:59:44 -0400
+Subject: [PATCH] test_storlines: skip due to load variability
+
+This is yet another test that intermittently fails on the Yocto AB when
+a worker is under heavy load, so skip it during testing.
+
+Upstream-Status: Inappropriate [OE-Specific]
+
+[YOCTO #14933]
+
+Signed-off-by: Trevor Gamblin <tgamblin@baylibre.com>
+---
+ Lib/test/test_ftplib.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/Lib/test/test_ftplib.py b/Lib/test/test_ftplib.py
+index 082a90d46b..508814d56a 100644
+--- a/Lib/test/test_ftplib.py
++++ b/Lib/test/test_ftplib.py
+@@ -629,6 +629,7 @@ def test_storbinary_rest(self):
+ self.client.storbinary('stor', f, rest=r)
+ self.assertEqual(self.server.handler_instance.rest, str(r))
+
++ @unittest.skip('timing related test, dependent on load')
+ def test_storlines(self):
+ data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
+ f = io.BytesIO(data)
+--
+2.41.0
+
diff --git a/meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch b/meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch
index 0ead57e465..8c554feb4b 100644
--- a/meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch
+++ b/meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch
@@ -12,16 +12,18 @@ Upstream-Status: Inappropriate [oe-core specific]
Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
Signed-off-by: Alejandro Hernandez Samaniego <alejandro@enedino.org>
+Refresh for 3.10.7:
+Signed-off-by: Tim Orling <tim.orling@konsulko.com>
---
setup.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/setup.py b/setup.py
-index 2be4738..62f0e18 100644
+index 85a2b26357..7605347bf5 100644
--- a/setup.py
+++ b/setup.py
-@@ -517,6 +517,14 @@ class PyBuildExt(build_ext):
+@@ -517,6 +517,14 @@ def print_three_column(lst):
print("%-*s %-*s %-*s" % (longest, e, longest, f,
longest, g))
@@ -35,4 +37,4 @@ index 2be4738..62f0e18 100644
+
if self.missing:
print()
- print("Python build finished successfully!")
+ print("The necessary bits to build these optional modules were not "
diff --git a/meta/recipes-devtools/python/python3/get_module_deps3.py b/meta/recipes-devtools/python/python3/get_module_deps3.py
index 1f4c982aed..8e432b49af 100644
--- a/meta/recipes-devtools/python/python3/get_module_deps3.py
+++ b/meta/recipes-devtools/python/python3/get_module_deps3.py
@@ -32,7 +32,7 @@ def fix_path(dep_path):
dep_path = dep_path[dep_path.find(pivot)+len(pivot):]
if '/usr/bin' in dep_path:
- dep_path = dep_path.replace('/usr/bin''${bindir}')
+ dep_path = dep_path.replace('/usr/bin','${bindir}')
# Handle multilib, is there a better way?
if '/usr/lib32' in dep_path:
@@ -56,7 +56,7 @@ if debug == True:
try:
m = importlib.import_module(current_module)
# handle python packages which may not include all modules in the __init__
- if os.path.basename(m.__file__) == "__init__.py":
+ if hasattr(m, '__file__') and os.path.basename(m.__file__) == "__init__.py":
modulepath = os.path.dirname(m.__file__)
for i in os.listdir(modulepath):
if i.startswith("_") or not(i.endswith(".py")):
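For context: the first hunk above fixes an implicit string-concatenation bug (the original call passed a single argument to str.replace() and failed at runtime), and the second guards against modules that have no __file__ attribute. A tiny, standalone illustration of the concatenation pitfall:

    # Adjacent string literals concatenate, so the unpatched line effectively
    # called dep_path.replace('/usr/bin${bindir}') with one argument, which
    # raises TypeError. The fixed two-argument form substitutes as intended.
    assert '/usr/bin' '${bindir}' == '/usr/bin${bindir}'
    assert '/usr/bin/python3'.replace('/usr/bin', '${bindir}') == '${bindir}/python3'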
diff --git a/meta/recipes-devtools/python/python3_3.10.4.bb b/meta/recipes-devtools/python/python3_3.10.13.bb
index 357025f856..76e37e42a1 100644
--- a/meta/recipes-devtools/python/python3_3.10.4.bb
+++ b/meta/recipes-devtools/python/python3_3.10.13.bb
@@ -4,7 +4,7 @@ DESCRIPTION = "Python is a programming language that lets you work more quickly
LICENSE = "PSF-2.0"
SECTION = "devel/python"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=4b8801e752a2c70ac41a5f9aa243f766"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=fcf6b249c2641540219a727f35d8d2c2"
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://run-ptest \
@@ -35,6 +35,7 @@ SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://0001-setup.py-Do-not-detect-multiarch-paths-when-cross-co.patch \
file://deterministic_imports.patch \
file://0001-Avoid-shebang-overflow-on-python-config.py.patch \
+ file://0001-test_storlines-skip-due-to-load-variability.patch \
"
SRC_URI:append:class-native = " \
@@ -43,7 +44,7 @@ SRC_URI:append:class-native = " \
file://12-distutils-prefix-is-inside-staging-area.patch \
file://0001-Don-t-search-system-for-headers-libraries.patch \
"
-SRC_URI[sha256sum] = "80bf925f571da436b35210886cf79f6eb5fa5d6c571316b73568343451f77a19"
+SRC_URI[sha256sum] = "5c88848668640d3e152b35b4536ef1c23b2ca4bd2c957ef1ecbb053f571dd3f6"
# exclude pre-releases for both python 2.x and 3.x
UPSTREAM_CHECK_REGEX = "[Pp]ython-(?P<pver>\d+(\.\d+)+).tar"
@@ -60,6 +61,8 @@ CVE_CHECK_IGNORE += "CVE-2020-15523 CVE-2022-26488"
# The mailcap module is insecure by design, so this can't be fixed in a meaningful way.
# The module will be removed in the future and flaws documented.
CVE_CHECK_IGNORE += "CVE-2015-20107"
+# Not an issue, in fact expected behaviour
+CVE_CHECK_IGNORE += "CVE-2023-36632"
PYTHON_MAJMIN = "3.10"
diff --git a/meta/recipes-devtools/qemu/qemu-helper-native_1.0.bb b/meta/recipes-devtools/qemu/qemu-helper-native_1.0.bb
index aa9e499c77..e297586bbb 100644
--- a/meta/recipes-devtools/qemu/qemu-helper-native_1.0.bb
+++ b/meta/recipes-devtools/qemu/qemu-helper-native_1.0.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://${WORKDIR}/tunctl.c;endline=4;md5=ff3a09996bc5fff6bc5
SRC_URI = "\
file://tunctl.c \
- file://qemu-oe-bridge-helper \
+ file://qemu-oe-bridge-helper.c \
"
S = "${WORKDIR}"
@@ -16,13 +16,13 @@ inherit native
do_compile() {
${CC} ${CFLAGS} ${LDFLAGS} -Wall tunctl.c -o tunctl
+ ${CC} ${CFLAGS} ${LDFLAGS} -Wall qemu-oe-bridge-helper.c -o qemu-oe-bridge-helper
}
do_install() {
install -d ${D}${bindir}
install tunctl ${D}${bindir}/
-
- install -m 755 ${WORKDIR}/qemu-oe-bridge-helper ${D}${bindir}/
+ install qemu-oe-bridge-helper ${D}${bindir}/
}
DEPENDS += "qemu-system-native"
diff --git a/meta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper b/meta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper
deleted file mode 100755
index f057d4eef0..0000000000
--- a/meta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper
+++ /dev/null
@@ -1,25 +0,0 @@
-#! /bin/sh
-# Copyright 2020 Garmin Ltd. or its subsidiaries
-#
-# SPDX-License-Identifier: GPL-2.0
-#
-# Attempts to find and exec the host qemu-bridge-helper program
-
-# If the QEMU_BRIDGE_HELPER variable is set by the user, exec it.
-if [ -n "$QEMU_BRIDGE_HELPER" ]; then
- exec "$QEMU_BRIDGE_HELPER" "$@"
-fi
-
-# Search common paths for the helper program
-BN="qemu-bridge-helper"
-PATHS="/usr/libexec/ /usr/lib/qemu/"
-
-for p in $PATHS; do
- if [ -e "$p/$BN" ]; then
- exec "$p/$BN" "$@"
- fi
-done
-
-echo "$BN not found!" > /dev/stderr
-exit 1
-
diff --git a/meta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper.c b/meta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper.c
new file mode 100644
index 0000000000..9434e1d269
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu-helper/qemu-oe-bridge-helper.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Garmin Ltd. or its subsidiaries
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Attempts to find and exec the host qemu-bridge-helper program
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+void try_program(char const* path, char** args) {
+ if (access(path, X_OK) == 0) {
+ execv(path, args);
+ }
+}
+
+int main(int argc, char** argv) {
+ char* var;
+
+ var = getenv("QEMU_BRIDGE_HELPER");
+ if (var && var[0] != '\0') {
+ execvp(var, argv);
+ return 1;
+ }
+
+ try_program("/usr/libexec/qemu-bridge-helper", argv);
+ try_program("/usr/lib/qemu/qemu-bridge-helper", argv);
+
+ fprintf(stderr, "No bridge helper found\n");
+ return 1;
+}
+
diff --git a/meta/recipes-devtools/qemu/qemu-system-native_6.2.0.bb b/meta/recipes-devtools/qemu/qemu-system-native_6.2.0.bb
index bc5384d472..5ccede5095 100644
--- a/meta/recipes-devtools/qemu/qemu-system-native_6.2.0.bb
+++ b/meta/recipes-devtools/qemu/qemu-system-native_6.2.0.bb
@@ -11,7 +11,7 @@ DEPENDS = "glib-2.0-native zlib-native pixman-native qemu-native bison-native me
EXTRA_OECONF:append = " --target-list=${@get_qemu_system_target_list(d)}"
-PACKAGECONFIG ??= "fdt alsa kvm pie \
+PACKAGECONFIG ??= "fdt alsa kvm pie slirp \
${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virglrenderer epoxy', '', d)} \
"
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index cc69eca9ae..4747310ae4 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -13,7 +13,6 @@ inherit pkgconfig ptest python3-dir
LIC_FILES_CHKSUM = "file://COPYING;md5=441c28d2cf86e15a37fa47e15a72fbac \
file://COPYING.LIB;endline=24;md5=8c5efda6cf1e1b03dcfd0e6c0d271c7f"
-
SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
file://powerpc_rom.bin \
file://run-ptest \
@@ -35,6 +34,81 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
file://pvrdma.patch \
file://CVE-2021-4206.patch \
file://CVE-2021-4207.patch \
+ file://CVE-2022-35414.patch \
+ file://CVE-2021-3929.patch \
+ file://CVE-2021-4158.patch \
+ file://CVE-2022-0358.patch \
+ file://CVE-2022-0216_1.patch \
+ file://CVE-2022-0216_2.patch \
+ file://CVE-2021-3750-1.patch \
+ file://CVE-2021-3750-2.patch \
+ file://CVE-2021-3750-3.patch \
+ file://0001-use-uint32t-for-reply-queue-head-tail-values.patch \
+ file://0002_let_dma_memory_valid_function_take_MemTxAttrs_argument.patch \
+ file://0003_let_dma_memory_set_function_take_MemTxAttrs_argument.patch \
+ file://0004_let_dma_memory_rw_relaxed_function_take_MemTxAttrs_argument.patch \
+ file://0005_let_dma_memory_rw_function_take_MemTxAttrs_argument.patch \
+ file://0006_let_dma_memory_read_write_function_take_MemTxAttrs_argument.patch \
+ file://0007_let_dma_memory_map_function_take_MemTxAttrs_argument.patch \
+ file://0008_have_dma_buf_rw_function_take_a_void_pointer.patch \
+ file://0009_have_dma_buf_read_and_dma_buf_write_functions_take_a_void.patch \
+ file://0010_let_pci_dma_rw_function_take_MemTxAttrs_argument.patch \
+ file://0011_let_dma_buf_rw_function_take_MemTxAttrs_argument.patch \
+ file://0012_let_dma_buf_write_function_take_MemTxAttrs_argument.patch \
+ file://0013_let_dma_buf_read_function_take_MemTxAttrs_argument.patch \
+ file://0014_let_dma_buf_rw_function_propagate_MemTxResult.patch \
+ file://0015_let_st_pointer_dma_function_take_MemTxAttrs_argument.patch \
+ file://0016_let_ld_pointer_dma_function_take_MemTxAttrs_argument.patch \
+ file://0017_let_st_pointer_dma_function_propagate_MemTxResult.patch \
+ file://0018_let_ld_pointer_dma_function_propagate_MemTxResult.patch \
+ file://0019_let_st_pointer_pci_dma_function_take_MemTxAttrs_argument.patch \
+ file://0020_let_ld_pointer_pci_dma_function_take_MemTxAttrs_argument.patch \
+ file://0021_let_st_pointer_pci_dma_function_propagate_MemTxResult.patch \
+ file://0022_let_ld_pointer_pci_dma_function_propagate_MemTxResult.patch \
+ file://CVE-2021-3611_1.patch \
+ file://CVE-2021-3611_2.patch \
+ file://0001-net-tulip-Restrict-DMA-engine-to-memories.patch \
+ file://0001-softfloat-Extend-float_exception_flags-to-16-bits.patch \
+ file://0002-softfloat-Add-flag-specific-to-Inf-Inf.patch \
+ file://0003-softfloat-Add-flag-specific-to-Inf-0.patch \
+ file://0004-softfloat-Add-flags-specific-to-Inf-Inf-and-0-0.patch \
+ file://0005-softfloat-Add-flag-specific-to-signaling-nans.patch \
+ file://0006-target-ppc-Update-float_invalid_op_addsub-for-new-fl.patch \
+ file://0007-target-ppc-Update-float_invalid_op_mul-for-new-flags.patch \
+ file://0008-target-ppc-Update-float_invalid_op_div-for-new-flags.patch \
+ file://0009-target-ppc-Update-fmadd-for-new-flags.patch \
+ file://0010-target-ppc-Split-out-do_fmadd.patch \
+ file://0011-target-ppc-Fix-xs-max-min-cj-dp-to-use-VSX-registers.patch \
+ file://0012-target-ppc-Move-xs-max-min-cj-dp-to-decodetree.patch \
+ file://0013-target-ppc-fix-xscvqpdp-register-access.patch \
+ file://0014-target-ppc-move-xscvqpdp-to-decodetree.patch \
+ file://0015-target-ppc-ppc_store_fpscr-doesn-t-update-bits-0-to-.patch \
+ file://0016-target-ppc-Introduce-TRANS-FLAGS-macros.patch \
+ file://0017-target-ppc-Implement-Vector-Expand-Mask.patch \
+ file://0018-target-ppc-Implement-Vector-Extract-Mask.patch \
+ file://0019-target-ppc-Implement-Vector-Mask-Move-insns.patch \
+ file://0020-target-ppc-move-xs-n-madd-am-ds-p-xs-n-msub-am-ds-p-.patch \
+ file://0021-target-ppc-implement-xs-n-maddqp-o-xs-n-msubqp-o.patch \
+ file://CVE-2022-3165.patch \
+ file://CVE-2022-4144.patch \
+ file://0001-hw-display-qxl-Have-qxl_log_command-Return-early-if-.patch \
+ file://0001-hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch \
+ file://CVE-2023-0330.patch \
+ file://CVE-2023-3301.patch \
+ file://CVE-2023-3255.patch \
+ file://CVE-2023-2861.patch \
+ file://CVE-2020-14394.patch \
+ file://CVE-2023-3354.patch \
+ file://CVE-2023-3180.patch \
+ file://CVE-2021-3638.patch \
+ file://CVE-2023-1544.patch \
+ file://CVE-2023-5088.patch \
+ file://CVE-2024-24474.patch \
+ file://CVE-2023-6693.patch \
+ file://scsi-disk-allow-MODE-SELECT-block-desriptor-to-set-the-block-size.patch \
+ file://scsi-disk-ensure-block-size-is-non-zero-and-changes-limited-to-bits-8-15.patch \
+ file://CVE-2023-42467.patch \
+ file://CVE-2023-6683.patch \
"
UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
@@ -54,6 +128,15 @@ CVE_CHECK_IGNORE += "CVE-2007-0998"
# https://bugzilla.redhat.com/show_bug.cgi?id=1609015#c11
CVE_CHECK_IGNORE += "CVE-2018-18438"
+# As per https://nvd.nist.gov/vuln/detail/CVE-2023-0664
+# https://bugzilla.redhat.com/show_bug.cgi?id=2167423
+# this issue is Windows-specific.
+CVE_CHECK_IGNORE += "CVE-2023-0664"
+
+# As per https://bugzilla.redhat.com/show_bug.cgi?id=2203387
+# RHEL-specific issue
+CVE_CHECK_IGNORE += "CVE-2023-2680"
+
COMPATIBLE_HOST:mipsarchn32 = "null"
COMPATIBLE_HOST:mipsarchn64 = "null"
COMPATIBLE_HOST:riscv32 = "null"
@@ -153,6 +236,7 @@ PACKAGECONFIG:remove:mingw32 = "kvm virglrenderer epoxy gtk+"
PACKAGECONFIG[sdl] = "--enable-sdl,--disable-sdl,libsdl2"
PACKAGECONFIG[virtfs] = "--enable-virtfs --enable-attr --enable-cap-ng,--disable-virtfs,libcap-ng attr,"
PACKAGECONFIG[aio] = "--enable-linux-aio,--disable-linux-aio,libaio,"
+PACKAGECONFIG[uring] = "--enable-linux-io-uring,--disable-linux-io-uring,liburing"
PACKAGECONFIG[xfs] = "--enable-xfsctl,--disable-xfsctl,xfsprogs,"
PACKAGECONFIG[xen] = "--enable-xen,--disable-xen,xen-tools,xen-tools-libxenstore xen-tools-libxenctrl xen-tools-libxenguest"
PACKAGECONFIG[vnc-sasl] = "--enable-vnc --enable-vnc-sasl,--disable-vnc-sasl,cyrus-sasl,"
@@ -199,6 +283,12 @@ PACKAGECONFIG[pmem] = "--enable-libpmem,--disable-libpmem,pmdk"
PACKAGECONFIG[pulsedio] = "--enable-pa,--disable-pa,pulseaudio"
PACKAGECONFIG[selinux] = "--enable-selinux,--disable-selinux"
PACKAGECONFIG[bpf] = "--enable-bpf,--disable-bpf,libbpf"
+PACKAGECONFIG[capstone] = "--enable-capstone,--disable-capstone"
+PACKAGECONFIG[rdma] = "--enable-rdma,--disable-rdma"
+PACKAGECONFIG[vde] = "--enable-vde,--disable-vde"
+PACKAGECONFIG[slirp] = "--enable-slirp=internal,--disable-slirp"
+PACKAGECONFIG[brlapi] = "--enable-brlapi,--disable-brlapi"
+PACKAGECONFIG[jack] = "--enable-jack,--disable-jack,jack,"
INSANE_SKIP:${PN} = "arch"
diff --git a/meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Have-qxl_log_command-Return-early-if-.patch b/meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Have-qxl_log_command-Return-early-if-.patch
new file mode 100644
index 0000000000..cd846222c9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Have-qxl_log_command-Return-early-if-.patch
@@ -0,0 +1,57 @@
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/61c34fc]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 61c34fc194b776ecadc39fb26b061331107e5599 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@linaro.org>
+Date: Mon, 28 Nov 2022 21:27:37 +0100
+Subject: [PATCH] hw/display/qxl: Have qxl_log_command Return early if no
+ log_cmd handler
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Only 3 command types are logged: no need to call qxl_phys2virt()
+for the other types. Using different cases will help to pass
+different structure sizes to qxl_phys2virt() in a pair of commits.
+
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20221128202741.4945-2-philmd@linaro.org>
+---
+ hw/display/qxl-logger.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c
+index 68bfa47568..1bcf803db6 100644
+--- a/hw/display/qxl-logger.c
++++ b/hw/display/qxl-logger.c
+@@ -247,6 +247,16 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ qxl_name(qxl_type, ext->cmd.type),
+ compat ? "(compat)" : "");
+
++ switch (ext->cmd.type) {
++ case QXL_CMD_DRAW:
++ break;
++ case QXL_CMD_SURFACE:
++ break;
++ case QXL_CMD_CURSOR:
++ break;
++ default:
++ goto out;
++ }
+ data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
+ if (!data) {
+ return 1;
+@@ -269,6 +279,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ qxl_log_cmd_cursor(qxl, data, ext->group_id);
+ break;
+ }
++out:
+ fprintf(stderr, "\n");
+ return 0;
+ }
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch b/meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch
new file mode 100644
index 0000000000..ac51cf567a
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0001-hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch
@@ -0,0 +1,217 @@
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/8efec0e]
+
+Backport and rebase patch to fix a compile error introduced by CVE-2022-4144.patch:
+
+../qemu-6.2.0/hw/display/qxl.c: In function 'qxl_phys2virt':
+../qemu-6.2.0/hw/display/qxl.c:1477:67: error: 'size' undeclared (first use in this function); did you mean 'gsize'?
+ 1477 | if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) {
+ | ^~~~
+ | gsize
+../qemu-6.2.0/hw/display/qxl.c:1477:67: note: each undeclared identifier is reported only once for each function it appears in
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 8efec0ef8bbc1e75a7ebf6e325a35806ece9b39f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@linaro.org>
+Date: Mon, 28 Nov 2022 21:27:39 +0100
+Subject: [PATCH] hw/display/qxl: Pass requested buffer size to qxl_phys2virt()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Currently qxl_phys2virt() doesn't check for buffer overrun.
+In order to do so in the next commit, pass the buffer size
+as argument.
+
+For QXLCursor in qxl_render_cursor() -> qxl_cursor() we
+verify the size of the chunked data ahead, checking we can
+access 'sizeof(QXLCursor) + chunk->data_size' bytes.
+Since in the SPICE_CURSOR_TYPE_MONO case the cursor is
+assumed to fit in one chunk, no changes are required.
+In SPICE_CURSOR_TYPE_ALPHA the ahead read is handled in
+qxl_unpack_chunks().
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Acked-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20221128202741.4945-4-philmd@linaro.org>
+---
+ hw/display/qxl-logger.c | 11 ++++++++---
+ hw/display/qxl-render.c | 20 ++++++++++++++++----
+ hw/display/qxl.c | 14 +++++++++-----
+ hw/display/qxl.h | 3 ++-
+ 4 files changed, 35 insertions(+), 13 deletions(-)
+
+diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c
+index 1bcf803..35c38f6 100644
+--- a/hw/display/qxl-logger.c
++++ b/hw/display/qxl-logger.c
+@@ -106,7 +106,7 @@ static int qxl_log_image(PCIQXLDevice *qxl, QXLPHYSICAL addr, int group_id)
+ QXLImage *image;
+ QXLImageDescriptor *desc;
+
+- image = qxl_phys2virt(qxl, addr, group_id);
++ image = qxl_phys2virt(qxl, addr, group_id, sizeof(QXLImage));
+ if (!image) {
+ return 1;
+ }
+@@ -214,7 +214,8 @@ int qxl_log_cmd_cursor(PCIQXLDevice *qxl, QXLCursorCmd *cmd, int group_id)
+ cmd->u.set.position.y,
+ cmd->u.set.visible ? "yes" : "no",
+ cmd->u.set.shape);
+- cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id);
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id,
++ sizeof(QXLCursor));
+ if (!cursor) {
+ return 1;
+ }
+@@ -236,6 +237,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ {
+ bool compat = ext->flags & QXL_COMMAND_FLAG_COMPAT;
+ void *data;
++ size_t datasz;
+ int ret;
+
+ if (!qxl->cmdlog) {
+@@ -249,15 +251,18 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+
+ switch (ext->cmd.type) {
+ case QXL_CMD_DRAW:
++ datasz = compat ? sizeof(QXLCompatDrawable) : sizeof(QXLDrawable);
+ break;
+ case QXL_CMD_SURFACE:
++ datasz = sizeof(QXLSurfaceCmd);
+ break;
+ case QXL_CMD_CURSOR:
++ datasz = sizeof(QXLCursorCmd);
+ break;
+ default:
+ goto out;
+ }
+- data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, datasz);
+ if (!data) {
+ return 1;
+ }
+diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
+index ca21700..fcfd40c 100644
+--- a/hw/display/qxl-render.c
++++ b/hw/display/qxl-render.c
+@@ -107,7 +107,9 @@ static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl)
+ qxl->guest_primary.resized = 0;
+ qxl->guest_primary.data = qxl_phys2virt(qxl,
+ qxl->guest_primary.surface.mem,
+- MEMSLOT_GROUP_GUEST);
++ MEMSLOT_GROUP_GUEST,
++ qxl->guest_primary.abs_stride
++ * height);
+ if (!qxl->guest_primary.data) {
+ goto end;
+ }
+@@ -228,7 +230,8 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl,
+ if (offset == size) {
+ return;
+ }
+- chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id);
++ chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id,
++ sizeof(QXLDataChunk) + chunk->data_size);
+ if (!chunk) {
+ return;
+ }
+@@ -295,7 +298,8 @@ fail:
+ /* called from spice server thread context only */
+ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
+ {
+- QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCursorCmd));
+ QXLCursor *cursor;
+ QEMUCursor *c;
+
+@@ -314,7 +318,15 @@ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
+ }
+ switch (cmd->type) {
+ case QXL_CURSOR_SET:
+- cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
++ /* First read the QXLCursor to get QXLDataChunk::data_size ... */
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id,
++ sizeof(QXLCursor));
++ if (!cursor) {
++ return 1;
++ }
++ /* Then read including the chunked data following QXLCursor. */
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id,
++ sizeof(QXLCursor) + cursor->chunk.data_size);
+ if (!cursor) {
+ return 1;
+ }
+diff --git a/hw/display/qxl.c b/hw/display/qxl.c
+index ae8aa07..2a4b2d4 100644
+--- a/hw/display/qxl.c
++++ b/hw/display/qxl.c
+@@ -274,7 +274,8 @@ static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay)
+ QXL_IO_MONITORS_CONFIG_ASYNC));
+ }
+
+- cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST);
++ cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST,
++ sizeof(QXLMonitorsConfig));
+ if (cfg != NULL && cfg->count == 1) {
+ qxl->guest_primary.resized = 1;
+ qxl->guest_head0_width = cfg->heads[0].width;
+@@ -459,7 +460,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
+ switch (le32_to_cpu(ext->cmd.type)) {
+ case QXL_CMD_SURFACE:
+ {
+- QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLSurfaceCmd));
+
+ if (!cmd) {
+ return 1;
+@@ -494,7 +496,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
+ }
+ case QXL_CMD_CURSOR:
+ {
+- QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCursorCmd));
+
+ if (!cmd) {
+ return 1;
+@@ -1463,7 +1466,8 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ }
+
+ /* can be also called from spice server thread context */
+-void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id)
++void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id,
++ size_t size)
+ {
+ uint64_t offset;
+ uint32_t slot;
+@@ -1971,7 +1975,7 @@ static void qxl_dirty_surfaces(PCIQXLDevice *qxl)
+ }
+
+ cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i],
+- MEMSLOT_GROUP_GUEST);
++ MEMSLOT_GROUP_GUEST, sizeof(QXLSurfaceCmd));
+ assert(cmd);
+ assert(cmd->type == QXL_SURFACE_CMD_CREATE);
+ qxl_dirty_one_surface(qxl, cmd->u.surface_create.data,
+diff --git a/hw/display/qxl.h b/hw/display/qxl.h
+index 30d21f4..4551c23 100644
+--- a/hw/display/qxl.h
++++ b/hw/display/qxl.h
+@@ -147,7 +147,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL)
+ #define QXL_DEFAULT_REVISION (QXL_REVISION_STABLE_V12 + 1)
+
+ /* qxl.c */
+-void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id);
++void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id,
++ size_t size);
+ void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
+ GCC_FMT_ATTR(2, 3);
+
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0001-net-tulip-Restrict-DMA-engine-to-memories.patch b/meta/recipes-devtools/qemu/qemu/0001-net-tulip-Restrict-DMA-engine-to-memories.patch
new file mode 100644
index 0000000000..6c85a77ba7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0001-net-tulip-Restrict-DMA-engine-to-memories.patch
@@ -0,0 +1,64 @@
+CVE: CVE-2022-2962
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 5c5c50b0a73d78ffe18336c9996fef5eae9bbbb0 Mon Sep 17 00:00:00 2001
+From: Zheyu Ma <zheyuma97@gmail.com>
+Date: Sun, 21 Aug 2022 20:43:43 +0800
+Subject: [PATCH] net: tulip: Restrict DMA engine to memories
+
+The DMA engine is started by I/O access and then itself accesses the
+I/O registers, triggering a reentrancy bug.
+
+The following log can reveal it:
+==5637==ERROR: AddressSanitizer: stack-overflow
+ #0 0x5595435f6078 in tulip_xmit_list_update qemu/hw/net/tulip.c:673
+ #1 0x5595435f204a in tulip_write qemu/hw/net/tulip.c:805:13
+ #2 0x559544637f86 in memory_region_write_accessor qemu/softmmu/memory.c:492:5
+ #3 0x5595446379fa in access_with_adjusted_size qemu/softmmu/memory.c:554:18
+ #4 0x5595446372fa in memory_region_dispatch_write qemu/softmmu/memory.c
+ #5 0x55954468b74c in flatview_write_continue qemu/softmmu/physmem.c:2825:23
+ #6 0x559544683662 in flatview_write qemu/softmmu/physmem.c:2867:12
+ #7 0x5595446833f3 in address_space_write qemu/softmmu/physmem.c:2963:18
+ #8 0x5595435fb082 in dma_memory_rw_relaxed qemu/include/sysemu/dma.h:87:12
+ #9 0x5595435fb082 in dma_memory_rw qemu/include/sysemu/dma.h:130:12
+ #10 0x5595435fb082 in dma_memory_write qemu/include/sysemu/dma.h:171:12
+ #11 0x5595435fb082 in stl_le_dma qemu/include/sysemu/dma.h:272:1
+ #12 0x5595435fb082 in stl_le_pci_dma qemu/include/hw/pci/pci.h:910:1
+ #13 0x5595435fb082 in tulip_desc_write qemu/hw/net/tulip.c:101:9
+ #14 0x5595435f7e3d in tulip_xmit_list_update qemu/hw/net/tulip.c:706:9
+ #15 0x5595435f204a in tulip_write qemu/hw/net/tulip.c:805:13
+
+Fix this bug by restricting the DMA engine to memory regions.
+
+Signed-off-by: Zheyu Ma <zheyuma97@gmail.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+---
+ hw/net/tulip.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/hw/net/tulip.c b/hw/net/tulip.c
+index 097e905bec..b9e42c322a 100644
+--- a/hw/net/tulip.c
++++ b/hw/net/tulip.c
+@@ -70,7 +70,7 @@ static const VMStateDescription vmstate_pci_tulip = {
+ static void tulip_desc_read(TULIPState *s, hwaddr p,
+ struct tulip_descriptor *desc)
+ {
+- const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++ const MemTxAttrs attrs = { .memory = true };
+
+ if (s->csr[0] & CSR0_DBO) {
+ ldl_be_pci_dma(&s->dev, p, &desc->status, attrs);
+@@ -88,7 +88,7 @@ static void tulip_desc_read(TULIPState *s, hwaddr p,
+ static void tulip_desc_write(TULIPState *s, hwaddr p,
+ struct tulip_descriptor *desc)
+ {
+- const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++ const MemTxAttrs attrs = { .memory = true };
+
+ if (s->csr[0] & CSR0_DBO) {
+ stl_be_pci_dma(&s->dev, p, desc->status, attrs);
+--
+2.34.1
+
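In miniature, the fix above works by tagging DMA transactions with an attribute the memory core can check, so a malicious descriptor can no longer steer the engine back onto the device's own registers. The sketch below uses made-up names (TxAttrs, RegionKind, dma_write_ok) to show only the shape of that check, not QEMU's actual MemTxAttrs handling:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical transaction attributes; only the .memory bit matters here. */
    typedef struct { bool memory; } TxAttrs;

    typedef enum { REGION_RAM, REGION_MMIO } RegionKind;

    /* When the caller asks for a memory-only transaction, anything that is not
     * plain RAM is refused instead of being dispatched as device MMIO again. */
    static bool dma_write_ok(RegionKind target, TxAttrs attrs)
    {
        if (attrs.memory && target != REGION_RAM) {
            return false;
        }
        return true;
    }

    int main(void)
    {
        TxAttrs restricted  = { .memory = true };
        TxAttrs unspecified = { 0 };
        printf("%d\n", dma_write_ok(REGION_MMIO, restricted));   /* 0: blocked */
        printf("%d\n", dma_write_ok(REGION_MMIO, unspecified));  /* 1: allowed */
        return 0;
    }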
diff --git a/meta/recipes-devtools/qemu/qemu/0001-softfloat-Extend-float_exception_flags-to-16-bits.patch b/meta/recipes-devtools/qemu/qemu/0001-softfloat-Extend-float_exception_flags-to-16-bits.patch
new file mode 100644
index 0000000000..e9c47f6901
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0001-softfloat-Extend-float_exception_flags-to-16-bits.patch
@@ -0,0 +1,75 @@
+From 0bec1ded33a857f59cf5f3ceca2f72694256e710 Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 01/21] softfloat: Extend float_exception_flags to 16 bits
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We will shortly have more than 8 bits of exceptions.
+Repack the existing flags into low bits and reformat to hex.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=149a48f6e6ccedfa01307d45884aa480f5bf77c5]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Message-Id: <20211119160502.17432-2-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ include/fpu/softfloat-types.h | 16 ++++++++--------
+ include/fpu/softfloat.h | 2 +-
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
+index 5bcbd041f7..65a43aff59 100644
+--- a/include/fpu/softfloat-types.h
++++ b/include/fpu/softfloat-types.h
+@@ -145,13 +145,13 @@ typedef enum __attribute__((__packed__)) {
+ */
+
+ enum {
+- float_flag_invalid = 1,
+- float_flag_divbyzero = 4,
+- float_flag_overflow = 8,
+- float_flag_underflow = 16,
+- float_flag_inexact = 32,
+- float_flag_input_denormal = 64,
+- float_flag_output_denormal = 128
++ float_flag_invalid = 0x0001,
++ float_flag_divbyzero = 0x0002,
++ float_flag_overflow = 0x0004,
++ float_flag_underflow = 0x0008,
++ float_flag_inexact = 0x0010,
++ float_flag_input_denormal = 0x0020,
++ float_flag_output_denormal = 0x0040,
+ };
+
+ /*
+@@ -171,8 +171,8 @@ typedef enum __attribute__((__packed__)) {
+ */
+
+ typedef struct float_status {
++ uint16_t float_exception_flags;
+ FloatRoundMode float_rounding_mode;
+- uint8_t float_exception_flags;
+ FloatX80RoundPrec floatx80_rounding_precision;
+ bool tininess_before_rounding;
+ /* should denormalised results go to zero and set the inexact flag? */
+diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
+index a249991e61..0d3b407807 100644
+--- a/include/fpu/softfloat.h
++++ b/include/fpu/softfloat.h
+@@ -100,7 +100,7 @@ typedef enum {
+ | Routine to raise any or all of the software IEC/IEEE floating-point
+ | exception flags.
+ *----------------------------------------------------------------------------*/
+-static inline void float_raise(uint8_t flags, float_status *status)
++static inline void float_raise(uint16_t flags, float_status *status)
+ {
+ status->float_exception_flags |= flags;
+ }
+--
+2.17.1
+
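The repacked values above are plain bit flags, and widening float_exception_flags to uint16_t is what leaves room for the new invalid-operation bits (0x0080 up to 0x2000) introduced by the follow-up patches in this series. A self-contained sketch using the same constants, with a simplified float_status that carries only the flag word:

    #include <stdint.h>
    #include <stdio.h>

    enum {
        float_flag_invalid          = 0x0001,
        float_flag_divbyzero        = 0x0002,
        float_flag_overflow         = 0x0004,
        float_flag_underflow        = 0x0008,
        float_flag_inexact          = 0x0010,
        float_flag_input_denormal   = 0x0020,
        float_flag_output_denormal  = 0x0040,
        /* the series later adds bits up to 0x2000, which no longer fit in
         * the old uint8_t accumulator */
    };

    typedef struct { uint16_t float_exception_flags; } float_status;

    static void float_raise(uint16_t flags, float_status *status)
    {
        status->float_exception_flags |= flags;
    }

    int main(void)
    {
        float_status st = { 0 };
        float_raise(float_flag_invalid | float_flag_overflow, &st);
        printf("0x%04x\n", st.float_exception_flags);   /* prints 0x0005 */
        return 0;
    }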
diff --git a/meta/recipes-devtools/qemu/qemu/0001-use-uint32t-for-reply-queue-head-tail-values.patch b/meta/recipes-devtools/qemu/qemu/0001-use-uint32t-for-reply-queue-head-tail-values.patch
new file mode 100644
index 0000000000..37e122f781
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0001-use-uint32t-for-reply-queue-head-tail-values.patch
@@ -0,0 +1,83 @@
+From 41d5e8da3d5e0a143a9fb397c9f34707ec544997 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 22:43:05 +0100
+Subject: [PATCH] hw/scsi/megasas: Use uint32_t for reply queue head/tail
+ values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+While the reply queue values fit in 16 bits, they are accessed
+as 32-bit values:
+
+ 661: s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa);
+ 662: s->reply_queue_head %= MEGASAS_MAX_FRAMES;
+ 663: s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa);
+ 664: s->reply_queue_tail %= MEGASAS_MAX_FRAMES;
+
+Having:
+
+ 41:#define MEGASAS_MAX_FRAMES 2048 /* Firmware limit at 65535 */
+
+In order to update the ld/st*_pci_dma() API to pass the address
+of the value to access, it is simpler to have the head/tail declared
+as 32-bit values. Replace the uint16_t by uint32_t, wasting 4 bytes in
+the MegasasState structure.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=41d5e8da3d5e0a143a9fb397c9f34707ec544997]
+
+Acked-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-20-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/scsi/megasas.c | 4 ++--
+ hw/scsi/trace-events | 8 ++++----
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
+index 8f35784..14ec6d6 100644
+--- a/hw/scsi/megasas.c
++++ b/hw/scsi/megasas.c
+@@ -109,8 +109,8 @@ struct MegasasState {
+ uint64_t reply_queue_pa;
+ void *reply_queue;
+ uint16_t reply_queue_len;
+- uint16_t reply_queue_head;
+- uint16_t reply_queue_tail;
++ uint32_t reply_queue_head;
++ uint32_t reply_queue_tail;
+ uint64_t consumer_pa;
+ uint64_t producer_pa;
+
+diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
+index 92d5b40..ae8551f 100644
+--- a/hw/scsi/trace-events
++++ b/hw/scsi/trace-events
+@@ -42,18 +42,18 @@ mptsas_config_sas_phy(void *dev, int address, int port, int phy_handle, int dev_
+
+ # megasas.c
+ megasas_init_firmware(uint64_t pa) "pa 0x%" PRIx64 " "
+-megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx64 " tail 0x%" PRIx64 " flags 0x%x"
++megasas_init_queue(uint64_t queue_pa, int queue_len, uint32_t head, uint32_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx32 " tail 0x%" PRIx32 " flags 0x%x"
+ megasas_initq_map_failed(int frame) "scmd %d: failed to map queue"
+ megasas_initq_mapped(uint64_t pa) "queue already mapped at 0x%" PRIx64
+ megasas_initq_mismatch(int queue_len, int fw_cmds) "queue size %d max fw cmds %d"
+ megasas_qf_mapped(unsigned int index) "skip mapped frame 0x%x"
+ megasas_qf_new(unsigned int index, uint64_t frame) "frame 0x%x addr 0x%" PRIx64
+ megasas_qf_busy(unsigned long pa) "all frames busy for frame 0x%lx"
+-megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, unsigned int head, unsigned int tail, int busy) "frame 0x%x count %d context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d"
+-megasas_qf_update(unsigned int head, unsigned int tail, unsigned int busy) "head 0x%x tail 0x%x busy %d"
++megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, uint32_t head, uint32_t tail, unsigned int busy) "frame 0x%x count %d context 0x%" PRIx64 " head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u"
++megasas_qf_update(uint32_t head, uint32_t tail, unsigned int busy) "head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u"
+ megasas_qf_map_failed(int cmd, unsigned long frame) "scmd %d: frame %lu"
+ megasas_qf_complete_noirq(uint64_t context) "context 0x%" PRIx64 " "
+-megasas_qf_complete(uint64_t context, unsigned int head, unsigned int tail, int busy) "context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d"
++megasas_qf_complete(uint64_t context, uint32_t head, uint32_t tail, int busy) "context 0x%" PRIx64 " head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u"
+ megasas_frame_busy(uint64_t addr) "frame 0x%" PRIx64 " busy"
+ megasas_unhandled_frame_cmd(int cmd, uint8_t frame_cmd) "scmd %d: MFI cmd 0x%x"
+ megasas_handle_scsi(const char *frame, int bus, int dev, int lun, void *sdev, unsigned long size) "%s dev %x/%x/%x sdev %p xfer %lu"
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0002-softfloat-Add-flag-specific-to-Inf-Inf.patch b/meta/recipes-devtools/qemu/qemu/0002-softfloat-Add-flag-specific-to-Inf-Inf.patch
new file mode 100644
index 0000000000..2713ff370d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0002-softfloat-Add-flag-specific-to-Inf-Inf.patch
@@ -0,0 +1,59 @@
+From 9b0737858b2b68c3a4d1e0611f2732679c997c6d Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 02/21] softfloat: Add flag specific to Inf - Inf
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+PowerPC has this flag, and it's easier to compute it here
+than after the fact.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=ba11446c40903b9d97fb75a078d43fee6444d3b6]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-3-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ fpu/softfloat-parts.c.inc | 3 ++-
+ include/fpu/softfloat-types.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
+index 41d4b17e41..eb2b475ca4 100644
+--- a/fpu/softfloat-parts.c.inc
++++ b/fpu/softfloat-parts.c.inc
+@@ -354,7 +354,7 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b,
+ return a;
+ }
+ /* Inf - Inf */
+- float_raise(float_flag_invalid, s);
++ float_raise(float_flag_invalid | float_flag_invalid_isi, s);
+ parts_default_nan(a, s);
+ return a;
+ }
+@@ -494,6 +494,7 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
+
+ if (ab_mask & float_cmask_inf) {
+ if (c->cls == float_class_inf && a->sign != c->sign) {
++ float_raise(float_flag_invalid | float_flag_invalid_isi, s);
+ goto d_nan;
+ }
+ goto return_inf;
+diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
+index 65a43aff59..eaa12e1e00 100644
+--- a/include/fpu/softfloat-types.h
++++ b/include/fpu/softfloat-types.h
+@@ -152,6 +152,7 @@ enum {
+ float_flag_inexact = 0x0010,
+ float_flag_input_denormal = 0x0020,
+ float_flag_output_denormal = 0x0040,
++ float_flag_invalid_isi = 0x0080, /* inf - inf */
+ };
+
+ /*
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0002_let_dma_memory_valid_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0002_let_dma_memory_valid_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..04a655315f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0002_let_dma_memory_valid_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,60 @@
+From 7ccb391ccd594b3f33de8deb293ff8d47bb4e219 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 3 Sep 2020 09:28:49 +0200
+Subject: [PATCH] dma: Let dma_memory_valid() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling
+dma_memory_valid().
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=7ccb391ccd594b3f33de8deb293ff8d47bb4e219]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20211223115554.3155328-2-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ include/hw/ppc/spapr_vio.h | 2 +-
+ include/sysemu/dma.h | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
+index 4bea87f..4c45f15 100644
+--- a/include/hw/ppc/spapr_vio.h
++++ b/include/hw/ppc/spapr_vio.h
+@@ -91,7 +91,7 @@ static inline void spapr_vio_irq_pulse(SpaprVioDevice *dev)
+ static inline bool spapr_vio_dma_valid(SpaprVioDevice *dev, uint64_t taddr,
+ uint32_t size, DMADirection dir)
+ {
+- return dma_memory_valid(&dev->as, taddr, size, dir);
++ return dma_memory_valid(&dev->as, taddr, size, dir, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static inline int spapr_vio_dma_read(SpaprVioDevice *dev, uint64_t taddr,
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 3201e79..296f3b5 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -73,11 +73,11 @@ static inline void dma_barrier(AddressSpace *as, DMADirection dir)
+ * dma_memory_{read,write}() and check for errors */
+ static inline bool dma_memory_valid(AddressSpace *as,
+ dma_addr_t addr, dma_addr_t len,
+- DMADirection dir)
++ DMADirection dir, MemTxAttrs attrs)
+ {
+ return address_space_access_valid(as, addr, len,
+ dir == DMA_DIRECTION_FROM_DEVICE,
+- MEMTXATTRS_UNSPECIFIED);
++ attrs);
+ }
+
+ static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0003-softfloat-Add-flag-specific-to-Inf-0.patch b/meta/recipes-devtools/qemu/qemu/0003-softfloat-Add-flag-specific-to-Inf-0.patch
new file mode 100644
index 0000000000..1b21e3cfeb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0003-softfloat-Add-flag-specific-to-Inf-0.patch
@@ -0,0 +1,126 @@
+From 613f373f0b652ab2fb2572633e7a23807096790b Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 03/21] softfloat: Add flag specific to Inf * 0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+PowerPC has this flag, and it's easier to compute it here
+than after the fact.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=bead3c9b0ff8efd652afb27923d8ab4458b3bbd9]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-4-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ fpu/softfloat-parts.c.inc | 4 ++--
+ fpu/softfloat-specialize.c.inc | 12 ++++++------
+ include/fpu/softfloat-types.h | 1 +
+ 3 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
+index eb2b475ca4..3ed793347b 100644
+--- a/fpu/softfloat-parts.c.inc
++++ b/fpu/softfloat-parts.c.inc
+@@ -423,7 +423,7 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
+
+ /* Inf * Zero == NaN */
+ if (unlikely(ab_mask == float_cmask_infzero)) {
+- float_raise(float_flag_invalid, s);
++ float_raise(float_flag_invalid | float_flag_invalid_imz, s);
+ parts_default_nan(a, s);
+ return a;
+ }
+@@ -489,6 +489,7 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
+
+ if (unlikely(ab_mask != float_cmask_normal)) {
+ if (unlikely(ab_mask == float_cmask_infzero)) {
++ float_raise(float_flag_invalid | float_flag_invalid_imz, s);
+ goto d_nan;
+ }
+
+@@ -567,7 +568,6 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
+ goto finish_sign;
+
+ d_nan:
+- float_raise(float_flag_invalid, s);
+ parts_default_nan(a, s);
+ return a;
+ }
+diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
+index f2ad0f335e..943e3301d2 100644
+--- a/fpu/softfloat-specialize.c.inc
++++ b/fpu/softfloat-specialize.c.inc
+@@ -506,7 +506,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+ * the default NaN
+ */
+ if (infzero && is_qnan(c_cls)) {
+- float_raise(float_flag_invalid, status);
++ float_raise(float_flag_invalid | float_flag_invalid_imz, status);
+ return 3;
+ }
+
+@@ -533,7 +533,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+ * case sets InvalidOp and returns the default NaN
+ */
+ if (infzero) {
+- float_raise(float_flag_invalid, status);
++ float_raise(float_flag_invalid | float_flag_invalid_imz, status);
+ return 3;
+ }
+ /* Prefer sNaN over qNaN, in the a, b, c order. */
+@@ -556,7 +556,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+ * case sets InvalidOp and returns the input value 'c'
+ */
+ if (infzero) {
+- float_raise(float_flag_invalid, status);
++ float_raise(float_flag_invalid | float_flag_invalid_imz, status);
+ return 2;
+ }
+ /* Prefer sNaN over qNaN, in the c, a, b order. */
+@@ -580,7 +580,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+ * a default NaN
+ */
+ if (infzero) {
+- float_raise(float_flag_invalid, status);
++ float_raise(float_flag_invalid | float_flag_invalid_imz, status);
+ return 2;
+ }
+
+@@ -597,7 +597,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+ #elif defined(TARGET_RISCV)
+ /* For RISC-V, InvalidOp is set when multiplicands are Inf and zero */
+ if (infzero) {
+- float_raise(float_flag_invalid, status);
++ float_raise(float_flag_invalid | float_flag_invalid_imz, status);
+ }
+ return 3; /* default NaN */
+ #elif defined(TARGET_XTENSA)
+@@ -606,7 +606,7 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+ * an input NaN if we have one (ie c).
+ */
+ if (infzero) {
+- float_raise(float_flag_invalid, status);
++ float_raise(float_flag_invalid | float_flag_invalid_imz, status);
+ return 2;
+ }
+ if (status->use_first_nan) {
+diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
+index eaa12e1e00..56b4cf7835 100644
+--- a/include/fpu/softfloat-types.h
++++ b/include/fpu/softfloat-types.h
+@@ -153,6 +153,7 @@ enum {
+ float_flag_input_denormal = 0x0020,
+ float_flag_output_denormal = 0x0040,
+ float_flag_invalid_isi = 0x0080, /* inf - inf */
++ float_flag_invalid_imz = 0x0100, /* inf * 0 */
+ };
+
+ /*
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0003_let_dma_memory_set_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0003_let_dma_memory_set_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..f13707a407
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0003_let_dma_memory_set_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,98 @@
+From 7a36e42d9114474278ce30ba36945cc62292eb60 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 3 Sep 2020 10:28:32 +0200
+Subject: [PATCH] dma: Let dma_memory_set() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling
+dma_memory_set().
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=7a36e42d9114474278ce30ba36945cc62292eb60]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20211223115554.3155328-3-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/nvram/fw_cfg.c | 3 ++-
+ include/hw/ppc/spapr_vio.h | 3 ++-
+ include/sysemu/dma.h | 3 ++-
+ softmmu/dma-helpers.c | 5 ++---
+ 4 files changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
+index c06b30d..f7803fe 100644
+--- a/hw/nvram/fw_cfg.c
++++ b/hw/nvram/fw_cfg.c
+@@ -399,7 +399,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
+ * tested before.
+ */
+ if (read) {
+- if (dma_memory_set(s->dma_as, dma.address, 0, len)) {
++ if (dma_memory_set(s->dma_as, dma.address, 0, len,
++ MEMTXATTRS_UNSPECIFIED)) {
+ dma.control |= FW_CFG_DMA_CTL_ERROR;
+ }
+ }
+diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
+index 4c45f15..c90e74a 100644
+--- a/include/hw/ppc/spapr_vio.h
++++ b/include/hw/ppc/spapr_vio.h
+@@ -111,7 +111,8 @@ static inline int spapr_vio_dma_write(SpaprVioDevice *dev, uint64_t taddr,
+ static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr,
+ uint8_t c, uint32_t size)
+ {
+- return (dma_memory_set(&dev->as, taddr, c, size) != 0) ?
++ return (dma_memory_set(&dev->as, taddr,
++ c, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
+ H_DEST_PARM : H_SUCCESS;
+ }
+
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 296f3b5..d23516f 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -175,9 +175,10 @@ static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
++ * @attrs: memory transaction attributes
+ */
+ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
+- uint8_t c, dma_addr_t len);
++ uint8_t c, dma_addr_t len, MemTxAttrs attrs);
+
+ /**
+ * address_space_map: Map a physical memory region into a host virtual address.
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index 7d766a5..1f07217 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -19,7 +19,7 @@
+ /* #define DEBUG_IOMMU */
+
+ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
+- uint8_t c, dma_addr_t len)
++ uint8_t c, dma_addr_t len, MemTxAttrs attrs)
+ {
+ dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);
+
+@@ -31,8 +31,7 @@ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
+ memset(fillbuf, c, FILLBUF_SIZE);
+ while (len > 0) {
+ l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
+- error |= address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
+- fillbuf, l);
++ error |= address_space_write(as, addr, attrs, fillbuf, l);
+ len -= l;
+ addr += l;
+ }
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0004-softfloat-Add-flags-specific-to-Inf-Inf-and-0-0.patch b/meta/recipes-devtools/qemu/qemu/0004-softfloat-Add-flags-specific-to-Inf-Inf-and-0-0.patch
new file mode 100644
index 0000000000..c5377fbe70
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0004-softfloat-Add-flags-specific-to-Inf-Inf-and-0-0.patch
@@ -0,0 +1,73 @@
+From 52f1760d2d65e1a61028cb9d8610c8a38aa44cfc Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 04/21] softfloat: Add flags specific to Inf / Inf and 0 / 0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+PowerPC has these flags, and it's easier to compute them here
+than after the fact.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=10cc964030fca459591d9353571f3b1b4e1b5aec]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-5-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ fpu/softfloat-parts.c.inc | 16 +++++++++++-----
+ include/fpu/softfloat-types.h | 2 ++
+ 2 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
+index 3ed793347b..b8563cd2df 100644
+--- a/fpu/softfloat-parts.c.inc
++++ b/fpu/softfloat-parts.c.inc
+@@ -590,11 +590,13 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b,
+ }
+
+ /* 0/0 or Inf/Inf => NaN */
+- if (unlikely(ab_mask == float_cmask_zero) ||
+- unlikely(ab_mask == float_cmask_inf)) {
+- float_raise(float_flag_invalid, s);
+- parts_default_nan(a, s);
+- return a;
++ if (unlikely(ab_mask == float_cmask_zero)) {
++ float_raise(float_flag_invalid | float_flag_invalid_zdz, s);
++ goto d_nan;
++ }
++ if (unlikely(ab_mask == float_cmask_inf)) {
++ float_raise(float_flag_invalid | float_flag_invalid_idi, s);
++ goto d_nan;
+ }
+
+ /* All the NaN cases */
+@@ -625,6 +627,10 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b,
+ float_raise(float_flag_divbyzero, s);
+ a->cls = float_class_inf;
+ return a;
++
++ d_nan:
++ parts_default_nan(a, s);
++ return a;
+ }
+
+ /*
+diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
+index 56b4cf7835..5a9671e564 100644
+--- a/include/fpu/softfloat-types.h
++++ b/include/fpu/softfloat-types.h
+@@ -154,6 +154,8 @@ enum {
+ float_flag_output_denormal = 0x0040,
+ float_flag_invalid_isi = 0x0080, /* inf - inf */
+ float_flag_invalid_imz = 0x0100, /* inf * 0 */
++ float_flag_invalid_idi = 0x0200, /* inf / inf */
++ float_flag_invalid_zdz = 0x0400, /* 0 / 0 */
+ };
+
+ /*
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0004_let_dma_memory_rw_relaxed_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0004_let_dma_memory_rw_relaxed_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..cacb12909c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0004_let_dma_memory_rw_relaxed_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,78 @@
+From 4afd0f2f220ec3dc8518b8de0d66cbf8d2fd1be7 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 3 Sep 2020 09:30:10 +0200
+Subject: [PATCH] dma: Let dma_memory_rw_relaxed() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+We will add the MemTxAttrs argument to dma_memory_rw() in
+the next commit. Since dma_memory_rw_relaxed() is only used
+by dma_memory_rw(), modify it first in a separate commit to
+keep the next commit easier to review.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=4afd0f2f220ec3dc8518b8de0d66cbf8d2fd1be7]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20211223115554.3155328-4-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ include/sysemu/dma.h | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index d23516f..3be803c 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -83,9 +83,10 @@ static inline bool dma_memory_valid(AddressSpace *as,
+ static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len,
+- DMADirection dir)
++ DMADirection dir,
++ MemTxAttrs attrs)
+ {
+- return address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
++ return address_space_rw(as, addr, attrs,
+ buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
+ }
+
+@@ -93,7 +94,9 @@ static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len)
+ {
+- return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
++ return dma_memory_rw_relaxed(as, addr, buf, len,
++ DMA_DIRECTION_TO_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
+@@ -102,7 +105,8 @@ static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
+ dma_addr_t len)
+ {
+ return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
+- DMA_DIRECTION_FROM_DEVICE);
++ DMA_DIRECTION_FROM_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /**
+@@ -124,7 +128,8 @@ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
+ {
+ dma_barrier(as, dir);
+
+- return dma_memory_rw_relaxed(as, addr, buf, len, dir);
++ return dma_memory_rw_relaxed(as, addr, buf, len, dir,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /**
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0005-softfloat-Add-flag-specific-to-signaling-nans.patch b/meta/recipes-devtools/qemu/qemu/0005-softfloat-Add-flag-specific-to-signaling-nans.patch
new file mode 100644
index 0000000000..e4ecb496ae
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0005-softfloat-Add-flag-specific-to-signaling-nans.patch
@@ -0,0 +1,121 @@
+From 6bc0b2cffab0ee280ae9730262f162f25c16f6c2 Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 05/21] softfloat: Add flag specific to signaling nans
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+PowerPC has this flag, and it's easier to compute it here
+than after the fact.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=e706d4455b8d54252b11fc504c56df060151cb89]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-8-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ fpu/softfloat-parts.c.inc | 18 ++++++++++++------
+ fpu/softfloat.c | 4 +++-
+ include/fpu/softfloat-types.h | 1 +
+ 3 files changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
+index b8563cd2df..9518f3dc61 100644
+--- a/fpu/softfloat-parts.c.inc
++++ b/fpu/softfloat-parts.c.inc
+@@ -19,7 +19,7 @@ static void partsN(return_nan)(FloatPartsN *a, float_status *s)
+ {
+ switch (a->cls) {
+ case float_class_snan:
+- float_raise(float_flag_invalid, s);
++ float_raise(float_flag_invalid | float_flag_invalid_snan, s);
+ if (s->default_nan_mode) {
+ parts_default_nan(a, s);
+ } else {
+@@ -40,7 +40,7 @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
+ float_status *s)
+ {
+ if (is_snan(a->cls) || is_snan(b->cls)) {
+- float_raise(float_flag_invalid, s);
++ float_raise(float_flag_invalid | float_flag_invalid_snan, s);
+ }
+
+ if (s->default_nan_mode) {
+@@ -68,7 +68,7 @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
+ int which;
+
+ if (unlikely(abc_mask & float_cmask_snan)) {
+- float_raise(float_flag_invalid, s);
++ float_raise(float_flag_invalid | float_flag_invalid_snan, s);
+ }
+
+ which = pickNaNMulAdd(a->cls, b->cls, c->cls,
+@@ -1049,8 +1049,10 @@ static int64_t partsN(float_to_sint)(FloatPartsN *p, FloatRoundMode rmode,
+
+ switch (p->cls) {
+ case float_class_snan:
++ flags |= float_flag_invalid_snan;
++ /* fall through */
+ case float_class_qnan:
+- flags = float_flag_invalid;
++ flags |= float_flag_invalid;
+ r = max;
+ break;
+
+@@ -1114,8 +1116,10 @@ static uint64_t partsN(float_to_uint)(FloatPartsN *p, FloatRoundMode rmode,
+
+ switch (p->cls) {
+ case float_class_snan:
++ flags |= float_flag_invalid_snan;
++ /* fall through */
+ case float_class_qnan:
+- flags = float_flag_invalid;
++ flags |= float_flag_invalid;
+ r = max;
+ break;
+
+@@ -1341,7 +1345,9 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b,
+ }
+
+ if (unlikely(ab_mask & float_cmask_anynan)) {
+- if (!is_quiet || (ab_mask & float_cmask_snan)) {
++ if (ab_mask & float_cmask_snan) {
++ float_raise(float_flag_invalid | float_flag_invalid_snan, s);
++ } else if (!is_quiet) {
+ float_raise(float_flag_invalid, s);
+ }
+ return float_relation_unordered;
+diff --git a/fpu/softfloat.c b/fpu/softfloat.c
+index 9a28720d82..834ed3a054 100644
+--- a/fpu/softfloat.c
++++ b/fpu/softfloat.c
+@@ -2543,8 +2543,10 @@ floatx80 floatx80_mod(floatx80 a, floatx80 b, float_status *status)
+ static void parts_float_to_ahp(FloatParts64 *a, float_status *s)
+ {
+ switch (a->cls) {
+- case float_class_qnan:
+ case float_class_snan:
++ float_raise(float_flag_invalid_snan, s);
++ /* fall through */
++ case float_class_qnan:
+ /*
+ * There is no NaN in the destination format. Raise Invalid
+ * and return a zero with the sign of the input NaN.
+diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
+index 5a9671e564..e557b9126b 100644
+--- a/include/fpu/softfloat-types.h
++++ b/include/fpu/softfloat-types.h
+@@ -156,6 +156,7 @@ enum {
+ float_flag_invalid_imz = 0x0100, /* inf * 0 */
+ float_flag_invalid_idi = 0x0200, /* inf / inf */
+ float_flag_invalid_zdz = 0x0400, /* 0 / 0 */
++ float_flag_invalid_snan = 0x2000, /* any operand was snan */
+ };
+
+ /*
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0005_let_dma_memory_rw_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0005_let_dma_memory_rw_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..e5daf966d5
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0005_let_dma_memory_rw_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,158 @@
+From 23faf5694ff8054b847e9733297727be4a641132 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 3 Sep 2020 09:37:43 +0200
+Subject: [PATCH] dma: Let dma_memory_rw() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling
+dma_memory_rw().
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=23faf5694ff8054b847e9733297727be4a641132]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20211223115554.3155328-5-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/intc/spapr_xive.c | 3 ++-
+ hw/usb/hcd-ohci.c | 10 ++++++----
+ include/hw/pci/pci.h | 3 ++-
+ include/sysemu/dma.h | 11 ++++++-----
+ softmmu/dma-helpers.c | 3 ++-
+ 5 files changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
+index 4ec659b..eae95c7 100644
+--- a/hw/intc/spapr_xive.c
++++ b/hw/intc/spapr_xive.c
+@@ -1684,7 +1684,8 @@ static target_ulong h_int_esb(PowerPCCPU *cpu,
+ mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;
+
+ if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
+- (flags & SPAPR_XIVE_ESB_STORE))) {
++ (flags & SPAPR_XIVE_ESB_STORE),
++ MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
+ HWADDR_PRIx "\n", mmio_addr);
+ return H_HARDWARE;
+diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
+index 1cf2816..56e2315 100644
+--- a/hw/usb/hcd-ohci.c
++++ b/hw/usb/hcd-ohci.c
+@@ -586,7 +586,8 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td,
+ if (n > len)
+ n = len;
+
+- if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) {
++ if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf,
++ n, dir, MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ if (n == len) {
+@@ -595,7 +596,7 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td,
+ ptr = td->be & ~0xfffu;
+ buf += n;
+ if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf,
+- len - n, dir)) {
++ len - n, dir, MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ return 0;
+@@ -613,7 +614,8 @@ static int ohci_copy_iso_td(OHCIState *ohci,
+ if (n > len)
+ n = len;
+
+- if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) {
++ if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf,
++ n, dir, MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ if (n == len) {
+@@ -622,7 +624,7 @@ static int ohci_copy_iso_td(OHCIState *ohci,
+ ptr = end_addr & ~0xfffu;
+ buf += n;
+ if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf,
+- len - n, dir)) {
++ len - n, dir, MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ return 0;
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index e7cdf2d..4383f1c 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -808,7 +808,8 @@ static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir)
+ {
+- return dma_memory_rw(pci_get_address_space(dev), addr, buf, len, dir);
++ return dma_memory_rw(pci_get_address_space(dev), addr, buf, len,
++ dir, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /**
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 3be803c..e8ad422 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -121,15 +121,15 @@ static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @dir: indicates the transfer direction
++ * @attrs: memory transaction attributes
+ */
+ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+- DMADirection dir)
++ DMADirection dir, MemTxAttrs attrs)
+ {
+ dma_barrier(as, dir);
+
+- return dma_memory_rw_relaxed(as, addr, buf, len, dir,
+- MEMTXATTRS_UNSPECIFIED);
++ return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
+ }
+
+ /**
+@@ -147,7 +147,8 @@ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
+ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+ {
+- return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
++ return dma_memory_rw(as, addr, buf, len,
++ DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /**
+@@ -166,7 +167,7 @@ static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+ {
+ return dma_memory_rw(as, addr, (void *)buf, len,
+- DMA_DIRECTION_FROM_DEVICE);
++ DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /**
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index 1f07217..5bf76ff 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -305,7 +305,8 @@ static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
+ while (len > 0) {
+ ScatterGatherEntry entry = sg->sg[sg_cur_index++];
+ int32_t xfer = MIN(len, entry.len);
+- dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
++ dma_memory_rw(sg->as, entry.base, ptr, xfer, dir,
++ MEMTXATTRS_UNSPECIFIED);
+ ptr += xfer;
+ len -= xfer;
+ resid -= xfer;
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0006-target-ppc-Update-float_invalid_op_addsub-for-new-fl.patch b/meta/recipes-devtools/qemu/qemu/0006-target-ppc-Update-float_invalid_op_addsub-for-new-fl.patch
new file mode 100644
index 0000000000..5f38c7265f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0006-target-ppc-Update-float_invalid_op_addsub-for-new-fl.patch
@@ -0,0 +1,114 @@
+From ba4a60dd5df31b9fff8b7b8006bf9f15140cc6c5 Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 06/21] target/ppc: Update float_invalid_op_addsub for new
+ flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Now that vxisi and vxsnan are computed directly by
+softfloat, we don't need to recompute them via classes.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=941298ecd7e3103d3789d2dd87dd0f119e81c69e]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-9-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 38 ++++++++++++++------------------------
+ 1 file changed, 14 insertions(+), 24 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index c4896cecc8..f0deada84b 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -450,13 +450,12 @@ void helper_reset_fpstatus(CPUPPCState *env)
+ set_float_exception_flags(0, &env->fp_status);
+ }
+
+-static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
+- uintptr_t retaddr, int classes)
++static void float_invalid_op_addsub(CPUPPCState *env, int flags,
++ bool set_fpcc, uintptr_t retaddr)
+ {
+- if ((classes & ~is_neg) == is_inf) {
+- /* Magnitude subtraction of infinities */
++ if (flags & float_flag_invalid_isi) {
+ float_invalid_op_vxisi(env, set_fpcc, retaddr);
+- } else if (classes & is_snan) {
++ } else if (flags & float_flag_invalid_snan) {
+ float_invalid_op_vxsnan(env, retaddr);
+ }
+ }
+@@ -465,12 +464,10 @@ static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
+ float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
+ {
+ float64 ret = float64_add(arg1, arg2, &env->fp_status);
+- int status = get_float_exception_flags(&env->fp_status);
++ int flags = get_float_exception_flags(&env->fp_status);
+
+- if (unlikely(status & float_flag_invalid)) {
+- float_invalid_op_addsub(env, 1, GETPC(),
+- float64_classify(arg1) |
+- float64_classify(arg2));
++ if (unlikely(flags & float_flag_invalid)) {
++ float_invalid_op_addsub(env, flags, 1, GETPC());
+ }
+
+ return ret;
+@@ -480,12 +477,10 @@ float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
+ float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
+ {
+ float64 ret = float64_sub(arg1, arg2, &env->fp_status);
+- int status = get_float_exception_flags(&env->fp_status);
++ int flags = get_float_exception_flags(&env->fp_status);
+
+- if (unlikely(status & float_flag_invalid)) {
+- float_invalid_op_addsub(env, 1, GETPC(),
+- float64_classify(arg1) |
+- float64_classify(arg2));
++ if (unlikely(flags & float_flag_invalid)) {
++ float_invalid_op_addsub(env, flags, 1, GETPC());
+ }
+
+ return ret;
+@@ -1616,9 +1611,8 @@ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+- float_invalid_op_addsub(env, sfprf, GETPC(), \
+- tp##_classify(xa->fld) | \
+- tp##_classify(xb->fld)); \
++ float_invalid_op_addsub(env, tstat.float_exception_flags, \
++ sfprf, GETPC()); \
+ } \
+ \
+ if (r2sp) { \
+@@ -1660,9 +1654,7 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+- float_invalid_op_addsub(env, 1, GETPC(),
+- float128_classify(xa->f128) |
+- float128_classify(xb->f128));
++ float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+@@ -3278,9 +3270,7 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+- float_invalid_op_addsub(env, 1, GETPC(),
+- float128_classify(xa->f128) |
+- float128_classify(xb->f128));
++ float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0006_let_dma_memory_read_write_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0006_let_dma_memory_read_write_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..1973e477f3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0006_let_dma_memory_read_write_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,1453 @@
+From ba06fe8add5b788956a7317246c6280dfc157040 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 3 Sep 2020 10:08:29 +0200
+Subject: [PATCH] dma: Let dma_memory_read/write() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling
+dma_memory_read() or dma_memory_write().
+
+Patch created mechanically using spatch with this script:
+
+ @@
+ expression E1, E2, E3, E4;
+ @@
+ (
+ - dma_memory_read(E1, E2, E3, E4)
+ + dma_memory_read(E1, E2, E3, E4, MEMTXATTRS_UNSPECIFIED)
+ |
+ - dma_memory_write(E1, E2, E3, E4)
+ + dma_memory_write(E1, E2, E3, E4, MEMTXATTRS_UNSPECIFIED)
+ )
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=ba06fe8add5b788956a7317246c6280dfc157040]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20211223115554.3155328-6-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/arm/musicpal.c | 13 +++++++------
+ hw/arm/smmu-common.c | 3 ++-
+ hw/arm/smmuv3.c | 14 +++++++++-----
+ hw/core/generic-loader.c | 3 ++-
+ hw/dma/pl330.c | 12 ++++++++----
+ hw/dma/sparc32_dma.c | 16 ++++++++++------
+ hw/dma/xlnx-zynq-devcfg.c | 6 ++++--
+ hw/dma/xlnx_dpdma.c | 10 ++++++----
+ hw/i386/amd_iommu.c | 16 +++++++++-------
+ hw/i386/intel_iommu.c | 28 +++++++++++++++++-----------
+ hw/ide/macio.c | 2 +-
+ hw/intc/xive.c | 7 ++++---
+ hw/misc/bcm2835_property.c | 3 ++-
+ hw/misc/macio/mac_dbdma.c | 10 ++++++----
+ hw/net/allwinner-sun8i-emac.c | 18 ++++++++++++------
+ hw/net/ftgmac100.c | 25 ++++++++++++++++---------
+ hw/net/imx_fec.c | 32 ++++++++++++++++++++------------
+ hw/net/npcm7xx_emc.c | 20 ++++++++++++--------
+ hw/nvram/fw_cfg.c | 9 ++++++---
+ hw/pci-host/pnv_phb3.c | 5 +++--
+ hw/pci-host/pnv_phb3_msi.c | 9 ++++++---
+ hw/pci-host/pnv_phb4.c | 5 +++--
+ hw/sd/allwinner-sdhost.c | 14 ++++++++------
+ hw/sd/sdhci.c | 35 ++++++++++++++++++++++-------------
+ hw/usb/hcd-dwc2.c | 8 ++++----
+ hw/usb/hcd-ehci.c | 6 ++++--
+ hw/usb/hcd-ohci.c | 18 +++++++++++-------
+ hw/usb/hcd-xhci.c | 18 +++++++++++-------
+ include/hw/ppc/spapr_vio.h | 6 ++++--
+ include/sysemu/dma.h | 20 ++++++++++++--------
+ 30 files changed, 241 insertions(+), 150 deletions(-)
+
+diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
+index 2d612cc..2680ec5 100644
+--- a/hw/arm/musicpal.c
++++ b/hw/arm/musicpal.c
+@@ -185,13 +185,13 @@ static void eth_rx_desc_put(AddressSpace *dma_as, uint32_t addr,
+ cpu_to_le16s(&desc->buffer_size);
+ cpu_to_le32s(&desc->buffer);
+ cpu_to_le32s(&desc->next);
+- dma_memory_write(dma_as, addr, desc, sizeof(*desc));
++ dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static void eth_rx_desc_get(AddressSpace *dma_as, uint32_t addr,
+ mv88w8618_rx_desc *desc)
+ {
+- dma_memory_read(dma_as, addr, desc, sizeof(*desc));
++ dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+ le32_to_cpus(&desc->cmdstat);
+ le16_to_cpus(&desc->bytes);
+ le16_to_cpus(&desc->buffer_size);
+@@ -215,7 +215,7 @@ static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+ eth_rx_desc_get(&s->dma_as, desc_addr, &desc);
+ if ((desc.cmdstat & MP_ETH_RX_OWN) && desc.buffer_size >= size) {
+ dma_memory_write(&s->dma_as, desc.buffer + s->vlan_header,
+- buf, size);
++ buf, size, MEMTXATTRS_UNSPECIFIED);
+ desc.bytes = size + s->vlan_header;
+ desc.cmdstat &= ~MP_ETH_RX_OWN;
+ s->cur_rx[i] = desc.next;
+@@ -241,13 +241,13 @@ static void eth_tx_desc_put(AddressSpace *dma_as, uint32_t addr,
+ cpu_to_le16s(&desc->bytes);
+ cpu_to_le32s(&desc->buffer);
+ cpu_to_le32s(&desc->next);
+- dma_memory_write(dma_as, addr, desc, sizeof(*desc));
++ dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static void eth_tx_desc_get(AddressSpace *dma_as, uint32_t addr,
+ mv88w8618_tx_desc *desc)
+ {
+- dma_memory_read(dma_as, addr, desc, sizeof(*desc));
++ dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+ le32_to_cpus(&desc->cmdstat);
+ le16_to_cpus(&desc->res);
+ le16_to_cpus(&desc->bytes);
+@@ -269,7 +269,8 @@ static void eth_send(mv88w8618_eth_state *s, int queue_index)
+ if (desc.cmdstat & MP_ETH_TX_OWN) {
+ len = desc.bytes;
+ if (len < 2048) {
+- dma_memory_read(&s->dma_as, desc.buffer, buf, len);
++ dma_memory_read(&s->dma_as, desc.buffer, buf, len,
++ MEMTXATTRS_UNSPECIFIED);
+ qemu_send_packet(qemu_get_queue(s->nic), buf, len);
+ }
+ desc.cmdstat &= ~MP_ETH_TX_OWN;
+diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
+index 0459850..e09b9c1 100644
+--- a/hw/arm/smmu-common.c
++++ b/hw/arm/smmu-common.c
+@@ -193,7 +193,8 @@ static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
+ dma_addr_t addr = baseaddr + index * sizeof(*pte);
+
+ /* TODO: guarantee 64-bit single-copy atomicity */
+- ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte));
++ ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte),
++ MEMTXATTRS_UNSPECIFIED);
+
+ if (ret != MEMTX_OK) {
+ info->type = SMMU_PTW_ERR_WALK_EABT;
+diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
+index 01b60be..3b43368 100644
+--- a/hw/arm/smmuv3.c
++++ b/hw/arm/smmuv3.c
+@@ -102,7 +102,8 @@ static inline MemTxResult queue_read(SMMUQueue *q, void *data)
+ {
+ dma_addr_t addr = Q_CONS_ENTRY(q);
+
+- return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
++ return dma_memory_read(&address_space_memory, addr, data, q->entry_size,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static MemTxResult queue_write(SMMUQueue *q, void *data)
+@@ -110,7 +111,8 @@ static MemTxResult queue_write(SMMUQueue *q, void *data)
+ dma_addr_t addr = Q_PROD_ENTRY(q);
+ MemTxResult ret;
+
+- ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
++ ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size,
++ MEMTXATTRS_UNSPECIFIED);
+ if (ret != MEMTX_OK) {
+ return ret;
+ }
+@@ -285,7 +287,8 @@ static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
+
+ trace_smmuv3_get_ste(addr);
+ /* TODO: guarantee 64-bit single-copy atomicity */
+- ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
++ ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
++ MEMTXATTRS_UNSPECIFIED);
+ if (ret != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
+@@ -306,7 +309,8 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
+
+ trace_smmuv3_get_cd(addr);
+ /* TODO: guarantee 64-bit single-copy atomicity */
+- ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
++ ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
++ MEMTXATTRS_UNSPECIFIED);
+ if (ret != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
+@@ -411,7 +415,7 @@ static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
+ l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
+ /* TODO: guarantee 64-bit single-copy atomicity */
+ ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
+- sizeof(l1std));
++ sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
+ if (ret != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
+diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c
+index d14f932..9a24ffb 100644
+--- a/hw/core/generic-loader.c
++++ b/hw/core/generic-loader.c
+@@ -57,7 +57,8 @@ static void generic_loader_reset(void *opaque)
+
+ if (s->data_len) {
+ assert(s->data_len < sizeof(s->data));
+- dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len);
++ dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+ }
+
+diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
+index 0cb4619..31ce01b 100644
+--- a/hw/dma/pl330.c
++++ b/hw/dma/pl330.c
+@@ -1111,7 +1111,8 @@ static inline const PL330InsnDesc *pl330_fetch_insn(PL330Chan *ch)
+ uint8_t opcode;
+ int i;
+
+- dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1);
++ dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1,
++ MEMTXATTRS_UNSPECIFIED);
+ for (i = 0; insn_desc[i].size; i++) {
+ if ((opcode & insn_desc[i].opmask) == insn_desc[i].opcode) {
+ return &insn_desc[i];
+@@ -1125,7 +1126,8 @@ static inline void pl330_exec_insn(PL330Chan *ch, const PL330InsnDesc *insn)
+ uint8_t buf[PL330_INSN_MAXSIZE];
+
+ assert(insn->size <= PL330_INSN_MAXSIZE);
+- dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size);
++ dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size,
++ MEMTXATTRS_UNSPECIFIED);
+ insn->exec(ch, buf[0], &buf[1], insn->size - 1);
+ }
+
+@@ -1189,7 +1191,8 @@ static int pl330_exec_cycle(PL330Chan *channel)
+ if (q != NULL && q->len <= pl330_fifo_num_free(&s->fifo)) {
+ int len = q->len - (q->addr & (q->len - 1));
+
+- dma_memory_read(s->mem_as, q->addr, buf, len);
++ dma_memory_read(s->mem_as, q->addr, buf, len,
++ MEMTXATTRS_UNSPECIFIED);
+ trace_pl330_exec_cycle(q->addr, len);
+ if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) {
+ pl330_hexdump(buf, len);
+@@ -1220,7 +1223,8 @@ static int pl330_exec_cycle(PL330Chan *channel)
+ fifo_res = pl330_fifo_get(&s->fifo, buf, len, q->tag);
+ }
+ if (fifo_res == PL330_FIFO_OK || q->z) {
+- dma_memory_write(s->mem_as, q->addr, buf, len);
++ dma_memory_write(s->mem_as, q->addr, buf, len,
++ MEMTXATTRS_UNSPECIFIED);
+ trace_pl330_exec_cycle(q->addr, len);
+ if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) {
+ pl330_hexdump(buf, len);
+diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c
+index 03bc500..0ef13c5 100644
+--- a/hw/dma/sparc32_dma.c
++++ b/hw/dma/sparc32_dma.c
+@@ -81,11 +81,11 @@ void ledma_memory_read(void *opaque, hwaddr addr,
+ addr |= s->dmaregs[3];
+ trace_ledma_memory_read(addr, len);
+ if (do_bswap) {
+- dma_memory_read(&is->iommu_as, addr, buf, len);
++ dma_memory_read(&is->iommu_as, addr, buf, len, MEMTXATTRS_UNSPECIFIED);
+ } else {
+ addr &= ~1;
+ len &= ~1;
+- dma_memory_read(&is->iommu_as, addr, buf, len);
++ dma_memory_read(&is->iommu_as, addr, buf, len, MEMTXATTRS_UNSPECIFIED);
+ for(i = 0; i < len; i += 2) {
+ bswap16s((uint16_t *)(buf + i));
+ }
+@@ -103,7 +103,8 @@ void ledma_memory_write(void *opaque, hwaddr addr,
+ addr |= s->dmaregs[3];
+ trace_ledma_memory_write(addr, len);
+ if (do_bswap) {
+- dma_memory_write(&is->iommu_as, addr, buf, len);
++ dma_memory_write(&is->iommu_as, addr, buf, len,
++ MEMTXATTRS_UNSPECIFIED);
+ } else {
+ addr &= ~1;
+ len &= ~1;
+@@ -114,7 +115,8 @@ void ledma_memory_write(void *opaque, hwaddr addr,
+ for(i = 0; i < l; i += 2) {
+ tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
+ }
+- dma_memory_write(&is->iommu_as, addr, tmp_buf, l);
++ dma_memory_write(&is->iommu_as, addr, tmp_buf, l,
++ MEMTXATTRS_UNSPECIFIED);
+ len -= l;
+ buf += l;
+ addr += l;
+@@ -148,7 +150,8 @@ void espdma_memory_read(void *opaque, uint8_t *buf, int len)
+ IOMMUState *is = (IOMMUState *)s->iommu;
+
+ trace_espdma_memory_read(s->dmaregs[1], len);
+- dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len);
++ dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len,
++ MEMTXATTRS_UNSPECIFIED);
+ s->dmaregs[1] += len;
+ }
+
+@@ -158,7 +161,8 @@ void espdma_memory_write(void *opaque, uint8_t *buf, int len)
+ IOMMUState *is = (IOMMUState *)s->iommu;
+
+ trace_espdma_memory_write(s->dmaregs[1], len);
+- dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len);
++ dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len,
++ MEMTXATTRS_UNSPECIFIED);
+ s->dmaregs[1] += len;
+ }
+
+diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c
+index e33112b..f5ad1a0 100644
+--- a/hw/dma/xlnx-zynq-devcfg.c
++++ b/hw/dma/xlnx-zynq-devcfg.c
+@@ -161,12 +161,14 @@ static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s)
+ btt = MIN(btt, dmah->dest_len);
+ }
+ DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr);
+- dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt);
++ dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt,
++ MEMTXATTRS_UNSPECIFIED);
+ dmah->src_len -= btt;
+ dmah->src_addr += btt;
+ if (loopback && (dmah->src_len || dmah->dest_len)) {
+ DB_PRINT("writing %x bytes from %x\n", btt, dmah->dest_addr);
+- dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt);
++ dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt,
++ MEMTXATTRS_UNSPECIFIED);
+ dmah->dest_len -= btt;
+ dmah->dest_addr += btt;
+ }
+diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c
+index 967548a..2d7eae7 100644
+--- a/hw/dma/xlnx_dpdma.c
++++ b/hw/dma/xlnx_dpdma.c
+@@ -652,7 +652,7 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
+ }
+
+ if (dma_memory_read(&address_space_memory, desc_addr, &desc,
+- sizeof(DPDMADescriptor))) {
++ sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED)) {
+ s->registers[DPDMA_EISR] |= ((1 << 1) << channel);
+ xlnx_dpdma_update_irq(s);
+ s->operation_finished[channel] = true;
+@@ -708,7 +708,8 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
+ if (dma_memory_read(&address_space_memory,
+ source_addr[0],
+ &s->data[channel][ptr],
+- line_size)) {
++ line_size,
++ MEMTXATTRS_UNSPECIFIED)) {
+ s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
+ xlnx_dpdma_update_irq(s);
+ DPRINTF("Can't get data.\n");
+@@ -736,7 +737,8 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
+ if (dma_memory_read(&address_space_memory,
+ source_addr[frag],
+ &(s->data[channel][ptr]),
+- fragment_len)) {
++ fragment_len,
++ MEMTXATTRS_UNSPECIFIED)) {
+ s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
+ xlnx_dpdma_update_irq(s);
+ DPRINTF("Can't get data.\n");
+@@ -754,7 +756,7 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
+ DPRINTF("update the descriptor with the done flag set.\n");
+ xlnx_dpdma_desc_set_done(&desc);
+ dma_memory_write(&address_space_memory, desc_addr, &desc,
+- sizeof(DPDMADescriptor));
++ sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED);
+ }
+
+ if (xlnx_dpdma_desc_completion_interrupt(&desc)) {
+diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
+index 91fe34a..4d13d8e 100644
+--- a/hw/i386/amd_iommu.c
++++ b/hw/i386/amd_iommu.c
+@@ -181,7 +181,7 @@ static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
+ }
+
+ if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
+- evt, AMDVI_EVENT_LEN)) {
++ evt, AMDVI_EVENT_LEN, MEMTXATTRS_UNSPECIFIED)) {
+ trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
+ }
+
+@@ -376,7 +376,8 @@ static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
+ }
+ if (extract64(cmd[0], 0, 1)) {
+ if (dma_memory_write(&address_space_memory, addr, &data,
+- AMDVI_COMPLETION_DATA_SIZE)) {
++ AMDVI_COMPLETION_DATA_SIZE,
++ MEMTXATTRS_UNSPECIFIED)) {
+ trace_amdvi_completion_wait_fail(addr);
+ }
+ }
+@@ -502,7 +503,7 @@ static void amdvi_cmdbuf_exec(AMDVIState *s)
+ uint64_t cmd[2];
+
+ if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
+- cmd, AMDVI_COMMAND_SIZE)) {
++ cmd, AMDVI_COMMAND_SIZE, MEMTXATTRS_UNSPECIFIED)) {
+ trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
+ amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
+ return;
+@@ -836,7 +837,7 @@ static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
+ uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;
+
+ if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
+- AMDVI_DEVTAB_ENTRY_SIZE)) {
++ AMDVI_DEVTAB_ENTRY_SIZE, MEMTXATTRS_UNSPECIFIED)) {
+ trace_amdvi_dte_get_fail(s->devtab, offset);
+ /* log error accessing dte */
+ amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
+@@ -881,7 +882,8 @@ static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
+ {
+ uint64_t pte;
+
+- if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
++ if (dma_memory_read(&address_space_memory, pte_addr,
++ &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED)) {
+ trace_amdvi_get_pte_hwerror(pte_addr);
+ amdvi_log_pagetab_error(s, devid, pte_addr, 0);
+ pte = 0;
+@@ -1048,7 +1050,7 @@ static int amdvi_get_irte(AMDVIState *s, MSIMessage *origin, uint64_t *dte,
+ trace_amdvi_ir_irte(irte_root, offset);
+
+ if (dma_memory_read(&address_space_memory, irte_root + offset,
+- irte, sizeof(*irte))) {
++ irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) {
+ trace_amdvi_ir_err("failed to get irte");
+ return -AMDVI_IR_GET_IRTE;
+ }
+@@ -1108,7 +1110,7 @@ static int amdvi_get_irte_ga(AMDVIState *s, MSIMessage *origin, uint64_t *dte,
+ trace_amdvi_ir_irte(irte_root, offset);
+
+ if (dma_memory_read(&address_space_memory, irte_root + offset,
+- irte, sizeof(*irte))) {
++ irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) {
+ trace_amdvi_ir_err("failed to get irte_ga");
+ return -AMDVI_IR_GET_IRTE;
+ }
+diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
+index f584449..5b865ac 100644
+--- a/hw/i386/intel_iommu.c
++++ b/hw/i386/intel_iommu.c
+@@ -569,7 +569,8 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
+ dma_addr_t addr;
+
+ addr = s->root + index * sizeof(*re);
+- if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
++ if (dma_memory_read(&address_space_memory, addr,
++ re, sizeof(*re), MEMTXATTRS_UNSPECIFIED)) {
+ re->lo = 0;
+ return -VTD_FR_ROOT_TABLE_INV;
+ }
+@@ -602,7 +603,8 @@ static int vtd_get_context_entry_from_root(IntelIOMMUState *s,
+ }
+
+ addr = addr + index * ce_size;
+- if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) {
++ if (dma_memory_read(&address_space_memory, addr,
++ ce, ce_size, MEMTXATTRS_UNSPECIFIED)) {
+ return -VTD_FR_CONTEXT_TABLE_INV;
+ }
+
+@@ -639,8 +641,8 @@ static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
+ assert(index < VTD_SL_PT_ENTRY_NR);
+
+ if (dma_memory_read(&address_space_memory,
+- base_addr + index * sizeof(slpte), &slpte,
+- sizeof(slpte))) {
++ base_addr + index * sizeof(slpte),
++ &slpte, sizeof(slpte), MEMTXATTRS_UNSPECIFIED)) {
+ slpte = (uint64_t)-1;
+ return slpte;
+ }
+@@ -704,7 +706,8 @@ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
+ index = VTD_PASID_DIR_INDEX(pasid);
+ entry_size = VTD_PASID_DIR_ENTRY_SIZE;
+ addr = pasid_dir_base + index * entry_size;
+- if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) {
++ if (dma_memory_read(&address_space_memory, addr,
++ pdire, entry_size, MEMTXATTRS_UNSPECIFIED)) {
+ return -VTD_FR_PASID_TABLE_INV;
+ }
+
+@@ -728,7 +731,8 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
+ index = VTD_PASID_TABLE_INDEX(pasid);
+ entry_size = VTD_PASID_ENTRY_SIZE;
+ addr = addr + index * entry_size;
+- if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) {
++ if (dma_memory_read(&address_space_memory, addr,
++ pe, entry_size, MEMTXATTRS_UNSPECIFIED)) {
+ return -VTD_FR_PASID_TABLE_INV;
+ }
+
+@@ -2275,7 +2279,8 @@ static bool vtd_get_inv_desc(IntelIOMMUState *s,
+ uint32_t dw = s->iq_dw ? 32 : 16;
+ dma_addr_t addr = base_addr + offset * dw;
+
+- if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) {
++ if (dma_memory_read(&address_space_memory, addr,
++ inv_desc, dw, MEMTXATTRS_UNSPECIFIED)) {
+ error_report_once("Read INV DESC failed.");
+ return false;
+ }
+@@ -2308,8 +2313,9 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
+ dma_addr_t status_addr = inv_desc->hi;
+ trace_vtd_inv_desc_wait_sw(status_addr, status_data);
+ status_data = cpu_to_le32(status_data);
+- if (dma_memory_write(&address_space_memory, status_addr, &status_data,
+- sizeof(status_data))) {
++ if (dma_memory_write(&address_space_memory, status_addr,
++ &status_data, sizeof(status_data),
++ MEMTXATTRS_UNSPECIFIED)) {
+ trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
+ return false;
+ }
+@@ -3120,8 +3126,8 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
+ }
+
+ addr = iommu->intr_root + index * sizeof(*entry);
+- if (dma_memory_read(&address_space_memory, addr, entry,
+- sizeof(*entry))) {
++ if (dma_memory_read(&address_space_memory, addr,
++ entry, sizeof(*entry), MEMTXATTRS_UNSPECIFIED)) {
+ error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
+ __func__, index, addr);
+ return -VTD_FR_IR_ROOT_INVAL;
+diff --git a/hw/ide/macio.c b/hw/ide/macio.c
+index b03d401..f08318c 100644
+--- a/hw/ide/macio.c
++++ b/hw/ide/macio.c
+@@ -97,7 +97,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
+ /* Non-block ATAPI transfer - just copy to RAM */
+ s->io_buffer_size = MIN(s->io_buffer_size, io->len);
+ dma_memory_write(&address_space_memory, io->addr, s->io_buffer,
+- s->io_buffer_size);
++ s->io_buffer_size, MEMTXATTRS_UNSPECIFIED);
+ io->len = 0;
+ ide_atapi_cmd_ok(s);
+ m->dma_active = false;
+diff --git a/hw/intc/xive.c b/hw/intc/xive.c
+index 190194d..f15f985 100644
+--- a/hw/intc/xive.c
++++ b/hw/intc/xive.c
+@@ -1246,8 +1246,8 @@ void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
+ uint64_t qaddr = qaddr_base + (qindex << 2);
+ uint32_t qdata = -1;
+
+- if (dma_memory_read(&address_space_memory, qaddr, &qdata,
+- sizeof(qdata))) {
++ if (dma_memory_read(&address_space_memory, qaddr,
++ &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
+ HWADDR_PRIx "\n", qaddr);
+ return;
+@@ -1311,7 +1311,8 @@ static void xive_end_enqueue(XiveEND *end, uint32_t data)
+ uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
+ uint32_t qentries = 1 << (qsize + 10);
+
+- if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
++ if (dma_memory_write(&address_space_memory, qaddr,
++ &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
+ HWADDR_PRIx "\n", qaddr);
+ return;
+diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
+index 73941bd..76ea511 100644
+--- a/hw/misc/bcm2835_property.c
++++ b/hw/misc/bcm2835_property.c
+@@ -69,7 +69,8 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
+ break;
+ case 0x00010003: /* Get board MAC address */
+ resplen = sizeof(s->macaddr.a);
+- dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen);
++ dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen,
++ MEMTXATTRS_UNSPECIFIED);
+ break;
+ case 0x00010004: /* Get board serial */
+ qemu_log_mask(LOG_UNIMP,
+diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c
+index e220f1a..efcc026 100644
+--- a/hw/misc/macio/mac_dbdma.c
++++ b/hw/misc/macio/mac_dbdma.c
+@@ -94,7 +94,7 @@ static void dbdma_cmdptr_load(DBDMA_channel *ch)
+ DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n",
+ ch->regs[DBDMA_CMDPTR_LO]);
+ dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
+- &ch->current, sizeof(dbdma_cmd));
++ &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static void dbdma_cmdptr_save(DBDMA_channel *ch)
+@@ -104,7 +104,7 @@ static void dbdma_cmdptr_save(DBDMA_channel *ch)
+ le16_to_cpu(ch->current.xfer_status),
+ le16_to_cpu(ch->current.res_count));
+ dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
+- &ch->current, sizeof(dbdma_cmd));
++ &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static void kill_channel(DBDMA_channel *ch)
+@@ -371,7 +371,8 @@ static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
+ return;
+ }
+
+- dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len);
++ dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len,
++ MEMTXATTRS_UNSPECIFIED);
+
+ if (conditional_wait(ch))
+ goto wait;
+@@ -403,7 +404,8 @@ static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
+ return;
+ }
+
+- dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len);
++ dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len,
++ MEMTXATTRS_UNSPECIFIED);
+
+ if (conditional_wait(ch))
+ goto wait;
+diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c
+index ff611f1..ecc0245 100644
+--- a/hw/net/allwinner-sun8i-emac.c
++++ b/hw/net/allwinner-sun8i-emac.c
+@@ -350,7 +350,8 @@ static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s,
+ FrameDescriptor *desc,
+ uint32_t phys_addr)
+ {
+- dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc));
++ dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc),
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s,
+@@ -402,7 +403,8 @@ static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s,
+ FrameDescriptor *desc,
+ uint32_t phys_addr)
+ {
+- dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc));
++ dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc),
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static bool allwinner_sun8i_emac_can_receive(NetClientState *nc)
+@@ -460,7 +462,8 @@ static ssize_t allwinner_sun8i_emac_receive(NetClientState *nc,
+ << RX_DESC_STATUS_FRM_LEN_SHIFT;
+ }
+
+- dma_memory_write(&s->dma_as, desc.addr, buf, desc_bytes);
++ dma_memory_write(&s->dma_as, desc.addr, buf, desc_bytes,
++ MEMTXATTRS_UNSPECIFIED);
+ allwinner_sun8i_emac_flush_desc(s, &desc, s->rx_desc_curr);
+ trace_allwinner_sun8i_emac_receive(s->rx_desc_curr, desc.addr,
+ desc_bytes);
+@@ -512,7 +515,8 @@ static void allwinner_sun8i_emac_transmit(AwSun8iEmacState *s)
+ desc.status |= TX_DESC_STATUS_LENGTH_ERR;
+ break;
+ }
+- dma_memory_read(&s->dma_as, desc.addr, packet_buf + packet_bytes, bytes);
++ dma_memory_read(&s->dma_as, desc.addr, packet_buf + packet_bytes,
++ bytes, MEMTXATTRS_UNSPECIFIED);
+ packet_bytes += bytes;
+ desc.status &= ~DESC_STATUS_CTL;
+ allwinner_sun8i_emac_flush_desc(s, &desc, s->tx_desc_curr);
+@@ -634,7 +638,8 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset,
+ break;
+ case REG_TX_CUR_BUF: /* Transmit Current Buffer */
+ if (s->tx_desc_curr != 0) {
+- dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc));
++ dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc),
++ MEMTXATTRS_UNSPECIFIED);
+ value = desc.addr;
+ } else {
+ value = 0;
+@@ -647,7 +652,8 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset,
+ break;
+ case REG_RX_CUR_BUF: /* Receive Current Buffer */
+ if (s->rx_desc_curr != 0) {
+- dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc));
++ dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc),
++ MEMTXATTRS_UNSPECIFIED);
+ value = desc.addr;
+ } else {
+ value = 0;
+diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c
+index 25685ba..83ef0a7 100644
+--- a/hw/net/ftgmac100.c
++++ b/hw/net/ftgmac100.c
+@@ -453,7 +453,8 @@ static void do_phy_ctl(FTGMAC100State *s)
+
+ static int ftgmac100_read_bd(FTGMAC100Desc *bd, dma_addr_t addr)
+ {
+- if (dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd))) {
++ if (dma_memory_read(&address_space_memory, addr,
++ bd, sizeof(*bd), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read descriptor @ 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return -1;
+@@ -473,7 +474,8 @@ static int ftgmac100_write_bd(FTGMAC100Desc *bd, dma_addr_t addr)
+ lebd.des1 = cpu_to_le32(bd->des1);
+ lebd.des2 = cpu_to_le32(bd->des2);
+ lebd.des3 = cpu_to_le32(bd->des3);
+- if (dma_memory_write(&address_space_memory, addr, &lebd, sizeof(lebd))) {
++ if (dma_memory_write(&address_space_memory, addr,
++ &lebd, sizeof(lebd), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to write descriptor @ 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return -1;
+@@ -554,7 +556,8 @@ static void ftgmac100_do_tx(FTGMAC100State *s, uint32_t tx_ring,
+ len = sizeof(s->frame) - frame_size;
+ }
+
+- if (dma_memory_read(&address_space_memory, bd.des3, ptr, len)) {
++ if (dma_memory_read(&address_space_memory, bd.des3,
++ ptr, len, MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read packet @ 0x%x\n",
+ __func__, bd.des3);
+ s->isr |= FTGMAC100_INT_AHB_ERR;
+@@ -1030,20 +1033,24 @@ static ssize_t ftgmac100_receive(NetClientState *nc, const uint8_t *buf,
+ bd.des1 = lduw_be_p(buf + 14) | FTGMAC100_RXDES1_VLANTAG_AVAIL;
+
+ if (s->maccr & FTGMAC100_MACCR_RM_VLAN) {
+- dma_memory_write(&address_space_memory, buf_addr, buf, 12);
+- dma_memory_write(&address_space_memory, buf_addr + 12, buf + 16,
+- buf_len - 16);
++ dma_memory_write(&address_space_memory, buf_addr, buf, 12,
++ MEMTXATTRS_UNSPECIFIED);
++ dma_memory_write(&address_space_memory, buf_addr + 12,
++ buf + 16, buf_len - 16,
++ MEMTXATTRS_UNSPECIFIED);
+ } else {
+- dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
++ dma_memory_write(&address_space_memory, buf_addr, buf,
++ buf_len, MEMTXATTRS_UNSPECIFIED);
+ }
+ } else {
+ bd.des1 = 0;
+- dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
++ dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+ buf += buf_len;
+ if (size < 4) {
+ dma_memory_write(&address_space_memory, buf_addr + buf_len,
+- crc_ptr, 4 - size);
++ crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
+ crc_ptr += 4 - size;
+ }
+
+diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
+index 9c7035b..0db9aaf 100644
+--- a/hw/net/imx_fec.c
++++ b/hw/net/imx_fec.c
+@@ -387,19 +387,22 @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
+
+ static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
+ {
+- dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
++ dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
++ MEMTXATTRS_UNSPECIFIED);
+
+ trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
+ }
+
+ static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
+ {
+- dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
++ dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
+ {
+- dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
++ dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
++ MEMTXATTRS_UNSPECIFIED);
+
+ trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
+ bd->option, bd->status);
+@@ -407,7 +410,8 @@ static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
+
+ static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
+ {
+- dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
++ dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static void imx_eth_update(IMXFECState *s)
+@@ -474,7 +478,8 @@ static void imx_fec_do_tx(IMXFECState *s)
+ len = ENET_MAX_FRAME_SIZE - frame_size;
+ s->regs[ENET_EIR] |= ENET_INT_BABT;
+ }
+- dma_memory_read(&address_space_memory, bd.data, ptr, len);
++ dma_memory_read(&address_space_memory, bd.data, ptr, len,
++ MEMTXATTRS_UNSPECIFIED);
+ ptr += len;
+ frame_size += len;
+ if (bd.flags & ENET_BD_L) {
+@@ -555,7 +560,8 @@ static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
+ len = ENET_MAX_FRAME_SIZE - frame_size;
+ s->regs[ENET_EIR] |= ENET_INT_BABT;
+ }
+- dma_memory_read(&address_space_memory, bd.data, ptr, len);
++ dma_memory_read(&address_space_memory, bd.data, ptr, len,
++ MEMTXATTRS_UNSPECIFIED);
+ ptr += len;
+ frame_size += len;
+ if (bd.flags & ENET_BD_L) {
+@@ -1103,11 +1109,12 @@ static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
+ buf_len += size - 4;
+ }
+ buf_addr = bd.data;
+- dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
++ dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
++ MEMTXATTRS_UNSPECIFIED);
+ buf += buf_len;
+ if (size < 4) {
+ dma_memory_write(&address_space_memory, buf_addr + buf_len,
+- crc_ptr, 4 - size);
++ crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
+ crc_ptr += 4 - size;
+ }
+ bd.flags &= ~ENET_BD_E;
+@@ -1210,8 +1217,8 @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
+ */
+ const uint8_t zeros[2] = { 0 };
+
+- dma_memory_write(&address_space_memory, buf_addr,
+- zeros, sizeof(zeros));
++ dma_memory_write(&address_space_memory, buf_addr, zeros,
++ sizeof(zeros), MEMTXATTRS_UNSPECIFIED);
+
+ buf_addr += sizeof(zeros);
+ buf_len -= sizeof(zeros);
+@@ -1220,11 +1227,12 @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
+ shift16 = false;
+ }
+
+- dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
++ dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
++ MEMTXATTRS_UNSPECIFIED);
+ buf += buf_len;
+ if (size < 4) {
+ dma_memory_write(&address_space_memory, buf_addr + buf_len,
+- crc_ptr, 4 - size);
++ crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
+ crc_ptr += 4 - size;
+ }
+ bd.flags &= ~ENET_BD_E;
+diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c
+index 545b2b7..9a23289 100644
+--- a/hw/net/npcm7xx_emc.c
++++ b/hw/net/npcm7xx_emc.c
+@@ -200,7 +200,8 @@ static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc)
+
+ static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc)
+ {
+- if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) {
++ if (dma_memory_read(&address_space_memory, addr, desc,
++ sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return -1;
+@@ -221,7 +222,7 @@ static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr)
+ le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
+ le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa);
+ if (dma_memory_write(&address_space_memory, addr, &le_desc,
+- sizeof(le_desc))) {
++ sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return -1;
+@@ -231,7 +232,8 @@ static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr)
+
+ static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc)
+ {
+- if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) {
++ if (dma_memory_read(&address_space_memory, addr, desc,
++ sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return -1;
+@@ -252,7 +254,7 @@ static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr)
+ le_desc.reserved = cpu_to_le32(desc->reserved);
+ le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa);
+ if (dma_memory_write(&address_space_memory, addr, &le_desc,
+- sizeof(le_desc))) {
++ sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return -1;
+@@ -366,7 +368,8 @@ static void emc_try_send_next_packet(NPCM7xxEMCState *emc)
+ buf = malloced_buf;
+ }
+
+- if (dma_memory_read(&address_space_memory, next_buf_addr, buf, length)) {
++ if (dma_memory_read(&address_space_memory, next_buf_addr, buf,
++ length, MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
+ __func__, next_buf_addr);
+ emc_set_mista(emc, REG_MISTA_TXBERR);
+@@ -551,10 +554,11 @@ static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1)
+
+ buf_addr = rx_desc.rxbsa;
+ emc->regs[REG_CRXBSA] = buf_addr;
+- if (dma_memory_write(&address_space_memory, buf_addr, buf, len) ||
++ if (dma_memory_write(&address_space_memory, buf_addr, buf,
++ len, MEMTXATTRS_UNSPECIFIED) ||
+ (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) &&
+- dma_memory_write(&address_space_memory, buf_addr + len, crc_ptr,
+- 4))) {
++ dma_memory_write(&address_space_memory, buf_addr + len,
++ crc_ptr, 4, MEMTXATTRS_UNSPECIFIED))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n",
+ __func__);
+ emc_set_mista(emc, REG_MISTA_RXBERR);
+diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
+index f7803fe..9b91b15 100644
+--- a/hw/nvram/fw_cfg.c
++++ b/hw/nvram/fw_cfg.c
+@@ -357,7 +357,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
+ dma_addr = s->dma_addr;
+ s->dma_addr = 0;
+
+- if (dma_memory_read(s->dma_as, dma_addr, &dma, sizeof(dma))) {
++ if (dma_memory_read(s->dma_as, dma_addr,
++ &dma, sizeof(dma), MEMTXATTRS_UNSPECIFIED)) {
+ stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
+ FW_CFG_DMA_CTL_ERROR);
+ return;
+@@ -419,7 +420,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
+ */
+ if (read) {
+ if (dma_memory_write(s->dma_as, dma.address,
+- &e->data[s->cur_offset], len)) {
++ &e->data[s->cur_offset], len,
++ MEMTXATTRS_UNSPECIFIED)) {
+ dma.control |= FW_CFG_DMA_CTL_ERROR;
+ }
+ }
+@@ -427,7 +429,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
+ if (!e->allow_write ||
+ len != dma.length ||
+ dma_memory_read(s->dma_as, dma.address,
+- &e->data[s->cur_offset], len)) {
++ &e->data[s->cur_offset], len,
++ MEMTXATTRS_UNSPECIFIED)) {
+ dma.control |= FW_CFG_DMA_CTL_ERROR;
+ } else if (e->write_cb) {
+ e->write_cb(e->callback_opaque, s->cur_offset, len);
+diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c
+index 9c4451c..c6e7871 100644
+--- a/hw/pci-host/pnv_phb3.c
++++ b/hw/pci-host/pnv_phb3.c
+@@ -715,7 +715,8 @@ static bool pnv_phb3_resolve_pe(PnvPhb3DMASpace *ds)
+ bus_num = pci_bus_num(ds->bus);
+ addr = rtt & PHB_RTT_BASE_ADDRESS_MASK;
+ addr += 2 * ((bus_num << 8) | ds->devfn);
+- if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) {
++ if (dma_memory_read(&address_space_memory, addr, &rte,
++ sizeof(rte), MEMTXATTRS_UNSPECIFIED)) {
+ phb3_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr);
+ /* Set error bits ? fence ? ... */
+ return false;
+@@ -794,7 +795,7 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr,
+ /* Grab the TCE address */
+ taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
+ if (dma_memory_read(&address_space_memory, taddr, &tce,
+- sizeof(tce))) {
++ sizeof(tce), MEMTXATTRS_UNSPECIFIED)) {
+ phb3_error(phb, "Failed to read TCE at 0x%"PRIx64, taddr);
+ return;
+ }
+diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c
+index 099d209..8bcbc2c 100644
+--- a/hw/pci-host/pnv_phb3_msi.c
++++ b/hw/pci-host/pnv_phb3_msi.c
+@@ -53,7 +53,8 @@ static bool phb3_msi_read_ive(PnvPHB3 *phb, int srcno, uint64_t *out_ive)
+ return false;
+ }
+
+- if (dma_memory_read(&address_space_memory, ive_addr, &ive, sizeof(ive))) {
++ if (dma_memory_read(&address_space_memory, ive_addr,
++ &ive, sizeof(ive), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Failed to read IVE at 0x%" PRIx64,
+ ive_addr);
+ return false;
+@@ -73,7 +74,8 @@ static void phb3_msi_set_p(Phb3MsiState *msi, int srcno, uint8_t gen)
+ return;
+ }
+
+- if (dma_memory_write(&address_space_memory, ive_addr + 4, &p, 1)) {
++ if (dma_memory_write(&address_space_memory, ive_addr + 4,
++ &p, 1, MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Failed to write IVE (set P) at 0x%" PRIx64, ive_addr);
+ }
+@@ -89,7 +91,8 @@ static void phb3_msi_set_q(Phb3MsiState *msi, int srcno)
+ return;
+ }
+
+- if (dma_memory_write(&address_space_memory, ive_addr + 5, &q, 1)) {
++ if (dma_memory_write(&address_space_memory, ive_addr + 5,
++ &q, 1, MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Failed to write IVE (set Q) at 0x%" PRIx64, ive_addr);
+ }
+diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c
+index 40b7932..1fbf732 100644
+--- a/hw/pci-host/pnv_phb4.c
++++ b/hw/pci-host/pnv_phb4.c
+@@ -891,7 +891,8 @@ static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds)
+ bus_num = pci_bus_num(ds->bus);
+ addr = rtt & PHB_RTT_BASE_ADDRESS_MASK;
+ addr += 2 * PCI_BUILD_BDF(bus_num, ds->devfn);
+- if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) {
++ if (dma_memory_read(&address_space_memory, addr, &rte,
++ sizeof(rte), MEMTXATTRS_UNSPECIFIED)) {
+ phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr);
+ /* Set error bits ? fence ? ... */
+ return false;
+@@ -961,7 +962,7 @@ static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
+ /* Grab the TCE address */
+ taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
+ if (dma_memory_read(&address_space_memory, taddr, &tce,
+- sizeof(tce))) {
++ sizeof(tce), MEMTXATTRS_UNSPECIFIED)) {
+ phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr);
+ return;
+ }
+diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c
+index 9166d66..de5bc49 100644
+--- a/hw/sd/allwinner-sdhost.c
++++ b/hw/sd/allwinner-sdhost.c
+@@ -311,7 +311,8 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s,
+ uint8_t buf[1024];
+
+ /* Read descriptor */
+- dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc));
++ dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc),
++ MEMTXATTRS_UNSPECIFIED);
+ if (desc->size == 0) {
+ desc->size = klass->max_desc_size;
+ } else if (desc->size > klass->max_desc_size) {
+@@ -337,23 +338,24 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s,
+ /* Write to SD bus */
+ if (is_write) {
+ dma_memory_read(&s->dma_as,
+- (desc->addr & DESC_SIZE_MASK) + num_done,
+- buf, buf_bytes);
++ (desc->addr & DESC_SIZE_MASK) + num_done, buf,
++ buf_bytes, MEMTXATTRS_UNSPECIFIED);
+ sdbus_write_data(&s->sdbus, buf, buf_bytes);
+
+ /* Read from SD bus */
+ } else {
+ sdbus_read_data(&s->sdbus, buf, buf_bytes);
+ dma_memory_write(&s->dma_as,
+- (desc->addr & DESC_SIZE_MASK) + num_done,
+- buf, buf_bytes);
++ (desc->addr & DESC_SIZE_MASK) + num_done, buf,
++ buf_bytes, MEMTXATTRS_UNSPECIFIED);
+ }
+ num_done += buf_bytes;
+ }
+
+ /* Clear hold flag and flush descriptor */
+ desc->status &= ~DESC_STATUS_HOLD;
+- dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc));
++ dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc),
++ MEMTXATTRS_UNSPECIFIED);
+
+ return num_done;
+ }
+diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
+index c9dc065..e0bbc90 100644
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -616,8 +616,8 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
+ s->blkcnt--;
+ }
+ }
+- dma_memory_write(s->dma_as, s->sdmasysad,
+- &s->fifo_buffer[begin], s->data_count - begin);
++ dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin],
++ s->data_count - begin, MEMTXATTRS_UNSPECIFIED);
+ s->sdmasysad += s->data_count - begin;
+ if (s->data_count == block_size) {
+ s->data_count = 0;
+@@ -637,8 +637,8 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
+ s->data_count = block_size;
+ boundary_count -= block_size - begin;
+ }
+- dma_memory_read(s->dma_as, s->sdmasysad,
+- &s->fifo_buffer[begin], s->data_count - begin);
++ dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin],
++ s->data_count - begin, MEMTXATTRS_UNSPECIFIED);
+ s->sdmasysad += s->data_count - begin;
+ if (s->data_count == block_size) {
+ sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
+@@ -670,9 +670,11 @@ static void sdhci_sdma_transfer_single_block(SDHCIState *s)
+
+ if (s->trnmod & SDHC_TRNS_READ) {
+ sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt);
+- dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt);
++ dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt,
++ MEMTXATTRS_UNSPECIFIED);
+ } else {
+- dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt);
++ dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt,
++ MEMTXATTRS_UNSPECIFIED);
+ sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt);
+ }
+ s->blkcnt--;
+@@ -694,7 +696,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr)
+ hwaddr entry_addr = (hwaddr)s->admasysaddr;
+ switch (SDHC_DMA_TYPE(s->hostctl1)) {
+ case SDHC_CTRL_ADMA2_32:
+- dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2));
++ dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2),
++ MEMTXATTRS_UNSPECIFIED);
+ adma2 = le64_to_cpu(adma2);
+ /* The spec does not specify endianness of descriptor table.
+ * We currently assume that it is LE.
+@@ -705,7 +708,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr)
+ dscr->incr = 8;
+ break;
+ case SDHC_CTRL_ADMA1_32:
+- dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1));
++ dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1),
++ MEMTXATTRS_UNSPECIFIED);
+ adma1 = le32_to_cpu(adma1);
+ dscr->addr = (hwaddr)(adma1 & 0xFFFFF000);
+ dscr->attr = (uint8_t)extract32(adma1, 0, 7);
+@@ -717,10 +721,13 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr)
+ }
+ break;
+ case SDHC_CTRL_ADMA2_64:
+- dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1);
+- dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2);
++ dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1,
++ MEMTXATTRS_UNSPECIFIED);
++ dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2,
++ MEMTXATTRS_UNSPECIFIED);
+ dscr->length = le16_to_cpu(dscr->length);
+- dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8);
++ dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8,
++ MEMTXATTRS_UNSPECIFIED);
+ dscr->addr = le64_to_cpu(dscr->addr);
+ dscr->attr &= (uint8_t) ~0xC0;
+ dscr->incr = 12;
+@@ -785,7 +792,8 @@ static void sdhci_do_adma(SDHCIState *s)
+ }
+ dma_memory_write(s->dma_as, dscr.addr,
+ &s->fifo_buffer[begin],
+- s->data_count - begin);
++ s->data_count - begin,
++ MEMTXATTRS_UNSPECIFIED);
+ dscr.addr += s->data_count - begin;
+ if (s->data_count == block_size) {
+ s->data_count = 0;
+@@ -810,7 +818,8 @@ static void sdhci_do_adma(SDHCIState *s)
+ }
+ dma_memory_read(s->dma_as, dscr.addr,
+ &s->fifo_buffer[begin],
+- s->data_count - begin);
++ s->data_count - begin,
++ MEMTXATTRS_UNSPECIFIED);
+ dscr.addr += s->data_count - begin;
+ if (s->data_count == block_size) {
+ sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
+diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c
+index e1d96ac..8755e9c 100644
+--- a/hw/usb/hcd-dwc2.c
++++ b/hw/usb/hcd-dwc2.c
+@@ -272,8 +272,8 @@ static void dwc2_handle_packet(DWC2State *s, uint32_t devadr, USBDevice *dev,
+
+ if (pid != USB_TOKEN_IN) {
+ trace_usb_dwc2_memory_read(hcdma, tlen);
+- if (dma_memory_read(&s->dma_as, hcdma,
+- s->usb_buf[chan], tlen) != MEMTX_OK) {
++ if (dma_memory_read(&s->dma_as, hcdma, s->usb_buf[chan], tlen,
++ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_read failed\n",
+ __func__);
+ }
+@@ -328,8 +328,8 @@ babble:
+
+ if (pid == USB_TOKEN_IN) {
+ trace_usb_dwc2_memory_write(hcdma, actual);
+- if (dma_memory_write(&s->dma_as, hcdma, s->usb_buf[chan],
+- actual) != MEMTX_OK) {
++ if (dma_memory_write(&s->dma_as, hcdma, s->usb_buf[chan], actual,
++ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_write failed\n",
+ __func__);
+ }
+diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
+index 6caa7ac..33a8a37 100644
+--- a/hw/usb/hcd-ehci.c
++++ b/hw/usb/hcd-ehci.c
+@@ -383,7 +383,8 @@ static inline int get_dwords(EHCIState *ehci, uint32_t addr,
+ }
+
+ for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+- dma_memory_read(ehci->as, addr, buf, sizeof(*buf));
++ dma_memory_read(ehci->as, addr, buf, sizeof(*buf),
++ MEMTXATTRS_UNSPECIFIED);
+ *buf = le32_to_cpu(*buf);
+ }
+
+@@ -405,7 +406,8 @@ static inline int put_dwords(EHCIState *ehci, uint32_t addr,
+
+ for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+ uint32_t tmp = cpu_to_le32(*buf);
+- dma_memory_write(ehci->as, addr, &tmp, sizeof(tmp));
++ dma_memory_write(ehci->as, addr, &tmp, sizeof(tmp),
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ return num;
+diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
+index 56e2315..a93d6b2 100644
+--- a/hw/usb/hcd-ohci.c
++++ b/hw/usb/hcd-ohci.c
+@@ -452,7 +452,8 @@ static inline int get_dwords(OHCIState *ohci,
+ addr += ohci->localmem_base;
+
+ for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+- if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) {
++ if (dma_memory_read(ohci->as, addr,
++ buf, sizeof(*buf), MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ *buf = le32_to_cpu(*buf);
+@@ -471,7 +472,8 @@ static inline int put_dwords(OHCIState *ohci,
+
+ for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+ uint32_t tmp = cpu_to_le32(*buf);
+- if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) {
++ if (dma_memory_write(ohci->as, addr,
++ &tmp, sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ }
+@@ -488,7 +490,8 @@ static inline int get_words(OHCIState *ohci,
+ addr += ohci->localmem_base;
+
+ for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+- if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) {
++ if (dma_memory_read(ohci->as, addr,
++ buf, sizeof(*buf), MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ *buf = le16_to_cpu(*buf);
+@@ -507,7 +510,8 @@ static inline int put_words(OHCIState *ohci,
+
+ for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+ uint16_t tmp = cpu_to_le16(*buf);
+- if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) {
++ if (dma_memory_write(ohci->as, addr,
++ &tmp, sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) {
+ return -1;
+ }
+ }
+@@ -537,8 +541,8 @@ static inline int ohci_read_iso_td(OHCIState *ohci,
+ static inline int ohci_read_hcca(OHCIState *ohci,
+ dma_addr_t addr, struct ohci_hcca *hcca)
+ {
+- return dma_memory_read(ohci->as, addr + ohci->localmem_base,
+- hcca, sizeof(*hcca));
++ return dma_memory_read(ohci->as, addr + ohci->localmem_base, hcca,
++ sizeof(*hcca), MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static inline int ohci_put_ed(OHCIState *ohci,
+@@ -572,7 +576,7 @@ static inline int ohci_put_hcca(OHCIState *ohci,
+ return dma_memory_write(ohci->as,
+ addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET,
+ (char *)hcca + HCCA_WRITEBACK_OFFSET,
+- HCCA_WRITEBACK_SIZE);
++ HCCA_WRITEBACK_SIZE, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /* Read/Write the contents of a TD from/to main memory. */
+diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
+index e017000..ed2b9ea 100644
+--- a/hw/usb/hcd-xhci.c
++++ b/hw/usb/hcd-xhci.c
+@@ -487,7 +487,7 @@ static inline void xhci_dma_read_u32s(XHCIState *xhci, dma_addr_t addr,
+
+ assert((len % sizeof(uint32_t)) == 0);
+
+- dma_memory_read(xhci->as, addr, buf, len);
++ dma_memory_read(xhci->as, addr, buf, len, MEMTXATTRS_UNSPECIFIED);
+
+ for (i = 0; i < (len / sizeof(uint32_t)); i++) {
+ buf[i] = le32_to_cpu(buf[i]);
+@@ -507,7 +507,7 @@ static inline void xhci_dma_write_u32s(XHCIState *xhci, dma_addr_t addr,
+ for (i = 0; i < n; i++) {
+ tmp[i] = cpu_to_le32(buf[i]);
+ }
+- dma_memory_write(xhci->as, addr, tmp, len);
++ dma_memory_write(xhci->as, addr, tmp, len, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static XHCIPort *xhci_lookup_port(XHCIState *xhci, struct USBPort *uport)
+@@ -618,7 +618,7 @@ static void xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v)
+ ev_trb.status, ev_trb.control);
+
+ addr = intr->er_start + TRB_SIZE*intr->er_ep_idx;
+- dma_memory_write(xhci->as, addr, &ev_trb, TRB_SIZE);
++ dma_memory_write(xhci->as, addr, &ev_trb, TRB_SIZE, MEMTXATTRS_UNSPECIFIED);
+
+ intr->er_ep_idx++;
+ if (intr->er_ep_idx >= intr->er_size) {
+@@ -679,7 +679,8 @@ static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb,
+
+ while (1) {
+ TRBType type;
+- dma_memory_read(xhci->as, ring->dequeue, trb, TRB_SIZE);
++ dma_memory_read(xhci->as, ring->dequeue, trb, TRB_SIZE,
++ MEMTXATTRS_UNSPECIFIED);
+ trb->addr = ring->dequeue;
+ trb->ccs = ring->ccs;
+ le64_to_cpus(&trb->parameter);
+@@ -726,7 +727,8 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring)
+
+ while (1) {
+ TRBType type;
+- dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE);
++ dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE,
++ MEMTXATTRS_UNSPECIFIED);
+ le64_to_cpus(&trb.parameter);
+ le32_to_cpus(&trb.status);
+ le32_to_cpus(&trb.control);
+@@ -781,7 +783,8 @@ static void xhci_er_reset(XHCIState *xhci, int v)
+ xhci_die(xhci);
+ return;
+ }
+- dma_memory_read(xhci->as, erstba, &seg, sizeof(seg));
++ dma_memory_read(xhci->as, erstba, &seg, sizeof(seg),
++ MEMTXATTRS_UNSPECIFIED);
+ le32_to_cpus(&seg.addr_low);
+ le32_to_cpus(&seg.addr_high);
+ le32_to_cpus(&seg.size);
+@@ -2397,7 +2400,8 @@ static TRBCCode xhci_get_port_bandwidth(XHCIState *xhci, uint64_t pctx)
+ /* TODO: actually implement real values here */
+ bw_ctx[0] = 0;
+ memset(&bw_ctx[1], 80, xhci->numports); /* 80% */
+- dma_memory_write(xhci->as, ctx, bw_ctx, sizeof(bw_ctx));
++ dma_memory_write(xhci->as, ctx, bw_ctx, sizeof(bw_ctx),
++ MEMTXATTRS_UNSPECIFIED);
+
+ return CC_SUCCESS;
+ }
+diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
+index c90e74a..5d2ea8e 100644
+--- a/include/hw/ppc/spapr_vio.h
++++ b/include/hw/ppc/spapr_vio.h
+@@ -97,14 +97,16 @@ static inline bool spapr_vio_dma_valid(SpaprVioDevice *dev, uint64_t taddr,
+ static inline int spapr_vio_dma_read(SpaprVioDevice *dev, uint64_t taddr,
+ void *buf, uint32_t size)
+ {
+- return (dma_memory_read(&dev->as, taddr, buf, size) != 0) ?
++ return (dma_memory_read(&dev->as, taddr,
++ buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
+ H_DEST_PARM : H_SUCCESS;
+ }
+
+ static inline int spapr_vio_dma_write(SpaprVioDevice *dev, uint64_t taddr,
+ const void *buf, uint32_t size)
+ {
+- return (dma_memory_write(&dev->as, taddr, buf, size) != 0) ?
++ return (dma_memory_write(&dev->as, taddr,
++ buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
+ H_DEST_PARM : H_SUCCESS;
+ }
+
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index e8ad422..522682b 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -143,12 +143,14 @@ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
++ * @attrs: memory transaction attributes
+ */
+ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
+- void *buf, dma_addr_t len)
++ void *buf, dma_addr_t len,
++ MemTxAttrs attrs)
+ {
+ return dma_memory_rw(as, addr, buf, len,
+- DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
++ DMA_DIRECTION_TO_DEVICE, attrs);
+ }
+
+ /**
+@@ -162,12 +164,14 @@ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
++ * @attrs: memory transaction attributes
+ */
+ static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
+- const void *buf, dma_addr_t len)
++ const void *buf, dma_addr_t len,
++ MemTxAttrs attrs)
+ {
+ return dma_memory_rw(as, addr, (void *)buf, len,
+- DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
++ DMA_DIRECTION_FROM_DEVICE, attrs);
+ }
+
+ /**
+@@ -239,7 +243,7 @@ static inline void dma_memory_unmap(AddressSpace *as,
+ dma_addr_t addr) \
+ { \
+ uint##_bits##_t val; \
+- dma_memory_read(as, addr, &val, (_bits) / 8); \
++ dma_memory_read(as, addr, &val, (_bits) / 8, MEMTXATTRS_UNSPECIFIED); \
+ return _end##_bits##_to_cpu(val); \
+ } \
+ static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
+@@ -247,20 +251,20 @@ static inline void dma_memory_unmap(AddressSpace *as,
+ uint##_bits##_t val) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+- dma_memory_write(as, addr, &val, (_bits) / 8); \
++ dma_memory_write(as, addr, &val, (_bits) / 8, MEMTXATTRS_UNSPECIFIED); \
+ }
+
+ static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
+ {
+ uint8_t val;
+
+- dma_memory_read(as, addr, &val, 1);
++ dma_memory_read(as, addr, &val, 1, MEMTXATTRS_UNSPECIFIED);
+ return val;
+ }
+
+ static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
+ {
+- dma_memory_write(as, addr, &val, 1);
++ dma_memory_write(as, addr, &val, 1, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ DEFINE_LDST_DMA(uw, w, 16, le);
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0007-target-ppc-Update-float_invalid_op_mul-for-new-flags.patch b/meta/recipes-devtools/qemu/qemu/0007-target-ppc-Update-float_invalid_op_mul-for-new-flags.patch
new file mode 100644
index 0000000000..1cc4e9e35c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0007-target-ppc-Update-float_invalid_op_mul-for-new-flags.patch
@@ -0,0 +1,86 @@
+From ee8ba2dbb046f48457566b64ad95bf0440d2513e Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 07/21] target/ppc: Update float_invalid_op_mul for new flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Now that vximz and vxsnan are computed directly by
+softfloat, we don't need to recompute it via classes.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=4edf55698fc2ea30903657c63ed95db0d5548943]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-10-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 26 ++++++++++----------------
+ 1 file changed, 10 insertions(+), 16 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index f0deada84b..23264e6528 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -486,13 +486,12 @@ float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
+ return ret;
+ }
+
+-static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
+- uintptr_t retaddr, int classes)
++static void float_invalid_op_mul(CPUPPCState *env, int flags,
++ bool set_fprc, uintptr_t retaddr)
+ {
+- if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
+- /* Multiplication of zero by infinity */
++ if (flags & float_flag_invalid_imz) {
+ float_invalid_op_vximz(env, set_fprc, retaddr);
+- } else if (classes & is_snan) {
++ } else if (flags & float_flag_invalid_snan) {
+ float_invalid_op_vxsnan(env, retaddr);
+ }
+ }
+@@ -501,12 +500,10 @@ static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
+ float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
+ {
+ float64 ret = float64_mul(arg1, arg2, &env->fp_status);
+- int status = get_float_exception_flags(&env->fp_status);
++ int flags = get_float_exception_flags(&env->fp_status);
+
+- if (unlikely(status & float_flag_invalid)) {
+- float_invalid_op_mul(env, 1, GETPC(),
+- float64_classify(arg1) |
+- float64_classify(arg2));
++ if (unlikely(flags & float_flag_invalid)) {
++ float_invalid_op_mul(env, flags, 1, GETPC());
+ }
+
+ return ret;
+@@ -1687,9 +1684,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+- float_invalid_op_mul(env, sfprf, GETPC(), \
+- tp##_classify(xa->fld) | \
+- tp##_classify(xb->fld)); \
++ float_invalid_op_mul(env, tstat.float_exception_flags, \
++ sfprf, GETPC()); \
+ } \
+ \
+ if (r2sp) { \
+@@ -1727,9 +1723,7 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+- float_invalid_op_mul(env, 1, GETPC(),
+- float128_classify(xa->f128) |
+- float128_classify(xb->f128));
++ float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
+ }
+ helper_compute_fprf_float128(env, t.f128);
+
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0007_let_dma_memory_map_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0007_let_dma_memory_map_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..8dd0476953
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0007_let_dma_memory_map_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,227 @@
+From a1d4b0a3051b3079c8db607f519bc0fcb30e17ec Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 3 Sep 2020 11:00:47 +0200
+Subject: [PATCH] dma: Let dma_memory_map() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling
+dma_memory_map().
+
+Patch created mechanically using spatch with this script:
+
+ @@
+ expression E1, E2, E3, E4;
+ @@
+ - dma_memory_map(E1, E2, E3, E4)
+ + dma_memory_map(E1, E2, E3, E4, MEMTXATTRS_UNSPECIFIED)
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=a1d4b0a3051b3079c8db607f519bc0fcb30e17ec]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20211223115554.3155328-7-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/display/virtio-gpu.c | 10 ++++++----
+ hw/hyperv/vmbus.c | 8 +++++---
+ hw/ide/ahci.c | 8 +++++---
+ hw/usb/libhw.c | 3 ++-
+ hw/virtio/virtio.c | 6 ++++--
+ include/hw/pci/pci.h | 3 ++-
+ include/sysemu/dma.h | 5 +++--
+ softmmu/dma-helpers.c | 3 ++-
+ 8 files changed, 29 insertions(+), 17 deletions(-)
+
+diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
+index d78b970..c6dc818 100644
+--- a/hw/display/virtio-gpu.c
++++ b/hw/display/virtio-gpu.c
+@@ -814,8 +814,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
+
+ do {
+ len = l;
+- map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
+- a, &len, DMA_DIRECTION_TO_DEVICE);
++ map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
++ DMA_DIRECTION_TO_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ if (!map) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
+ " element %d\n", __func__, e);
+@@ -1252,8 +1253,9 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
+ for (i = 0; i < res->iov_cnt; i++) {
+ hwaddr len = res->iov[i].iov_len;
+ res->iov[i].iov_base =
+- dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
+- res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);
++ dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
++ DMA_DIRECTION_TO_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+
+ if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
+ /* Clean up the half-a-mapping we just created... */
+diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c
+index dbce3b3..8aad29f 100644
+--- a/hw/hyperv/vmbus.c
++++ b/hw/hyperv/vmbus.c
+@@ -373,7 +373,8 @@ static ssize_t gpadl_iter_io(GpadlIter *iter, void *buf, uint32_t len)
+
+ maddr = (iter->gpadl->gfns[idx] << TARGET_PAGE_BITS) | off_in_page;
+
+- iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir);
++ iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir,
++ MEMTXATTRS_UNSPECIFIED);
+ if (mlen != pgleft) {
+ dma_memory_unmap(iter->as, iter->map, mlen, iter->dir, 0);
+ iter->map = NULL;
+@@ -490,7 +491,8 @@ int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
+ goto err;
+ }
+
+- iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir);
++ iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir,
++ MEMTXATTRS_UNSPECIFIED);
+ if (!l) {
+ ret = -EFAULT;
+ goto err;
+@@ -566,7 +568,7 @@ static vmbus_ring_buffer *ringbuf_map_hdr(VMBusRingBufCommon *ringbuf)
+ dma_addr_t mlen = sizeof(*rb);
+
+ rb = dma_memory_map(ringbuf->as, ringbuf->rb_addr, &mlen,
+- DMA_DIRECTION_FROM_DEVICE);
++ DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
+ if (mlen != sizeof(*rb)) {
+ dma_memory_unmap(ringbuf->as, rb, mlen,
+ DMA_DIRECTION_FROM_DEVICE, 0);
+diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
+index a94c6e2..8e77ddb 100644
+--- a/hw/ide/ahci.c
++++ b/hw/ide/ahci.c
+@@ -249,7 +249,8 @@ static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
+ dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
+ }
+
+- *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
++ *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ if (len < wanted && *ptr) {
+ dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
+ *ptr = NULL;
+@@ -939,7 +940,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
+
+ /* map PRDT */
+ if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
+- DMA_DIRECTION_TO_DEVICE))){
++ DMA_DIRECTION_TO_DEVICE,
++ MEMTXATTRS_UNSPECIFIED))){
+ trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no);
+ return -1;
+ }
+@@ -1301,7 +1303,7 @@ static int handle_cmd(AHCIState *s, int port, uint8_t slot)
+ tbl_addr = le64_to_cpu(cmd->tbl_addr);
+ cmd_len = 0x80;
+ cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
+- DMA_DIRECTION_TO_DEVICE);
++ DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
+ if (!cmd_fis) {
+ trace_handle_cmd_badfis(s, port);
+ return -1;
+diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c
+index 9c33a16..f350eae 100644
+--- a/hw/usb/libhw.c
++++ b/hw/usb/libhw.c
+@@ -36,7 +36,8 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl)
+
+ while (len) {
+ dma_addr_t xlen = len;
+- mem = dma_memory_map(sgl->as, base, &xlen, dir);
++ mem = dma_memory_map(sgl->as, base, &xlen, dir,
++ MEMTXATTRS_UNSPECIFIED);
+ if (!mem) {
+ goto err;
+ }
+diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
+index ea7c079..e11a8a0d 100644
+--- a/hw/virtio/virtio.c
++++ b/hw/virtio/virtio.c
+@@ -1306,7 +1306,8 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
+ iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
+ is_write ?
+ DMA_DIRECTION_FROM_DEVICE :
+- DMA_DIRECTION_TO_DEVICE);
++ DMA_DIRECTION_TO_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ if (!iov[num_sg].iov_base) {
+ virtio_error(vdev, "virtio: bogus descriptor or out of resources");
+ goto out;
+@@ -1355,7 +1356,8 @@ static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
+ sg[i].iov_base = dma_memory_map(vdev->dma_as,
+ addr[i], &len, is_write ?
+ DMA_DIRECTION_FROM_DEVICE :
+- DMA_DIRECTION_TO_DEVICE);
++ DMA_DIRECTION_TO_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ if (!sg[i].iov_base) {
+ error_report("virtio: error trying to map MMIO memory");
+ exit(1);
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index 4383f1c..1acefc2 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -875,7 +875,8 @@ static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
+ {
+ void *buf;
+
+- buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir);
++ buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir,
++ MEMTXATTRS_UNSPECIFIED);
+ return buf;
+ }
+
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 522682b..97ff6f2 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -202,16 +202,17 @@ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
+ * @addr: address within that address space
+ * @len: pointer to length of buffer; updated on return
+ * @dir: indicates the transfer direction
++ * @attrs: memory attributes
+ */
+ static inline void *dma_memory_map(AddressSpace *as,
+ dma_addr_t addr, dma_addr_t *len,
+- DMADirection dir)
++ DMADirection dir, MemTxAttrs attrs)
+ {
+ hwaddr xlen = *len;
+ void *p;
+
+ p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
+- MEMTXATTRS_UNSPECIFIED);
++ attrs);
+ *len = xlen;
+ return p;
+ }
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index 5bf76ff..3c06a2f 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -143,7 +143,8 @@ static void dma_blk_cb(void *opaque, int ret)
+ while (dbs->sg_cur_index < dbs->sg->nsg) {
+ cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
+ cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
+- mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
++ mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir,
++ MEMTXATTRS_UNSPECIFIED);
+ /*
+ * Make reads deterministic in icount mode. Windows sometimes issues
+ * disk read requests with overlapping SGs. It leads
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0008-target-ppc-Update-float_invalid_op_div-for-new-flags.patch b/meta/recipes-devtools/qemu/qemu/0008-target-ppc-Update-float_invalid_op_div-for-new-flags.patch
new file mode 100644
index 0000000000..cb657eefd5
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0008-target-ppc-Update-float_invalid_op_div-for-new-flags.patch
@@ -0,0 +1,99 @@
+From a13c0819ef14120a0e30077fcc6a7470409fa732 Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:14 +0100
+Subject: [PATCH 08/21] target/ppc: Update float_invalid_op_div for new flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Now that vxidi, vxzdz, and vxsnan are computed directly by
+softfloat, we don't need to recompute them via classes.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=c07f82416cb7973c64d1e21c09957182b4b033dc]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-11-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 38 ++++++++++++++------------------------
+ 1 file changed, 14 insertions(+), 24 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index 23264e6528..2ab34236a3 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -509,17 +509,14 @@ float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
+ return ret;
+ }
+
+-static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
+- uintptr_t retaddr, int classes)
++static void float_invalid_op_div(CPUPPCState *env, int flags,
++ bool set_fprc, uintptr_t retaddr)
+ {
+- classes &= ~is_neg;
+- if (classes == is_inf) {
+- /* Division of infinity by infinity */
++ if (flags & float_flag_invalid_idi) {
+ float_invalid_op_vxidi(env, set_fprc, retaddr);
+- } else if (classes == is_zero) {
+- /* Division of zero by zero */
++ } else if (flags & float_flag_invalid_zdz) {
+ float_invalid_op_vxzdz(env, set_fprc, retaddr);
+- } else if (classes & is_snan) {
++ } else if (flags & float_flag_invalid_snan) {
+ float_invalid_op_vxsnan(env, retaddr);
+ }
+ }
+@@ -528,17 +525,13 @@ static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
+ float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
+ {
+ float64 ret = float64_div(arg1, arg2, &env->fp_status);
+- int status = get_float_exception_flags(&env->fp_status);
++ int flags = get_float_exception_flags(&env->fp_status);
+
+- if (unlikely(status)) {
+- if (status & float_flag_invalid) {
+- float_invalid_op_div(env, 1, GETPC(),
+- float64_classify(arg1) |
+- float64_classify(arg2));
+- }
+- if (status & float_flag_divbyzero) {
+- float_zero_divide_excp(env, GETPC());
+- }
++ if (unlikely(flags & float_flag_invalid)) {
++ float_invalid_op_div(env, flags, 1, GETPC());
++ }
++ if (unlikely(flags & float_flag_divbyzero)) {
++ float_zero_divide_excp(env, GETPC());
+ }
+
+ return ret;
+@@ -1755,9 +1748,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+- float_invalid_op_div(env, sfprf, GETPC(), \
+- tp##_classify(xa->fld) | \
+- tp##_classify(xb->fld)); \
++ float_invalid_op_div(env, tstat.float_exception_flags, \
++ sfprf, GETPC()); \
+ } \
+ if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \
+ float_zero_divide_excp(env, GETPC()); \
+@@ -1798,9 +1790,7 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+- float_invalid_op_div(env, 1, GETPC(),
+- float128_classify(xa->f128) |
+- float128_classify(xb->f128));
++ float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
+ }
+ if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
+ float_zero_divide_excp(env, GETPC());
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0008_have_dma_buf_rw_function_take_a_void_pointer.patch b/meta/recipes-devtools/qemu/qemu/0008_have_dma_buf_rw_function_take_a_void_pointer.patch
new file mode 100644
index 0000000000..0876ef184d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0008_have_dma_buf_rw_function_take_a_void_pointer.patch
@@ -0,0 +1,41 @@
+From c0ee1527358474c75067993d1bb233ad3a4ee081 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 16 Dec 2021 11:24:56 +0100
+Subject: [PATCH] dma: Have dma_buf_rw() take a void pointer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+DMA operations are run on any kind of buffer, not arrays of
+uint8_t. Convert dma_buf_rw() to take a void pointer argument
+to save us pointless casts to uint8_t *.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=c0ee1527358474c75067993d1bb233ad3a4ee081]
+
+Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-8-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ softmmu/dma-helpers.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index 3c06a2f..09e2999 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -294,9 +294,10 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
+ }
+
+
+-static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
++static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+ DMADirection dir)
+ {
++ uint8_t *ptr = buf;
+ uint64_t resid;
+ int sg_cur_index;
+
+--
+1.8.3.1
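The motivation in the commit message above can be shown with a small standalone C program (independent of QEMU, all names invented): any object pointer converts implicitly to void *, so switching the parameter type removes the casts at every call site.

#include <stdint.h>
#include <string.h>

/* Stand-ins for the old and new parameter types of a buffer-filling API. */
static void fill_old(uint8_t *ptr, size_t len) { memset(ptr, 0xab, len); }
static void fill_new(void *buf, size_t len)    { memset(buf, 0xab, len); }

int main(void)
{
    struct { uint32_t a; uint64_t b; } info;

    fill_old((uint8_t *)&info, sizeof(info)); /* explicit cast required */
    fill_new(&info, sizeof(info));            /* implicit conversion */
    return 0;
}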
diff --git a/meta/recipes-devtools/qemu/qemu/0009-target-ppc-Update-fmadd-for-new-flags.patch b/meta/recipes-devtools/qemu/qemu/0009-target-ppc-Update-fmadd-for-new-flags.patch
new file mode 100644
index 0000000000..2e723582b7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0009-target-ppc-Update-fmadd-for-new-flags.patch
@@ -0,0 +1,102 @@
+From ce768160ee1ee9673d60e800389c41b3c707411a Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:15 +0100
+Subject: [PATCH 09/21] target/ppc: Update fmadd for new flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Now that vximz, vxisi, and vxsnan are computed directly by
+softfloat, we don't need to recompute them. This replaces the
+separate float{32,64}_maddsub_update_excp functions with a
+single float_invalid_op_madd function.
+
+Fix VSX_MADD by passing sfprf to float_invalid_op_madd,
+which the previous *_maddsub_update_excp assumed to be true.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=e4052bb773cc829a27786d68caa22f28cff19d39]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-19-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 46 ++++++++++-------------------------------
+ 1 file changed, 11 insertions(+), 35 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index 2ab34236a3..3b1cb25666 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -639,38 +639,15 @@ uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
+ return do_fri(env, arg, float_round_down);
+ }
+
+-#define FPU_MADDSUB_UPDATE(NAME, TP) \
+-static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3, \
+- unsigned int madd_flags, uintptr_t retaddr) \
+-{ \
+- if (TP##_is_signaling_nan(arg1, &env->fp_status) || \
+- TP##_is_signaling_nan(arg2, &env->fp_status) || \
+- TP##_is_signaling_nan(arg3, &env->fp_status)) { \
+- /* sNaN operation */ \
+- float_invalid_op_vxsnan(env, retaddr); \
+- } \
+- if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \
+- (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) { \
+- /* Multiplication of zero by infinity */ \
+- float_invalid_op_vximz(env, 1, retaddr); \
+- } \
+- if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \
+- TP##_is_infinity(arg3)) { \
+- uint8_t aSign, bSign, cSign; \
+- \
+- aSign = TP##_is_neg(arg1); \
+- bSign = TP##_is_neg(arg2); \
+- cSign = TP##_is_neg(arg3); \
+- if (madd_flags & float_muladd_negate_c) { \
+- cSign ^= 1; \
+- } \
+- if (aSign ^ bSign ^ cSign) { \
+- float_invalid_op_vxisi(env, 1, retaddr); \
+- } \
+- } \
++static void float_invalid_op_madd(CPUPPCState *env, int flags,
++ bool set_fpcc, uintptr_t retaddr)
++{
++ if (flags & float_flag_invalid_imz) {
++ float_invalid_op_vximz(env, set_fpcc, retaddr);
++ } else {
++ float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
++ }
+ }
+-FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
+-FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
+
+ #define FPU_FMADD(op, madd_flags) \
+ uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
+@@ -682,8 +659,7 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
+ flags = get_float_exception_flags(&env->fp_status); \
+ if (flags) { \
+ if (flags & float_flag_invalid) { \
+- float64_maddsub_update_excp(env, arg1, arg2, arg3, \
+- madd_flags, GETPC()); \
++ float_invalid_op_madd(env, flags, 1, GETPC()); \
+ } \
+ do_float_check_status(env, GETPC()); \
+ } \
+@@ -2087,8 +2063,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+- tp##_maddsub_update_excp(env, xa->fld, b->fld, \
+- c->fld, maddflgs, GETPC()); \
++ float_invalid_op_madd(env, tstat.float_exception_flags, \
++ sfprf, GETPC()); \
+ } \
+ \
+ if (r2sp) { \
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0009_have_dma_buf_read_and_dma_buf_write_functions_take_a_void.patch b/meta/recipes-devtools/qemu/qemu/0009_have_dma_buf_read_and_dma_buf_write_functions_take_a_void.patch
new file mode 100644
index 0000000000..d65e0b4305
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0009_have_dma_buf_read_and_dma_buf_write_functions_take_a_void.patch
@@ -0,0 +1,167 @@
+From 5e468a36dcdd8fd5eb04282842b72967a29875e4 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 16 Dec 2021 11:27:23 +0100
+Subject: [PATCH] dma: Have dma_buf_read() / dma_buf_write() take a void
+ pointer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+DMA operations are run on any kind of buffer, not arrays of
+uint8_t. Convert dma_buf_read/dma_buf_write functions to take
+a void pointer argument and save us pointless casts to uint8_t *.
+
+Remove these pointless casts in the megasas device model.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=5e468a36dcdd8fd5eb04282842b72967a29875e4]
+
+Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-9-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/scsi/megasas.c | 22 +++++++++++-----------
+ include/sysemu/dma.h | 4 ++--
+ softmmu/dma-helpers.c | 4 ++--
+ 3 files changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
+index 14ec6d6..2dae33f 100644
+--- a/hw/scsi/megasas.c
++++ b/hw/scsi/megasas.c
+@@ -848,7 +848,7 @@ static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd)
+ MFI_INFO_PDMIX_SATA |
+ MFI_INFO_PDMIX_LD);
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -878,7 +878,7 @@ static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd)
+ info.disable_preboot_cli = 1;
+ info.cluster_disable = 1;
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -899,7 +899,7 @@ static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd)
+ info.expose_all_drives = 1;
+ }
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -910,7 +910,7 @@ static int megasas_dcmd_get_fw_time(MegasasState *s, MegasasCmd *cmd)
+
+ fw_time = cpu_to_le64(megasas_fw_time());
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)&fw_time, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&fw_time, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -937,7 +937,7 @@ static int megasas_event_info(MegasasState *s, MegasasCmd *cmd)
+ info.shutdown_seq_num = cpu_to_le32(s->shutdown_event);
+ info.boot_seq_num = cpu_to_le32(s->boot_event);
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -1006,7 +1006,7 @@ static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd)
+ info.size = cpu_to_le32(offset);
+ info.count = cpu_to_le32(num_pd_disks);
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)&info, offset, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, offset, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -1172,7 +1172,7 @@ static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd)
+ info.ld_count = cpu_to_le32(num_ld_disks);
+ trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks);
+
+- resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ resid = dma_buf_read(&info, dcmd_size, &cmd->qsg);
+ cmd->iov_size = dcmd_size - resid;
+ return MFI_STAT_OK;
+ }
+@@ -1221,7 +1221,7 @@ static int megasas_dcmd_ld_list_query(MegasasState *s, MegasasCmd *cmd)
+ info.size = dcmd_size;
+ trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks);
+
+- resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ resid = dma_buf_read(&info, dcmd_size, &cmd->qsg);
+ cmd->iov_size = dcmd_size - resid;
+ return MFI_STAT_OK;
+ }
+@@ -1390,7 +1390,7 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd)
+ ld_offset += sizeof(struct mfi_ld_config);
+ }
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)data, info->size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(data, info->size, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -1420,7 +1420,7 @@ static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd)
+ info.ecc_bucket_leak_rate = cpu_to_le16(1440);
+ info.expose_encl_devices = 1;
+
+- cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+ }
+
+@@ -1465,7 +1465,7 @@ static int megasas_dcmd_set_properties(MegasasState *s, MegasasCmd *cmd)
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+- dma_buf_write((uint8_t *)&info, dcmd_size, &cmd->qsg);
++ dma_buf_write(&info, dcmd_size, &cmd->qsg);
+ trace_megasas_dcmd_unsupported(cmd->index, cmd->iov_size);
+ return MFI_STAT_OK;
+ }
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 97ff6f2..0d5b836 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -302,8 +302,8 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
+ BlockAIOCB *dma_blk_write(BlockBackend *blk,
+ QEMUSGList *sg, uint64_t offset, uint32_t align,
+ BlockCompletionFunc *cb, void *opaque);
+-uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
+-uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);
++uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg);
++uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg);
+
+ void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
+ QEMUSGList *sg, enum BlockAcctType type);
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index 09e2999..7f37548 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -317,12 +317,12 @@ static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+ return resid;
+ }
+
+-uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
++uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg)
+ {
+ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
+ }
+
+-uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
++uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg)
+ {
+ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
+ }
+--
+1.8.3.1
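A hedged sketch of the call-site shape this change enables (struct and function names invented; dma_buf_read() has the three-argument prototype from the include/sysemu/dma.h hunk above, before later patches in this series add a MemTxAttrs parameter):

#include "sysemu/dma.h"

/* Hypothetical DCMD-style handler: the reply structure is handed to
 * dma_buf_read() directly, with no (uint8_t *) cast, and the function
 * returns the residual bytes not transferred through the scatter-gather
 * list. */
struct ctrl_info {
    uint32_t fw_version;
    uint32_t flags;
};

static uint64_t reply_ctrl_info(QEMUSGList *qsg)
{
    struct ctrl_info info = { .fw_version = 1, .flags = 0 };

    return dma_buf_read(&info, sizeof(info), qsg);
}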
diff --git a/meta/recipes-devtools/qemu/qemu/0010-target-ppc-Split-out-do_fmadd.patch b/meta/recipes-devtools/qemu/qemu/0010-target-ppc-Split-out-do_fmadd.patch
new file mode 100644
index 0000000000..4d19773200
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0010-target-ppc-Split-out-do_fmadd.patch
@@ -0,0 +1,71 @@
+From f024b8937d8b614994b94e86d2240fafcc7d2d73 Mon Sep 17 00:00:00 2001
+From: Richard Henderson <richard.henderson@linaro.org>
+Date: Fri, 17 Dec 2021 17:57:15 +0100
+Subject: [PATCH 10/21] target/ppc: Split out do_fmadd
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Create a common function for all of the madd helpers.
+Let the compiler tail call or inline as it chooses.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=ffdaff8e9c698061f57a6b1827570562c5a1c909]
+
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211119160502.17432-20-richard.henderson@linaro.org>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 33 ++++++++++++++++++---------------
+ 1 file changed, 18 insertions(+), 15 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index 3b1cb25666..9a1e7e6244 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -649,23 +649,26 @@ static void float_invalid_op_madd(CPUPPCState *env, int flags,
+ }
+ }
+
+-#define FPU_FMADD(op, madd_flags) \
+-uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
+- uint64_t arg2, uint64_t arg3) \
+-{ \
+- uint32_t flags; \
+- float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \
+- &env->fp_status); \
+- flags = get_float_exception_flags(&env->fp_status); \
+- if (flags) { \
+- if (flags & float_flag_invalid) { \
+- float_invalid_op_madd(env, flags, 1, GETPC()); \
+- } \
+- do_float_check_status(env, GETPC()); \
+- } \
+- return ret; \
++static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
++ float64 c, int madd_flags, uintptr_t retaddr)
++{
++ float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
++ int flags = get_float_exception_flags(&env->fp_status);
++
++ if (flags) {
++ if (flags & float_flag_invalid) {
++ float_invalid_op_madd(env, flags, 1, retaddr);
++ }
++ do_float_check_status(env, retaddr);
++ }
++ return ret;
+ }
+
++#define FPU_FMADD(op, madd_flags) \
++ uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
++ uint64_t arg2, uint64_t arg3) \
++ { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); }
++
+ #define MADD_FLGS 0
+ #define MSUB_FLGS float_muladd_negate_c
+ #define NMADD_FLGS float_muladd_negate_result
+--
+2.17.1
+
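The restructuring above (one real C function plus a one-line macro per named helper) is a general pattern; here is a standalone sketch of it with invented names (plain C, not QEMU code), mirroring how FPU_FMADD now only forwards to do_fmadd() with per-instruction flags.

#include <stdio.h>

/* Single shared implementation carrying the actual logic. */
static double do_op(double a, double b, double c, int negate_c)
{
    return a * b + (negate_c ? -c : c);
}

/* Thin per-name wrappers stamped out by a macro; the compiler is free to
 * inline or tail-call the shared function, as the commit message notes. */
#define DEFINE_OP(name, negate_c) \
    static double name(double a, double b, double c) \
    { return do_op(a, b, c, negate_c); }

DEFINE_OP(op_madd, 0)
DEFINE_OP(op_msub, 1)

int main(void)
{
    printf("%g %g\n", op_madd(2, 3, 1), op_msub(2, 3, 1)); /* prints 7 5 */
    return 0;
}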
diff --git a/meta/recipes-devtools/qemu/qemu/0010_let_pci_dma_rw_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0010_let_pci_dma_rw_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..8207058aca
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0010_let_pci_dma_rw_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,91 @@
+From e2d784b67dc724a9b0854b49255ba0ee8ca46543 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 22:18:19 +0100
+Subject: [PATCH] pci: Let pci_dma_rw() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling pci_dma_rw().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=e2d784b67dc724a9b0854b49255ba0ee8ca46543]
+
+Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-10-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/audio/intel-hda.c | 3 ++-
+ hw/scsi/esp-pci.c | 2 +-
+ include/hw/pci/pci.h | 10 ++++++----
+ 3 files changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
+index 8ce9df6..fb3d34a 100644
+--- a/hw/audio/intel-hda.c
++++ b/hw/audio/intel-hda.c
+@@ -427,7 +427,8 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
+ dprint(d, 3, "dma: entry %d, pos %d/%d, copy %d\n",
+ st->be, st->bp, st->bpl[st->be].len, copy);
+
+- pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output);
++ pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output,
++ MEMTXATTRS_UNSPECIFIED);
+ st->lpib += copy;
+ st->bp += copy;
+ buf += copy;
+diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c
+index dac054a..1792f84 100644
+--- a/hw/scsi/esp-pci.c
++++ b/hw/scsi/esp-pci.c
+@@ -280,7 +280,7 @@ static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
+ len = pci->dma_regs[DMA_WBC];
+ }
+
+- pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir);
++ pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir, MEMTXATTRS_UNSPECIFIED);
+
+ /* update status registers */
+ pci->dma_regs[DMA_WBC] -= len;
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index 1acefc2..a751ab5 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -806,10 +806,10 @@ static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
+ */
+ static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+- DMADirection dir)
++ DMADirection dir, MemTxAttrs attrs)
+ {
+ return dma_memory_rw(pci_get_address_space(dev), addr, buf, len,
+- dir, MEMTXATTRS_UNSPECIFIED);
++ dir, attrs);
+ }
+
+ /**
+@@ -827,7 +827,8 @@ static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
+ static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+ {
+- return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
++ return pci_dma_rw(dev, addr, buf, len,
++ DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /**
+@@ -845,7 +846,8 @@ static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr,
+ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+ {
+- return pci_dma_rw(dev, addr, (void *) buf, len, DMA_DIRECTION_FROM_DEVICE);
++ return pci_dma_rw(dev, addr, (void *) buf, len,
++ DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ #define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
+--
+1.8.3.1
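For reference, a hedged sketch of a device-side call that takes advantage of the new parameter (the six-argument pci_dma_rw() prototype is the one from the include/hw/pci/pci.h hunk above; the wrapper itself is invented for illustration):

#include "hw/pci/pci.h"   /* pci_dma_rw() as modified by the patch above */

/* Hypothetical variant of pci_dma_read() that lets the caller choose the
 * MemTxAttrs rather than always passing MEMTXATTRS_UNSPECIFIED. */
static MemTxResult my_dma_read(PCIDevice *dev, dma_addr_t addr,
                               void *buf, dma_addr_t len, MemTxAttrs attrs)
{
    return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE, attrs);
}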
diff --git a/meta/recipes-devtools/qemu/qemu/0011-target-ppc-Fix-xs-max-min-cj-dp-to-use-VSX-registers.patch b/meta/recipes-devtools/qemu/qemu/0011-target-ppc-Fix-xs-max-min-cj-dp-to-use-VSX-registers.patch
new file mode 100644
index 0000000000..0daae55b99
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0011-target-ppc-Fix-xs-max-min-cj-dp-to-use-VSX-registers.patch
@@ -0,0 +1,93 @@
+From a1821ad612994b95cb6597efd15e0a888676386c Mon Sep 17 00:00:00 2001
+From: Victor Colombo <victor.colombo@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:18 +0100
+Subject: [PATCH 11/21] target/ppc: Fix xs{max, min}[cj]dp to use VSX registers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+PPC instructions xsmaxcdp, xsmincdp, xsmaxjdp, and xsminjdp are using
+vector registers when they should be using VSX ones. This happens
+because the instructions are using GEN_VSX_HELPER_R3, which adds 32
+to the register numbers, effectively making them vector registers.
+
+This patch fixes it by changing these instructions to use
+GEN_VSX_HELPER_X3.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=201fc774e0e1cc76ec23b595968004a7b14fb6e8]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Victor Colombo <victor.colombo@eldorado.org.br>
+Message-Id: <20211213120958.24443-2-victor.colombo@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 4 ++--
+ target/ppc/helper.h | 8 ++++----
+ target/ppc/translate/vsx-impl.c.inc | 8 ++++----
+ 3 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index 9a1e7e6244..ecdcd36a11 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -2375,7 +2375,7 @@ VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
+ VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
+
+ #define VSX_MAX_MINC(name, max) \
+-void helper_##name(CPUPPCState *env, uint32_t opcode, \
++void helper_##name(CPUPPCState *env, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
+ { \
+ ppc_vsr_t t = *xt; \
+@@ -2410,7 +2410,7 @@ VSX_MAX_MINC(xsmaxcdp, 1);
+ VSX_MAX_MINC(xsmincdp, 0);
+
+ #define VSX_MAX_MINJ(name, max) \
+-void helper_##name(CPUPPCState *env, uint32_t opcode, \
++void helper_##name(CPUPPCState *env, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
+ { \
+ ppc_vsr_t t = *xt; \
+diff --git a/target/ppc/helper.h b/target/ppc/helper.h
+index 627811cefc..12a3d5f269 100644
+--- a/target/ppc/helper.h
++++ b/target/ppc/helper.h
+@@ -392,10 +392,10 @@ DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr)
+ DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr)
+ DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr)
+ DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr)
+-DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr)
+-DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr)
+-DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr)
+-DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr)
++DEF_HELPER_4(xsmaxcdp, void, env, vsr, vsr, vsr)
++DEF_HELPER_4(xsmincdp, void, env, vsr, vsr, vsr)
++DEF_HELPER_4(xsmaxjdp, void, env, vsr, vsr, vsr)
++DEF_HELPER_4(xsminjdp, void, env, vsr, vsr, vsr)
+ DEF_HELPER_3(xscvdphp, void, env, vsr, vsr)
+ DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr)
+ DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr)
+diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
+index c0e38060b4..02df75339e 100644
+--- a/target/ppc/translate/vsx-impl.c.inc
++++ b/target/ppc/translate/vsx-impl.c.inc
+@@ -1098,10 +1098,10 @@ GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
+ GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
+ GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
+ GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+-GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
+-GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
+-GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
+-GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
++GEN_VSX_HELPER_X3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
++GEN_VSX_HELPER_X3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
++GEN_VSX_HELPER_X3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
++GEN_VSX_HELPER_X3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
+ GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
+ GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
+ GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0011_let_dma_buf_rw_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0011_let_dma_buf_rw_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..4f7276ef8b
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0011_let_dma_buf_rw_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,65 @@
+From 959384e74e1b508acc3af6e806b3d7b87335fc2a Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 22:59:46 +0100
+Subject: [PATCH] dma: Let dma_buf_rw() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling dma_buf_rw().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the 2 callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=959384e74e1b508acc3af6e806b3d7b87335fc2a]
+
+Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-11-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ softmmu/dma-helpers.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index 7f37548..fa81d2b 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -295,7 +295,7 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
+
+
+ static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+- DMADirection dir)
++ DMADirection dir, MemTxAttrs attrs)
+ {
+ uint8_t *ptr = buf;
+ uint64_t resid;
+@@ -307,8 +307,7 @@ static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+ while (len > 0) {
+ ScatterGatherEntry entry = sg->sg[sg_cur_index++];
+ int32_t xfer = MIN(len, entry.len);
+- dma_memory_rw(sg->as, entry.base, ptr, xfer, dir,
+- MEMTXATTRS_UNSPECIFIED);
++ dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs);
+ ptr += xfer;
+ len -= xfer;
+ resid -= xfer;
+@@ -319,12 +318,14 @@ static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+
+ uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg)
+ {
+- return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
++ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg)
+ {
+- return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
++ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
+--
+1.8.3.1
diff --git a/meta/recipes-devtools/qemu/qemu/0012-target-ppc-Move-xs-max-min-cj-dp-to-decodetree.patch b/meta/recipes-devtools/qemu/qemu/0012-target-ppc-Move-xs-max-min-cj-dp-to-decodetree.patch
new file mode 100644
index 0000000000..e9b99c9b4e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0012-target-ppc-Move-xs-max-min-cj-dp-to-decodetree.patch
@@ -0,0 +1,121 @@
+From 1cbb2622de34ee034f1dd7196567673c52c84805 Mon Sep 17 00:00:00 2001
+From: Victor Colombo <victor.colombo@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:18 +0100
+Subject: [PATCH 12/21] target/ppc: Move xs{max,min}[cj]dp to decodetree
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=c5df1898a147c232f0502cda5dac8df6074070fc]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Victor Colombo <victor.colombo@eldorado.org.br>
+Message-Id: <20211213120958.24443-3-victor.colombo@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/insn32.decode | 17 +++++++++++++---
+ target/ppc/translate/vsx-impl.c.inc | 30 +++++++++++++++++++++++++----
+ target/ppc/translate/vsx-ops.c.inc | 4 ----
+ 3 files changed, 40 insertions(+), 11 deletions(-)
+
+diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
+index e135b8aba4..759b2a9aa5 100644
+--- a/target/ppc/insn32.decode
++++ b/target/ppc/insn32.decode
+@@ -123,10 +123,14 @@
+ &X_vrt_frbp vrt frbp
+ @X_vrt_frbp ...... vrt:5 ..... ....0 .......... . &X_vrt_frbp frbp=%x_frbp
+
++%xx_xt 0:1 21:5
++%xx_xb 1:1 11:5
++%xx_xa 2:1 16:5
+ &XX2 xt xb uim:uint8_t
+-%xx2_xt 0:1 21:5
+-%xx2_xb 1:1 11:5
+-@XX2 ...... ..... ... uim:2 ..... ......... .. &XX2 xt=%xx2_xt xb=%xx2_xb
++@XX2 ...... ..... ... uim:2 ..... ......... .. &XX2 xt=%xx_xt xb=%xx_xb
++
++&XX3 xt xa xb
++@XX3 ...... ..... ..... ..... ........ ... &XX3 xt=%xx_xt xa=%xx_xa xb=%xx_xb
+
+ &Z22_bf_fra bf fra dm
+ @Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra
+@@ -427,3 +431,10 @@ XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2
+ ## VSX Vector Load Special Value Instruction
+
+ LXVKQ 111100 ..... 11111 ..... 0101101000 . @X_uim5
++
++## VSX Comparison Instructions
++
++XSMAXCDP 111100 ..... ..... ..... 10000000 ... @XX3
++XSMINCDP 111100 ..... ..... ..... 10001000 ... @XX3
++XSMAXJDP 111100 ..... ..... ..... 10010000 ... @XX3
++XSMINJDP 111100 ..... ..... ..... 10011000 ... @XX3
+diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
+index 02df75339e..e2447750dd 100644
+--- a/target/ppc/translate/vsx-impl.c.inc
++++ b/target/ppc/translate/vsx-impl.c.inc
+@@ -1098,10 +1098,6 @@ GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
+ GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
+ GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
+ GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+-GEN_VSX_HELPER_X3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
+-GEN_VSX_HELPER_X3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
+-GEN_VSX_HELPER_X3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
+-GEN_VSX_HELPER_X3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
+ GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
+ GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
+ GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
+@@ -2185,6 +2181,32 @@ TRANS(XXBLENDVH, do_xxblendv, MO_16)
+ TRANS(XXBLENDVW, do_xxblendv, MO_32)
+ TRANS(XXBLENDVD, do_xxblendv, MO_64)
+
++static bool do_xsmaxmincjdp(DisasContext *ctx, arg_XX3 *a,
++ void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
++{
++ TCGv_ptr xt, xa, xb;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
++ REQUIRE_VSX(ctx);
++
++ xt = gen_vsr_ptr(a->xt);
++ xa = gen_vsr_ptr(a->xa);
++ xb = gen_vsr_ptr(a->xb);
++
++ helper(cpu_env, xt, xa, xb);
++
++ tcg_temp_free_ptr(xt);
++ tcg_temp_free_ptr(xa);
++ tcg_temp_free_ptr(xb);
++
++ return true;
++}
++
++TRANS(XSMAXCDP, do_xsmaxmincjdp, gen_helper_xsmaxcdp)
++TRANS(XSMINCDP, do_xsmaxmincjdp, gen_helper_xsmincdp)
++TRANS(XSMAXJDP, do_xsmaxmincjdp, gen_helper_xsmaxjdp)
++TRANS(XSMINJDP, do_xsmaxmincjdp, gen_helper_xsminjdp)
++
+ #undef GEN_XX2FORM
+ #undef GEN_XX3FORM
+ #undef GEN_XX2IFORM
+diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc
+index 152d1e5c3b..f980bc1bae 100644
+--- a/target/ppc/translate/vsx-ops.c.inc
++++ b/target/ppc/translate/vsx-ops.c.inc
+@@ -207,10 +207,6 @@ GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001),
+ GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001),
+ GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
+ GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
+-GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300),
+-GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300),
+-GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300),
+-GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300),
+ GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300),
+ GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
+ GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
+--
+2.17.1
+
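One detail of the insn32.decode hunk above that is easy to miss: %xx_xt is assembled from two sub-fields, instruction bit 0 (the TX bit) and bits 21-25 (the T field). Assuming decodetree's documented convention that the first-listed sub-field becomes the most significant part, the extracted value is equivalent to this hand-written C sketch (illustrative only, not generated code):

#include <stdint.h>

/* Extract 'len' bits of 'insn' starting at bit position 'pos' (bit 0 = LSB). */
static uint32_t get_field(uint32_t insn, int pos, int len)
{
    return (insn >> pos) & ((1u << len) - 1);
}

/* %xx_xt 0:1 21:5  ->  XT = TX:T, a 6-bit index 0..63 covering the whole
 * VSX register file, which is why do_xsmaxmincjdp() above can pass a->xt
 * to gen_vsr_ptr() directly. */
static uint32_t decode_xx_xt(uint32_t insn)
{
    return (get_field(insn, 0, 1) << 5) | get_field(insn, 21, 5);
}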
diff --git a/meta/recipes-devtools/qemu/qemu/0012_let_dma_buf_write_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0012_let_dma_buf_write_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..9837516422
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0012_let_dma_buf_write_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,129 @@
+From 392e48af3468d7f8e49db33fdc9e28b5f99276ce Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 23:02:21 +0100
+Subject: [PATCH] dma: Let dma_buf_write() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling
+dma_buf_write().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=392e48af3468d7f8e49db33fdc9e28b5f99276ce]
+
+Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-12-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/ide/ahci.c | 6 ++++--
+ hw/nvme/ctrl.c | 3 ++-
+ hw/scsi/megasas.c | 2 +-
+ hw/scsi/scsi-bus.c | 2 +-
+ include/sysemu/dma.h | 2 +-
+ softmmu/dma-helpers.c | 5 ++---
+ 6 files changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
+index 8e77ddb..079d297 100644
+--- a/hw/ide/ahci.c
++++ b/hw/ide/ahci.c
+@@ -1381,8 +1381,10 @@ static void ahci_pio_transfer(const IDEDMA *dma)
+ has_sglist ? "" : "o");
+
+ if (has_sglist && size) {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++
+ if (is_write) {
+- dma_buf_write(s->data_ptr, size, &s->sg);
++ dma_buf_write(s->data_ptr, size, &s->sg, attrs);
+ } else {
+ dma_buf_read(s->data_ptr, size, &s->sg);
+ }
+@@ -1479,7 +1481,7 @@ static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write)
+ if (is_write) {
+ dma_buf_read(p, l, &s->sg);
+ } else {
+- dma_buf_write(p, l, &s->sg);
++ dma_buf_write(p, l, &s->sg, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ /* free sglist, update byte count */
+diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
+index 5f573c4..e1a531d 100644
+--- a/hw/nvme/ctrl.c
++++ b/hw/nvme/ctrl.c
+@@ -1146,10 +1146,11 @@ static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len,
+ assert(sg->flags & NVME_SG_ALLOC);
+
+ if (sg->flags & NVME_SG_DMA) {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ uint64_t residual;
+
+ if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+- residual = dma_buf_write(ptr, len, &sg->qsg);
++ residual = dma_buf_write(ptr, len, &sg->qsg, attrs);
+ } else {
+ residual = dma_buf_read(ptr, len, &sg->qsg);
+ }
+diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
+index 2dae33f..79fd14c 100644
+--- a/hw/scsi/megasas.c
++++ b/hw/scsi/megasas.c
+@@ -1465,7 +1465,7 @@ static int megasas_dcmd_set_properties(MegasasState *s, MegasasCmd *cmd)
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+- dma_buf_write(&info, dcmd_size, &cmd->qsg);
++ dma_buf_write(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ trace_megasas_dcmd_unsupported(cmd->index, cmd->iov_size);
+ return MFI_STAT_OK;
+ }
+diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
+index 77325d8..64a506a 100644
+--- a/hw/scsi/scsi-bus.c
++++ b/hw/scsi/scsi-bus.c
+@@ -1423,7 +1423,7 @@ void scsi_req_data(SCSIRequest *req, int len)
+ if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
+ req->resid = dma_buf_read(buf, len, req->sg);
+ } else {
+- req->resid = dma_buf_write(buf, len, req->sg);
++ req->resid = dma_buf_write(buf, len, req->sg, MEMTXATTRS_UNSPECIFIED);
+ }
+ scsi_req_continue(req);
+ }
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 0d5b836..e3dd74a 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -303,7 +303,7 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
+ QEMUSGList *sg, uint64_t offset, uint32_t align,
+ BlockCompletionFunc *cb, void *opaque);
+ uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg);
+-uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg);
++uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs);
+
+ void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
+ QEMUSGList *sg, enum BlockAcctType type);
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index fa81d2b..2f1a241 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -322,10 +322,9 @@ uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg)
+ MEMTXATTRS_UNSPECIFIED);
+ }
+
+-uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg)
++uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs)
+ {
+- return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE,
+- MEMTXATTRS_UNSPECIFIED);
++ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE, attrs);
+ }
+
+ void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
+--
+1.8.3.1
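A hedged sketch of a converted call site (names invented; dma_buf_write() has the four-argument prototype from the include/sysemu/dma.h hunk above, while dma_buf_read() still takes three arguments at this point in the series):

#include "sysemu/dma.h"

/* Hypothetical guest-to-device fetch: the scatter-gather list describes
 * guest memory and the local buffer receives its contents. Callers with
 * no special requirements keep passing MEMTXATTRS_UNSPECIFIED, exactly as
 * the converted ahci/nvme/megasas/scsi call sites above do. */
static uint64_t fetch_guest_data(void *buf, int32_t len, QEMUSGList *sg)
{
    return dma_buf_write(buf, len, sg, MEMTXATTRS_UNSPECIFIED);
}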
diff --git a/meta/recipes-devtools/qemu/qemu/0013-target-ppc-fix-xscvqpdp-register-access.patch b/meta/recipes-devtools/qemu/qemu/0013-target-ppc-fix-xscvqpdp-register-access.patch
new file mode 100644
index 0000000000..100dcd25bc
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0013-target-ppc-fix-xscvqpdp-register-access.patch
@@ -0,0 +1,41 @@
+From 98ff271a4d1a1d60ae53b1f742df7c188b163375 Mon Sep 17 00:00:00 2001
+From: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:18 +0100
+Subject: [PATCH 13/21] target/ppc: fix xscvqpdp register access
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This instruction has VRT and VRB fields instead of T/TX and B/BX.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=38d4914c5065e14f0969161274793ded448f067f]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Message-Id: <20211213120958.24443-4-victor.colombo@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/translate/vsx-impl.c.inc | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
+index e2447750dd..ab5cb21f13 100644
+--- a/target/ppc/translate/vsx-impl.c.inc
++++ b/target/ppc/translate/vsx-impl.c.inc
+@@ -913,8 +913,9 @@ static void gen_xscvqpdp(DisasContext *ctx)
+ return;
+ }
+ opc = tcg_const_i32(ctx->opcode);
+- xt = gen_vsr_ptr(xT(ctx->opcode));
+- xb = gen_vsr_ptr(xB(ctx->opcode));
++
++ xt = gen_vsr_ptr(rD(ctx->opcode) + 32);
++ xb = gen_vsr_ptr(rB(ctx->opcode) + 32);
+ gen_helper_xscvqpdp(cpu_env, opc, xt, xb);
+ tcg_temp_free_i32(opc);
+ tcg_temp_free_ptr(xt);
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0013_let_dma_buf_read_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0013_let_dma_buf_read_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..4057caa8b0
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0013_let_dma_buf_read_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,222 @@
+From 1e5a3f8b2a976054da96cbbb9de6cbac7c2efb79 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 23:29:52 +0100
+Subject: [PATCH] dma: Let dma_buf_read() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling
+dma_buf_read().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=1e5a3f8b2a976054da96cbbb9de6cbac7c2efb79]
+
+Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-13-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/ide/ahci.c | 4 ++--
+ hw/nvme/ctrl.c | 2 +-
+ hw/scsi/megasas.c | 24 ++++++++++++------------
+ hw/scsi/scsi-bus.c | 2 +-
+ include/sysemu/dma.h | 2 +-
+ softmmu/dma-helpers.c | 5 ++---
+ 6 files changed, 19 insertions(+), 20 deletions(-)
+
+diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
+index 079d297..205dfdc 100644
+--- a/hw/ide/ahci.c
++++ b/hw/ide/ahci.c
+@@ -1386,7 +1386,7 @@ static void ahci_pio_transfer(const IDEDMA *dma)
+ if (is_write) {
+ dma_buf_write(s->data_ptr, size, &s->sg, attrs);
+ } else {
+- dma_buf_read(s->data_ptr, size, &s->sg);
++ dma_buf_read(s->data_ptr, size, &s->sg, attrs);
+ }
+ }
+
+@@ -1479,7 +1479,7 @@ static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write)
+ }
+
+ if (is_write) {
+- dma_buf_read(p, l, &s->sg);
++ dma_buf_read(p, l, &s->sg, MEMTXATTRS_UNSPECIFIED);
+ } else {
+ dma_buf_write(p, l, &s->sg, MEMTXATTRS_UNSPECIFIED);
+ }
+diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
+index e1a531d..462f79a 100644
+--- a/hw/nvme/ctrl.c
++++ b/hw/nvme/ctrl.c
+@@ -1152,7 +1152,7 @@ static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len,
+ if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+ residual = dma_buf_write(ptr, len, &sg->qsg, attrs);
+ } else {
+- residual = dma_buf_read(ptr, len, &sg->qsg);
++ residual = dma_buf_read(ptr, len, &sg->qsg, attrs);
+ }
+
+ if (unlikely(residual)) {
+diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
+index 79fd14c..091a350 100644
+--- a/hw/scsi/megasas.c
++++ b/hw/scsi/megasas.c
+@@ -848,7 +848,7 @@ static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd)
+ MFI_INFO_PDMIX_SATA |
+ MFI_INFO_PDMIX_LD);
+
+- cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+@@ -878,7 +878,7 @@ static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd)
+ info.disable_preboot_cli = 1;
+ info.cluster_disable = 1;
+
+- cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+@@ -899,7 +899,7 @@ static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd)
+ info.expose_all_drives = 1;
+ }
+
+- cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+@@ -910,7 +910,7 @@ static int megasas_dcmd_get_fw_time(MegasasState *s, MegasasCmd *cmd)
+
+ fw_time = cpu_to_le64(megasas_fw_time());
+
+- cmd->iov_size -= dma_buf_read(&fw_time, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&fw_time, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+@@ -937,7 +937,7 @@ static int megasas_event_info(MegasasState *s, MegasasCmd *cmd)
+ info.shutdown_seq_num = cpu_to_le32(s->shutdown_event);
+ info.boot_seq_num = cpu_to_le32(s->boot_event);
+
+- cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+@@ -1006,7 +1006,7 @@ static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd)
+ info.size = cpu_to_le32(offset);
+ info.count = cpu_to_le32(num_pd_disks);
+
+- cmd->iov_size -= dma_buf_read(&info, offset, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, offset, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+@@ -1100,7 +1100,7 @@ static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun,
+ info->connected_port_bitmap = 0x1;
+ info->device_speed = 1;
+ info->link_speed = 1;
+- resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg);
++ resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ g_free(cmd->iov_buf);
+ cmd->iov_size = dcmd_size - resid;
+ cmd->iov_buf = NULL;
+@@ -1172,7 +1172,7 @@ static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd)
+ info.ld_count = cpu_to_le32(num_ld_disks);
+ trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks);
+
+- resid = dma_buf_read(&info, dcmd_size, &cmd->qsg);
++ resid = dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ cmd->iov_size = dcmd_size - resid;
+ return MFI_STAT_OK;
+ }
+@@ -1221,7 +1221,7 @@ static int megasas_dcmd_ld_list_query(MegasasState *s, MegasasCmd *cmd)
+ info.size = dcmd_size;
+ trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks);
+
+- resid = dma_buf_read(&info, dcmd_size, &cmd->qsg);
++ resid = dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ cmd->iov_size = dcmd_size - resid;
+ return MFI_STAT_OK;
+ }
+@@ -1271,7 +1271,7 @@ static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun,
+ info->ld_config.span[0].num_blocks = info->size;
+ info->ld_config.span[0].array_ref = cpu_to_le16(sdev_id);
+
+- resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg);
++ resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ g_free(cmd->iov_buf);
+ cmd->iov_size = dcmd_size - resid;
+ cmd->iov_buf = NULL;
+@@ -1390,7 +1390,7 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd)
+ ld_offset += sizeof(struct mfi_ld_config);
+ }
+
+- cmd->iov_size -= dma_buf_read(data, info->size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(data, info->size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+@@ -1420,7 +1420,7 @@ static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd)
+ info.ecc_bucket_leak_rate = cpu_to_le16(1440);
+ info.expose_encl_devices = 1;
+
+- cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg);
++ cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED);
+ return MFI_STAT_OK;
+ }
+
+diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
+index 64a506a..2b5e9dc 100644
+--- a/hw/scsi/scsi-bus.c
++++ b/hw/scsi/scsi-bus.c
+@@ -1421,7 +1421,7 @@ void scsi_req_data(SCSIRequest *req, int len)
+
+ buf = scsi_req_get_buf(req);
+ if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
+- req->resid = dma_buf_read(buf, len, req->sg);
++ req->resid = dma_buf_read(buf, len, req->sg, MEMTXATTRS_UNSPECIFIED);
+ } else {
+ req->resid = dma_buf_write(buf, len, req->sg, MEMTXATTRS_UNSPECIFIED);
+ }
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index e3dd74a..fd8f160 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -302,7 +302,7 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
+ BlockAIOCB *dma_blk_write(BlockBackend *blk,
+ QEMUSGList *sg, uint64_t offset, uint32_t align,
+ BlockCompletionFunc *cb, void *opaque);
+-uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg);
++uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs);
+ uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs);
+
+ void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index 2f1a241..a391773 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -316,10 +316,9 @@ static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+ return resid;
+ }
+
+-uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg)
++uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs)
+ {
+- return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE,
+- MEMTXATTRS_UNSPECIFIED);
++ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE, attrs);
+ }
+
+ uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs)
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0014-target-ppc-move-xscvqpdp-to-decodetree.patch b/meta/recipes-devtools/qemu/qemu/0014-target-ppc-move-xscvqpdp-to-decodetree.patch
new file mode 100644
index 0000000000..345a49c90c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0014-target-ppc-move-xscvqpdp-to-decodetree.patch
@@ -0,0 +1,130 @@
+From c76ea6322bd70c36c9b396cf356167b36928e811 Mon Sep 17 00:00:00 2001
+From: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:18 +0100
+Subject: [PATCH 14/21] target/ppc: move xscvqpdp to decodetree
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=caf6f9b568479bea6f6d97798be670f21641a006]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Message-Id: <20211213120958.24443-5-victor.colombo@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 10 +++-------
+ target/ppc/helper.h | 2 +-
+ target/ppc/insn32.decode | 4 ++++
+ target/ppc/translate/vsx-impl.c.inc | 24 +++++++++++++-----------
+ target/ppc/translate/vsx-ops.c.inc | 1 -
+ 5 files changed, 21 insertions(+), 20 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index ecdcd36a11..5cc7fb1dcb 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -2631,18 +2631,14 @@ VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
+ VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
+ VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
+
+-/*
+- * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
+- * added to this later.
+- */
+-void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
+- ppc_vsr_t *xt, ppc_vsr_t *xb)
++void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
++ ppc_vsr_t *xb)
+ {
+ ppc_vsr_t t = { };
+ float_status tstat;
+
+ tstat = env->fp_status;
+- if (unlikely(Rc(opcode) != 0)) {
++ if (ro != 0) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+diff --git a/target/ppc/helper.h b/target/ppc/helper.h
+index 12a3d5f269..ef5bdd38a7 100644
+--- a/target/ppc/helper.h
++++ b/target/ppc/helper.h
+@@ -400,7 +400,7 @@ DEF_HELPER_3(xscvdphp, void, env, vsr, vsr)
+ DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr)
+ DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr)
+ DEF_HELPER_2(xscvdpspn, i64, env, i64)
+-DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr)
++DEF_HELPER_4(XSCVQPDP, void, env, i32, vsr, vsr)
+ DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr)
+ DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr)
+ DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr)
+diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
+index 759b2a9aa5..fd6bb13fa0 100644
+--- a/target/ppc/insn32.decode
++++ b/target/ppc/insn32.decode
+@@ -438,3 +438,7 @@ XSMAXCDP 111100 ..... ..... ..... 10000000 ... @XX3
+ XSMINCDP 111100 ..... ..... ..... 10001000 ... @XX3
+ XSMAXJDP 111100 ..... ..... ..... 10010000 ... @XX3
+ XSMINJDP 111100 ..... ..... ..... 10011000 ... @XX3
++
++## VSX Binary Floating-Point Convert Instructions
++
++XSCVQPDP 111111 ..... 10100 ..... 1101000100 . @X_tb_rc
+diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
+index ab5cb21f13..c08185e857 100644
+--- a/target/ppc/translate/vsx-impl.c.inc
++++ b/target/ppc/translate/vsx-impl.c.inc
+@@ -904,22 +904,24 @@ VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
+ VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
+ VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
+
+-static void gen_xscvqpdp(DisasContext *ctx)
++static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a)
+ {
+- TCGv_i32 opc;
++ TCGv_i32 ro;
+ TCGv_ptr xt, xb;
+- if (unlikely(!ctx->vsx_enabled)) {
+- gen_exception(ctx, POWERPC_EXCP_VSXU);
+- return;
+- }
+- opc = tcg_const_i32(ctx->opcode);
+
+- xt = gen_vsr_ptr(rD(ctx->opcode) + 32);
+- xb = gen_vsr_ptr(rB(ctx->opcode) + 32);
+- gen_helper_xscvqpdp(cpu_env, opc, xt, xb);
+- tcg_temp_free_i32(opc);
++ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
++ REQUIRE_VSX(ctx);
++
++ ro = tcg_const_i32(a->rc);
++
++ xt = gen_avr_ptr(a->rt);
++ xb = gen_avr_ptr(a->rb);
++ gen_helper_XSCVQPDP(cpu_env, ro, xt, xb);
++ tcg_temp_free_i32(ro);
+ tcg_temp_free_ptr(xt);
+ tcg_temp_free_ptr(xb);
++
++ return true;
+ }
+
+ #define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
+diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc
+index f980bc1bae..c974324c4c 100644
+--- a/target/ppc/translate/vsx-ops.c.inc
++++ b/target/ppc/translate/vsx-ops.c.inc
+@@ -133,7 +133,6 @@ GEN_VSX_XFORM_300_EO(xsnabsqp, 0x04, 0x19, 0x08, 0x00000001),
+ GEN_VSX_XFORM_300_EO(xsnegqp, 0x04, 0x19, 0x10, 0x00000001),
+ GEN_VSX_XFORM_300(xscpsgnqp, 0x04, 0x03, 0x00000001),
+ GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001),
+-GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0),
+ GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001),
+ GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001),
+ GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001),
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0014_let_dma_buf_rw_function_propagate_MemTxResult.patch b/meta/recipes-devtools/qemu/qemu/0014_let_dma_buf_rw_function_propagate_MemTxResult.patch
new file mode 100644
index 0000000000..571ce9cc9b
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0014_let_dma_buf_rw_function_propagate_MemTxResult.patch
@@ -0,0 +1,91 @@
+From 292e13142d277c15bdd68331abc607e46628b7e1 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 23:38:52 +0100
+Subject: [PATCH] dma: Let dma_buf_rw() propagate MemTxResult
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+dma_memory_rw() returns a MemTxResult type. Do not discard
+it, return it to the caller.
+
+Since dma_buf_rw() was previously returning the QEMUSGList
+size not consumed, add an extra argument where this size
+can be stored.
+
+Update the 2 callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=292e13142d277c15bdd68331abc607e46628b7e1]
+
+Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-14-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ softmmu/dma-helpers.c | 25 +++++++++++++++++++------
+ 1 file changed, 19 insertions(+), 6 deletions(-)
+
+diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
+index a391773..b0be156 100644
+--- a/softmmu/dma-helpers.c
++++ b/softmmu/dma-helpers.c
+@@ -294,12 +294,14 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
+ }
+
+
+-static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+- DMADirection dir, MemTxAttrs attrs)
++static MemTxResult dma_buf_rw(void *buf, int32_t len, uint64_t *residp,
++ QEMUSGList *sg, DMADirection dir,
++ MemTxAttrs attrs)
+ {
+ uint8_t *ptr = buf;
+ uint64_t resid;
+ int sg_cur_index;
++ MemTxResult res = MEMTX_OK;
+
+ resid = sg->size;
+ sg_cur_index = 0;
+@@ -307,23 +309,34 @@ static uint64_t dma_buf_rw(void *buf, int32_t len, QEMUSGList *sg,
+ while (len > 0) {
+ ScatterGatherEntry entry = sg->sg[sg_cur_index++];
+ int32_t xfer = MIN(len, entry.len);
+- dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs);
++ res |= dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs);
+ ptr += xfer;
+ len -= xfer;
+ resid -= xfer;
+ }
+
+- return resid;
++ if (residp) {
++ *residp = resid;
++ }
++ return res;
+ }
+
+ uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs)
+ {
+- return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE, attrs);
++ uint64_t resid;
++
++ dma_buf_rw(ptr, len, &resid, sg, DMA_DIRECTION_FROM_DEVICE, attrs);
++
++ return resid;
+ }
+
+ uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs)
+ {
+- return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE, attrs);
++ uint64_t resid;
++
++ dma_buf_rw(ptr, len, &resid, sg, DMA_DIRECTION_TO_DEVICE, attrs);
++
++ return resid;
+ }
+
+ void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0015-target-ppc-ppc_store_fpscr-doesn-t-update-bits-0-to-.patch b/meta/recipes-devtools/qemu/qemu/0015-target-ppc-ppc_store_fpscr-doesn-t-update-bits-0-to-.patch
new file mode 100644
index 0000000000..5c5f972961
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0015-target-ppc-ppc_store_fpscr-doesn-t-update-bits-0-to-.patch
@@ -0,0 +1,70 @@
+From 7448ee811d86b18a7f7f59e20853bd852e548f59 Mon Sep 17 00:00:00 2001
+From: "Lucas Mateus Castro (alqotel)" <lucas.araujo@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:13 +0100
+Subject: [PATCH 15/21] target/ppc: ppc_store_fpscr doesn't update bits 0 to 28
+ and 52
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This commit fixes the difference reported in the bug in the reserved
+bit 52, it does this by adding this bit to the mask of bits to not be
+directly altered in the ppc_store_fpscr function (the hardware used to
+compare to QEMU was a Power9).
+
+The bits 0 to 27 were also added to the mask, as they are marked as
+reserved in the PowerISA and bit 28 is a reserved extension of the DRN
+field (bits 29:31) but can't be set using mtfsfi, while the other DRN
+bits may be set using mtfsfi instruction, so bit 28 was also added to
+the mask.
+
+Although this is a difference reported in the bug, since it's a reserved
+bit it may be a "don't care" case, as put in the bug report. Looking at
+the ISA it doesn't explicitly mention this bit can't be set, like it
+does for FEX and VX, so I'm unsure if this is necessary.
+
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/266
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=25ee608d79c1890c0f4e8c495ec8629d5712de45]
+
+Signed-off-by: Lucas Mateus Castro (alqotel) <lucas.araujo@eldorado.org.br>
+Message-Id: <20211201163808.440385-4-lucas.araujo@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/cpu.c | 2 +-
+ target/ppc/cpu.h | 4 ++++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c
+index f933d9f2bd..d7b42bae52 100644
+--- a/target/ppc/cpu.c
++++ b/target/ppc/cpu.c
+@@ -112,7 +112,7 @@ static inline void fpscr_set_rounding_mode(CPUPPCState *env)
+
+ void ppc_store_fpscr(CPUPPCState *env, target_ulong val)
+ {
+- val &= ~(FP_VX | FP_FEX);
++ val &= FPSCR_MTFS_MASK;
+ if (val & FPSCR_IX) {
+ val |= FP_VX;
+ }
+diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
+index e946da5f3a..441d3dce19 100644
+--- a/target/ppc/cpu.h
++++ b/target/ppc/cpu.h
+@@ -759,6 +759,10 @@ enum {
+ FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \
+ FP_VXSQRT | FP_VXCVI)
+
++/* FPSCR bits that can be set by mtfsf, mtfsfi and mtfsb1 */
++#define FPSCR_MTFS_MASK (~(MAKE_64BIT_MASK(36, 28) | PPC_BIT(28) | \
++ FP_FEX | FP_VX | PPC_BIT(52)))
++
+ /*****************************************************************************/
+ /* Vector status and control register */
+ #define VSCR_NJ 16 /* Vector non-java */
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0015_let_st_pointer_dma_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0015_let_st_pointer_dma_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..7f56dcb6eb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0015_let_st_pointer_dma_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,120 @@
+From 2280c27afc65bb2af95dd44a88e3b7117bfe240a Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 23:53:34 +0100
+Subject: [PATCH] dma: Let st*_dma() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling st*_dma().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=2280c27afc65bb2af95dd44a88e3b7117bfe240a]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-16-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/nvram/fw_cfg.c | 4 ++--
+ include/hw/pci/pci.h | 3 ++-
+ include/hw/ppc/spapr_vio.h | 12 ++++++++----
+ include/sysemu/dma.h | 10 ++++++----
+ 4 files changed, 18 insertions(+), 11 deletions(-)
+
+diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
+index 9b91b15..e5f3c981 100644
+--- a/hw/nvram/fw_cfg.c
++++ b/hw/nvram/fw_cfg.c
+@@ -360,7 +360,7 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
+ if (dma_memory_read(s->dma_as, dma_addr,
+ &dma, sizeof(dma), MEMTXATTRS_UNSPECIFIED)) {
+ stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
+- FW_CFG_DMA_CTL_ERROR);
++ FW_CFG_DMA_CTL_ERROR, MEMTXATTRS_UNSPECIFIED);
+ return;
+ }
+
+@@ -446,7 +446,7 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
+ }
+
+ stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
+- dma.control);
++ dma.control, MEMTXATTRS_UNSPECIFIED);
+
+ trace_fw_cfg_read(s, 0);
+ }
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index a751ab5..d07e970 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -859,7 +859,8 @@ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ static inline void st##_s##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, uint##_bits##_t val) \
+ { \
+- st##_s##_dma(pci_get_address_space(dev), addr, val); \
++ st##_s##_dma(pci_get_address_space(dev), addr, val, \
++ MEMTXATTRS_UNSPECIFIED); \
+ }
+
+ PCI_DMA_DEFINE_LDST(ub, b, 8);
+diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
+index 5d2ea8e..e87f8e6 100644
+--- a/include/hw/ppc/spapr_vio.h
++++ b/include/hw/ppc/spapr_vio.h
+@@ -118,10 +118,14 @@ static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr,
+ H_DEST_PARM : H_SUCCESS;
+ }
+
+-#define vio_stb(_dev, _addr, _val) (stb_dma(&(_dev)->as, (_addr), (_val)))
+-#define vio_sth(_dev, _addr, _val) (stw_be_dma(&(_dev)->as, (_addr), (_val)))
+-#define vio_stl(_dev, _addr, _val) (stl_be_dma(&(_dev)->as, (_addr), (_val)))
+-#define vio_stq(_dev, _addr, _val) (stq_be_dma(&(_dev)->as, (_addr), (_val)))
++#define vio_stb(_dev, _addr, _val) \
++ (stb_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
++#define vio_sth(_dev, _addr, _val) \
++ (stw_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
++#define vio_stl(_dev, _addr, _val) \
++ (stl_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
++#define vio_stq(_dev, _addr, _val) \
++ (stq_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+ #define vio_ldq(_dev, _addr) (ldq_be_dma(&(_dev)->as, (_addr)))
+
+ int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq);
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index fd8f160..009dd3c 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -249,10 +249,11 @@ static inline void dma_memory_unmap(AddressSpace *as,
+ } \
+ static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+- uint##_bits##_t val) \
++ uint##_bits##_t val, \
++ MemTxAttrs attrs) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+- dma_memory_write(as, addr, &val, (_bits) / 8, MEMTXATTRS_UNSPECIFIED); \
++ dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
+ }
+
+ static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
+@@ -263,9 +264,10 @@ static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
+ return val;
+ }
+
+-static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
++static inline void stb_dma(AddressSpace *as, dma_addr_t addr,
++ uint8_t val, MemTxAttrs attrs)
+ {
+- dma_memory_write(as, addr, &val, 1, MEMTXATTRS_UNSPECIFIED);
++ dma_memory_write(as, addr, &val, 1, attrs);
+ }
+
+ DEFINE_LDST_DMA(uw, w, 16, le);
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0016-target-ppc-Introduce-TRANS-FLAGS-macros.patch b/meta/recipes-devtools/qemu/qemu/0016-target-ppc-Introduce-TRANS-FLAGS-macros.patch
new file mode 100644
index 0000000000..3b651c0b3e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0016-target-ppc-Introduce-TRANS-FLAGS-macros.patch
@@ -0,0 +1,133 @@
+From 232f979babccd6dfac40a54ee33521e652a0577c Mon Sep 17 00:00:00 2001
+From: Luis Pires <luis.pires@eldorado.org.br>
+Date: Wed, 2 Mar 2022 06:51:36 +0100
+Subject: [PATCH 16/21] target/ppc: Introduce TRANS*FLAGS macros
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+New macros that add FLAGS and FLAGS2 checking were added for
+both TRANS and TRANS64.
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=19f0862dd8fa6510b2f5b3aff4859363602cd0cf]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
+[ferst: - TRANS_FLAGS2 instead of TRANS_FLAGS_E
+ - Use the new macros in load/store vector insns ]
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Message-Id: <20220225210936.1749575-2-matheus.ferst@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/translate.c | 19 +++++++++++++++
+ target/ppc/translate/vsx-impl.c.inc | 37 ++++++++++-------------------
+ 2 files changed, 31 insertions(+), 25 deletions(-)
+
+diff --git a/target/ppc/translate.c b/target/ppc/translate.c
+index 9960df6e18..c12abc32f6 100644
+--- a/target/ppc/translate.c
++++ b/target/ppc/translate.c
+@@ -7377,10 +7377,29 @@ static int times_16(DisasContext *ctx, int x)
+ #define TRANS(NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { return FUNC(ctx, a, __VA_ARGS__); }
++#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
++ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
++ { \
++ REQUIRE_INSNS_FLAGS(ctx, FLAGS); \
++ return FUNC(ctx, a, __VA_ARGS__); \
++ }
++#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
++ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
++ { \
++ REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
++ return FUNC(ctx, a, __VA_ARGS__); \
++ }
+
+ #define TRANS64(NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
++#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
++ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
++ { \
++ REQUIRE_64BIT(ctx); \
++ REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
++ return FUNC(ctx, a, __VA_ARGS__); \
++ }
+
+ /* TODO: More TRANS* helpers for extra insn_flags checks. */
+
+diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
+index c08185e857..99c8a57e50 100644
+--- a/target/ppc/translate/vsx-impl.c.inc
++++ b/target/ppc/translate/vsx-impl.c.inc
+@@ -2070,12 +2070,6 @@ static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
+
+ static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
+ {
+- if (paired) {
+- REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+- } else {
+- REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+- }
+-
+ if (paired || a->rt >= 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+@@ -2089,7 +2083,6 @@ static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
+ bool store, bool paired)
+ {
+ arg_D d;
+- REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ if (!resolve_PLS_D(ctx, &d, a)) {
+@@ -2101,12 +2094,6 @@ static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
+
+ static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
+ {
+- if (paired) {
+- REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+- } else {
+- REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+- }
+-
+ if (paired || a->rt >= 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+@@ -2116,18 +2103,18 @@ static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
+ return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
+ }
+
+-TRANS(STXV, do_lstxv_D, true, false)
+-TRANS(LXV, do_lstxv_D, false, false)
+-TRANS(STXVP, do_lstxv_D, true, true)
+-TRANS(LXVP, do_lstxv_D, false, true)
+-TRANS(STXVX, do_lstxv_X, true, false)
+-TRANS(LXVX, do_lstxv_X, false, false)
+-TRANS(STXVPX, do_lstxv_X, true, true)
+-TRANS(LXVPX, do_lstxv_X, false, true)
+-TRANS64(PSTXV, do_lstxv_PLS_D, true, false)
+-TRANS64(PLXV, do_lstxv_PLS_D, false, false)
+-TRANS64(PSTXVP, do_lstxv_PLS_D, true, true)
+-TRANS64(PLXVP, do_lstxv_PLS_D, false, true)
++TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false)
++TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false)
++TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true)
++TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true)
++TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false)
++TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false)
++TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true)
++TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true)
++TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false)
++TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false)
++TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true)
++TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true)
+
+ static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ TCGv_vec c)
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0016_let_ld_pointer_dma_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0016_let_ld_pointer_dma_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..a51451d343
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0016_let_ld_pointer_dma_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,151 @@
+From 34cdea1db600540a5261dc474e986f28b637c8e6 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 22:18:07 +0100
+Subject: [PATCH] dma: Let ld*_dma() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling ld*_dma().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=34cdea1db600540a5261dc474e986f28b637c8e6]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-17-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/intc/pnv_xive.c | 7 ++++---
+ hw/usb/hcd-xhci.c | 6 +++---
+ include/hw/pci/pci.h | 3 ++-
+ include/hw/ppc/spapr_vio.h | 3 ++-
+ include/sysemu/dma.h | 11 ++++++-----
+ 5 files changed, 17 insertions(+), 13 deletions(-)
+
+diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
+index ad43483..d9249bb 100644
+--- a/hw/intc/pnv_xive.c
++++ b/hw/intc/pnv_xive.c
+@@ -172,7 +172,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
+
+ /* Get the page size of the indirect table. */
+ vsd_addr = vsd & VSD_ADDRESS_MASK;
+- vsd = ldq_be_dma(&address_space_memory, vsd_addr);
++ vsd = ldq_be_dma(&address_space_memory, vsd_addr, MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ #ifdef XIVE_DEBUG
+@@ -195,7 +195,8 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
+ /* Load the VSD we are looking for, if not already done */
+ if (vsd_idx) {
+ vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
+- vsd = ldq_be_dma(&address_space_memory, vsd_addr);
++ vsd = ldq_be_dma(&address_space_memory, vsd_addr,
++ MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ #ifdef XIVE_DEBUG
+@@ -542,7 +543,7 @@ static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
+
+ /* Get the page size of the indirect table. */
+ vsd_addr = vsd & VSD_ADDRESS_MASK;
+- vsd = ldq_be_dma(&address_space_memory, vsd_addr);
++ vsd = ldq_be_dma(&address_space_memory, vsd_addr, MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ #ifdef XIVE_DEBUG
+diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
+index ed2b9ea..d960b81 100644
+--- a/hw/usb/hcd-xhci.c
++++ b/hw/usb/hcd-xhci.c
+@@ -2062,7 +2062,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid,
+ assert(slotid >= 1 && slotid <= xhci->numslots);
+
+ dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high);
+- poctx = ldq_le_dma(xhci->as, dcbaap + 8 * slotid);
++ poctx = ldq_le_dma(xhci->as, dcbaap + 8 * slotid, MEMTXATTRS_UNSPECIFIED);
+ ictx = xhci_mask64(pictx);
+ octx = xhci_mask64(poctx);
+
+@@ -3437,8 +3437,8 @@ static int usb_xhci_post_load(void *opaque, int version_id)
+ if (!slot->addressed) {
+ continue;
+ }
+- slot->ctx =
+- xhci_mask64(ldq_le_dma(xhci->as, dcbaap + 8 * slotid));
++ slot->ctx = xhci_mask64(ldq_le_dma(xhci->as, dcbaap + 8 * slotid,
++ MEMTXATTRS_UNSPECIFIED));
+ xhci_dma_read_u32s(xhci, slot->ctx, slot_ctx, sizeof(slot_ctx));
+ slot->uport = xhci_lookup_uport(xhci, slot_ctx);
+ if (!slot->uport) {
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index d07e970..0613308 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -854,7 +854,8 @@ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr) \
+ { \
+- return ld##_l##_dma(pci_get_address_space(dev), addr); \
++ return ld##_l##_dma(pci_get_address_space(dev), addr, \
++ MEMTXATTRS_UNSPECIFIED); \
+ } \
+ static inline void st##_s##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, uint##_bits##_t val) \
+diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
+index e87f8e6..d2ec9b0 100644
+--- a/include/hw/ppc/spapr_vio.h
++++ b/include/hw/ppc/spapr_vio.h
+@@ -126,7 +126,8 @@ static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr,
+ (stl_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+ #define vio_stq(_dev, _addr, _val) \
+ (stq_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+-#define vio_ldq(_dev, _addr) (ldq_be_dma(&(_dev)->as, (_addr)))
++#define vio_ldq(_dev, _addr) \
++ (ldq_be_dma(&(_dev)->as, (_addr), MEMTXATTRS_UNSPECIFIED))
+
+ int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq);
+
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 009dd3c..d1635f5 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -241,10 +241,11 @@ static inline void dma_memory_unmap(AddressSpace *as,
+
+ #define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
+ static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
+- dma_addr_t addr) \
++ dma_addr_t addr, \
++ MemTxAttrs attrs) \
+ { \
+ uint##_bits##_t val; \
+- dma_memory_read(as, addr, &val, (_bits) / 8, MEMTXATTRS_UNSPECIFIED); \
++ dma_memory_read(as, addr, &val, (_bits) / 8, attrs); \
+ return _end##_bits##_to_cpu(val); \
+ } \
+ static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
+@@ -253,14 +254,14 @@ static inline void dma_memory_unmap(AddressSpace *as,
+ MemTxAttrs attrs) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+- dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
++ dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
+ }
+
+-static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
++static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr, MemTxAttrs attrs)
+ {
+ uint8_t val;
+
+- dma_memory_read(as, addr, &val, 1, MEMTXATTRS_UNSPECIFIED);
++ dma_memory_read(as, addr, &val, 1, attrs);
+ return val;
+ }
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0017-target-ppc-Implement-Vector-Expand-Mask.patch b/meta/recipes-devtools/qemu/qemu/0017-target-ppc-Implement-Vector-Expand-Mask.patch
new file mode 100644
index 0000000000..6d6d6b86ed
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0017-target-ppc-Implement-Vector-Expand-Mask.patch
@@ -0,0 +1,105 @@
+From 4c6a16c2bcdd14249eef876d3d029c445716fb13 Mon Sep 17 00:00:00 2001
+From: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:13 +0100
+Subject: [PATCH 17/21] target/ppc: Implement Vector Expand Mask
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Implement the following PowerISA v3.1 instructions:
+vexpandbm: Vector Expand Byte Mask
+vexpandhm: Vector Expand Halfword Mask
+vexpandwm: Vector Expand Word Mask
+vexpanddm: Vector Expand Doubleword Mask
+vexpandqm: Vector Expand Quadword Mask
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=5f1470b091007f24035d6d33149df49a6dd61682]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Message-Id: <20211203194229.746275-2-matheus.ferst@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/insn32.decode | 11 ++++++++++
+ target/ppc/translate/vmx-impl.c.inc | 34 +++++++++++++++++++++++++++++
+ 2 files changed, 45 insertions(+)
+
+diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
+index fd6bb13fa0..e032251c74 100644
+--- a/target/ppc/insn32.decode
++++ b/target/ppc/insn32.decode
+@@ -56,6 +56,9 @@
+ &VX_uim4 vrt uim vrb
+ @VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4
+
++&VX_tb vrt vrb
++@VX_tb ...... vrt:5 ..... vrb:5 ........... &VX_tb
++
+ &X rt ra rb
+ @X ...... rt:5 ra:5 rb:5 .......... . &X
+
+@@ -412,6 +415,14 @@ VINSWVRX 000100 ..... ..... ..... 00110001111 @VX
+ VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN
+ VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
+
++## Vector Mask Manipulation Instructions
++
++VEXPANDBM 000100 ..... 00000 ..... 11001000010 @VX_tb
++VEXPANDHM 000100 ..... 00001 ..... 11001000010 @VX_tb
++VEXPANDWM 000100 ..... 00010 ..... 11001000010 @VX_tb
++VEXPANDDM 000100 ..... 00011 ..... 11001000010 @VX_tb
++VEXPANDQM 000100 ..... 00100 ..... 11001000010 @VX_tb
++
+ # VSX Load/Store Instructions
+
+ LXV 111101 ..... ..... ............ . 001 @DQ_TSX
+diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
+index 8eb8d3a067..ebb0484323 100644
+--- a/target/ppc/translate/vmx-impl.c.inc
++++ b/target/ppc/translate/vmx-impl.c.inc
+@@ -1491,6 +1491,40 @@ static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
+ return true;
+ }
+
++static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
++{
++ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
++ REQUIRE_VECTOR(ctx);
++
++ tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
++ (8 << vece) - 1, 16, 16);
++
++ return true;
++}
++
++TRANS(VEXPANDBM, do_vexpand, MO_8)
++TRANS(VEXPANDHM, do_vexpand, MO_16)
++TRANS(VEXPANDWM, do_vexpand, MO_32)
++TRANS(VEXPANDDM, do_vexpand, MO_64)
++
++static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
++{
++ TCGv_i64 tmp;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
++ REQUIRE_VECTOR(ctx);
++
++ tmp = tcg_temp_new_i64();
++
++ get_avr64(tmp, a->vrb, true);
++ tcg_gen_sari_i64(tmp, tmp, 63);
++ set_avr64(a->vrt, tmp, false);
++ set_avr64(a->vrt, tmp, true);
++
++ tcg_temp_free_i64(tmp);
++ return true;
++}
++
+ #define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+ { \
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0017_let_st_pointer_dma_function_propagate_MemTxResult.patch b/meta/recipes-devtools/qemu/qemu/0017_let_st_pointer_dma_function_propagate_MemTxResult.patch
new file mode 100644
index 0000000000..3fc7b631a4
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0017_let_st_pointer_dma_function_propagate_MemTxResult.patch
@@ -0,0 +1,65 @@
+From 24aed6bcb6b6d266149591f955c2460c28759eb4 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 23:56:14 +0100
+Subject: [PATCH] dma: Let st*_dma() propagate MemTxResult
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+dma_memory_write() returns a MemTxResult type. Do not discard
+it, return it to the caller.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=24aed6bcb6b6d266149591f955c2460c28759eb4]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-18-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ include/sysemu/dma.h | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index d1635f5..895044d 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -248,13 +248,13 @@ static inline void dma_memory_unmap(AddressSpace *as,
+ dma_memory_read(as, addr, &val, (_bits) / 8, attrs); \
+ return _end##_bits##_to_cpu(val); \
+ } \
+- static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
+- dma_addr_t addr, \
+- uint##_bits##_t val, \
+- MemTxAttrs attrs) \
+- { \
+- val = cpu_to_##_end##_bits(val); \
+- dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
++ static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
++ dma_addr_t addr, \
++ uint##_bits##_t val, \
++ MemTxAttrs attrs) \
++ { \
++ val = cpu_to_##_end##_bits(val); \
++ return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
+ }
+
+ static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr, MemTxAttrs attrs)
+@@ -265,10 +265,10 @@ static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr, MemTxAttrs att
+ return val;
+ }
+
+-static inline void stb_dma(AddressSpace *as, dma_addr_t addr,
+- uint8_t val, MemTxAttrs attrs)
++static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
++ uint8_t val, MemTxAttrs attrs)
+ {
+- dma_memory_write(as, addr, &val, 1, attrs);
++ return dma_memory_write(as, addr, &val, 1, attrs);
+ }
+
+ DEFINE_LDST_DMA(uw, w, 16, le);
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0018-target-ppc-Implement-Vector-Extract-Mask.patch b/meta/recipes-devtools/qemu/qemu/0018-target-ppc-Implement-Vector-Extract-Mask.patch
new file mode 100644
index 0000000000..57450c6fb7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0018-target-ppc-Implement-Vector-Extract-Mask.patch
@@ -0,0 +1,141 @@
+From 2dc8450e80b82c481904570dce789843b031db13 Mon Sep 17 00:00:00 2001
+From: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:13 +0100
+Subject: [PATCH 18/21] target/ppc: Implement Vector Extract Mask
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Implement the following PowerISA v3.1 instructions:
+vextractbm: Vector Extract Byte Mask
+vextracthm: Vector Extract Halfword Mask
+vextractwm: Vector Extract Word Mask
+vextractdm: Vector Extract Doubleword Mask
+vextractqm: Vector Extract Quadword Mask
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=17868d81e0074905b2c1e414af6618570e8059eb]
+
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Message-Id: <20211203194229.746275-3-matheus.ferst@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/insn32.decode | 6 +++
+ target/ppc/translate/vmx-impl.c.inc | 82 +++++++++++++++++++++++++++++
+ 2 files changed, 88 insertions(+)
+
+diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
+index e032251c74..b0568b1356 100644
+--- a/target/ppc/insn32.decode
++++ b/target/ppc/insn32.decode
+@@ -423,6 +423,12 @@ VEXPANDWM 000100 ..... 00010 ..... 11001000010 @VX_tb
+ VEXPANDDM 000100 ..... 00011 ..... 11001000010 @VX_tb
+ VEXPANDQM 000100 ..... 00100 ..... 11001000010 @VX_tb
+
++VEXTRACTBM 000100 ..... 01000 ..... 11001000010 @VX_tb
++VEXTRACTHM 000100 ..... 01001 ..... 11001000010 @VX_tb
++VEXTRACTWM 000100 ..... 01010 ..... 11001000010 @VX_tb
++VEXTRACTDM 000100 ..... 01011 ..... 11001000010 @VX_tb
++VEXTRACTQM 000100 ..... 01100 ..... 11001000010 @VX_tb
++
+ # VSX Load/Store Instructions
+
+ LXV 111101 ..... ..... ............ . 001 @DQ_TSX
+diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
+index ebb0484323..96c97bf6e7 100644
+--- a/target/ppc/translate/vmx-impl.c.inc
++++ b/target/ppc/translate/vmx-impl.c.inc
+@@ -1525,6 +1525,88 @@ static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
+ return true;
+ }
+
++static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
++{
++ const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
++ mask = dup_const(vece, 1 << (elem_width - 1));
++ uint64_t i, j;
++ TCGv_i64 lo, hi, t0, t1;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
++ REQUIRE_VECTOR(ctx);
++
++ hi = tcg_temp_new_i64();
++ lo = tcg_temp_new_i64();
++ t0 = tcg_temp_new_i64();
++ t1 = tcg_temp_new_i64();
++
++ get_avr64(lo, a->vrb, false);
++ get_avr64(hi, a->vrb, true);
++
++ tcg_gen_andi_i64(lo, lo, mask);
++ tcg_gen_andi_i64(hi, hi, mask);
++
++ /*
++ * Gather the most significant bit of each element in the highest element
++ * element. E.g. for bytes:
++ * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
++ * & dup(1 << (elem_width - 1))
++ * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
++ * << 32 - 4
++ * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
++ * |
++ * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
++ * << 16 - 2
++ * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
++ * |
++ * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
++ * << 8 - 1
++ * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
++ * |
++ * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
++ */
++ for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
++ tcg_gen_shli_i64(t0, hi, j - i);
++ tcg_gen_shli_i64(t1, lo, j - i);
++ tcg_gen_or_i64(hi, hi, t0);
++ tcg_gen_or_i64(lo, lo, t1);
++ }
++
++ tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
++ tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
++ tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
++
++ tcg_temp_free_i64(hi);
++ tcg_temp_free_i64(lo);
++ tcg_temp_free_i64(t0);
++ tcg_temp_free_i64(t1);
++
++ return true;
++}
++
++TRANS(VEXTRACTBM, do_vextractm, MO_8)
++TRANS(VEXTRACTHM, do_vextractm, MO_16)
++TRANS(VEXTRACTWM, do_vextractm, MO_32)
++TRANS(VEXTRACTDM, do_vextractm, MO_64)
++
++static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
++{
++ TCGv_i64 tmp;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
++ REQUIRE_VECTOR(ctx);
++
++ tmp = tcg_temp_new_i64();
++
++ get_avr64(tmp, a->vrb, true);
++ tcg_gen_shri_i64(tmp, tmp, 63);
++ tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
++
++ tcg_temp_free_i64(tmp);
++
++ return true;
++}
++
+ #define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+ { \
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0018_let_ld_pointer_dma_function_propagate_MemTxResult.patch b/meta/recipes-devtools/qemu/qemu/0018_let_ld_pointer_dma_function_propagate_MemTxResult.patch
new file mode 100644
index 0000000000..d8a136c47f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0018_let_ld_pointer_dma_function_propagate_MemTxResult.patch
@@ -0,0 +1,175 @@
+From cd1db8df7431edd2210ed0123e2e09b9b6d1e621 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 22:31:11 +0100
+Subject: [PATCH] dma: Let ld*_dma() propagate MemTxResult
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+dma_memory_read() returns a MemTxResult type. Do not discard
+it, return it to the caller.
+
+Update the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=cd1db8df7431edd2210ed0123e2e09b9b6d1e621]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-19-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/intc/pnv_xive.c | 8 ++++----
+ hw/usb/hcd-xhci.c | 7 ++++---
+ include/hw/pci/pci.h | 6 ++++--
+ include/hw/ppc/spapr_vio.h | 6 +++++-
+ include/sysemu/dma.h | 25 ++++++++++++-------------
+ 5 files changed, 29 insertions(+), 23 deletions(-)
+
+diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
+index d9249bb..bb20751 100644
+--- a/hw/intc/pnv_xive.c
++++ b/hw/intc/pnv_xive.c
+@@ -172,7 +172,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
+
+ /* Get the page size of the indirect table. */
+ vsd_addr = vsd & VSD_ADDRESS_MASK;
+- vsd = ldq_be_dma(&address_space_memory, vsd_addr, MEMTXATTRS_UNSPECIFIED);
++ ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ #ifdef XIVE_DEBUG
+@@ -195,8 +195,8 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
+ /* Load the VSD we are looking for, if not already done */
+ if (vsd_idx) {
+ vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
+- vsd = ldq_be_dma(&address_space_memory, vsd_addr,
+- MEMTXATTRS_UNSPECIFIED);
++ ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
++ MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ #ifdef XIVE_DEBUG
+@@ -543,7 +543,7 @@ static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
+
+ /* Get the page size of the indirect table. */
+ vsd_addr = vsd & VSD_ADDRESS_MASK;
+- vsd = ldq_be_dma(&address_space_memory, vsd_addr, MEMTXATTRS_UNSPECIFIED);
++ ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ #ifdef XIVE_DEBUG
+diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
+index d960b81..da5a407 100644
+--- a/hw/usb/hcd-xhci.c
++++ b/hw/usb/hcd-xhci.c
+@@ -2062,7 +2062,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid,
+ assert(slotid >= 1 && slotid <= xhci->numslots);
+
+ dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high);
+- poctx = ldq_le_dma(xhci->as, dcbaap + 8 * slotid, MEMTXATTRS_UNSPECIFIED);
++ ldq_le_dma(xhci->as, dcbaap + 8 * slotid, &poctx, MEMTXATTRS_UNSPECIFIED);
+ ictx = xhci_mask64(pictx);
+ octx = xhci_mask64(poctx);
+
+@@ -3429,6 +3429,7 @@ static int usb_xhci_post_load(void *opaque, int version_id)
+ uint32_t slot_ctx[4];
+ uint32_t ep_ctx[5];
+ int slotid, epid, state;
++ uint64_t addr;
+
+ dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high);
+
+@@ -3437,8 +3438,8 @@ static int usb_xhci_post_load(void *opaque, int version_id)
+ if (!slot->addressed) {
+ continue;
+ }
+- slot->ctx = xhci_mask64(ldq_le_dma(xhci->as, dcbaap + 8 * slotid,
+- MEMTXATTRS_UNSPECIFIED));
++ ldq_le_dma(xhci->as, dcbaap + 8 * slotid, &addr, MEMTXATTRS_UNSPECIFIED);
++ slot->ctx = xhci_mask64(addr);
+ xhci_dma_read_u32s(xhci, slot->ctx, slot_ctx, sizeof(slot_ctx));
+ slot->uport = xhci_lookup_uport(xhci, slot_ctx);
+ if (!slot->uport) {
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index 0613308..8c5f2ed 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -854,8 +854,10 @@ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr) \
+ { \
+- return ld##_l##_dma(pci_get_address_space(dev), addr, \
+- MEMTXATTRS_UNSPECIFIED); \
++ uint##_bits##_t val; \
++ ld##_l##_dma(pci_get_address_space(dev), addr, &val, \
++ MEMTXATTRS_UNSPECIFIED); \
++ return val; \
+ } \
+ static inline void st##_s##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, uint##_bits##_t val) \
+diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
+index d2ec9b0..7eae1a4 100644
+--- a/include/hw/ppc/spapr_vio.h
++++ b/include/hw/ppc/spapr_vio.h
+@@ -127,7 +127,11 @@ static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr,
+ #define vio_stq(_dev, _addr, _val) \
+ (stq_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+ #define vio_ldq(_dev, _addr) \
+- (ldq_be_dma(&(_dev)->as, (_addr), MEMTXATTRS_UNSPECIFIED))
++ ({ \
++ uint64_t _val; \
++ ldq_be_dma(&(_dev)->as, (_addr), &_val, MEMTXATTRS_UNSPECIFIED); \
++ _val; \
++ })
+
+ int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq);
+
+diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
+index 895044d..b3faef4 100644
+--- a/include/sysemu/dma.h
++++ b/include/sysemu/dma.h
+@@ -240,14 +240,15 @@ static inline void dma_memory_unmap(AddressSpace *as,
+ }
+
+ #define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
+- static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
+- dma_addr_t addr, \
+- MemTxAttrs attrs) \
+- { \
+- uint##_bits##_t val; \
+- dma_memory_read(as, addr, &val, (_bits) / 8, attrs); \
+- return _end##_bits##_to_cpu(val); \
+- } \
++ static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
++ dma_addr_t addr, \
++ uint##_bits##_t *pval, \
++ MemTxAttrs attrs) \
++ { \
++ MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
++ _end##_bits##_to_cpus(pval); \
++ return res; \
++ } \
+ static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+ uint##_bits##_t val, \
+@@ -257,12 +258,10 @@ static inline void dma_memory_unmap(AddressSpace *as,
+ return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
+ }
+
+-static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr, MemTxAttrs attrs)
++static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
++ uint8_t *val, MemTxAttrs attrs)
+ {
+- uint8_t val;
+-
+- dma_memory_read(as, addr, &val, 1, attrs);
+- return val;
++ return dma_memory_read(as, addr, val, 1, attrs);
+ }
+
+ static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0019-target-ppc-Implement-Vector-Mask-Move-insns.patch b/meta/recipes-devtools/qemu/qemu/0019-target-ppc-Implement-Vector-Mask-Move-insns.patch
new file mode 100644
index 0000000000..96fda98771
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0019-target-ppc-Implement-Vector-Mask-Move-insns.patch
@@ -0,0 +1,187 @@
+From 4d5202aad706fd338646d19aafbf255c3864333c Mon Sep 17 00:00:00 2001
+From: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Date: Fri, 17 Dec 2021 17:57:13 +0100
+Subject: [PATCH 19/21] target/ppc: Implement Vector Mask Move insns
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Implement the following PowerISA v3.1 instructions:
+mtvsrbm: Move to VSR Byte Mask
+mtvsrhm: Move to VSR Halfword Mask
+mtvsrwm: Move to VSR Word Mask
+mtvsrdm: Move to VSR Doubleword Mask
+mtvsrqm: Move to VSR Quadword Mask
+mtvsrbmi: Move to VSR Byte Mask Immediate
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=9193eaa901c54dbff4a91ea0b12a99e0135dbca1]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Message-Id: <20211203194229.746275-4-matheus.ferst@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/insn32.decode | 11 +++
+ target/ppc/translate/vmx-impl.c.inc | 115 ++++++++++++++++++++++++++++
+ 2 files changed, 126 insertions(+)
+
+diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
+index b0568b1356..8bdc059a4c 100644
+--- a/target/ppc/insn32.decode
++++ b/target/ppc/insn32.decode
+@@ -40,6 +40,10 @@
+ %ds_rtp 22:4 !function=times_2
+ @DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si
+
++&DX_b vrt b
++%dx_b 6:10 16:5 0:1
++@DX_b ...... vrt:5 ..... .......... ..... . &DX_b b=%dx_b
++
+ &DX rt d
+ %dx_d 6:s10 16:5 0:1
+ @DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d
+@@ -417,6 +421,13 @@ VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
+
+ ## Vector Mask Manipulation Instructions
+
++MTVSRBM 000100 ..... 10000 ..... 11001000010 @VX_tb
++MTVSRHM 000100 ..... 10001 ..... 11001000010 @VX_tb
++MTVSRWM 000100 ..... 10010 ..... 11001000010 @VX_tb
++MTVSRDM 000100 ..... 10011 ..... 11001000010 @VX_tb
++MTVSRQM 000100 ..... 10100 ..... 11001000010 @VX_tb
++MTVSRBMI 000100 ..... ..... .......... 01010 . @DX_b
++
+ VEXPANDBM 000100 ..... 00000 ..... 11001000010 @VX_tb
+ VEXPANDHM 000100 ..... 00001 ..... 11001000010 @VX_tb
+ VEXPANDWM 000100 ..... 00010 ..... 11001000010 @VX_tb
+diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
+index 96c97bf6e7..d5e02fd7f2 100644
+--- a/target/ppc/translate/vmx-impl.c.inc
++++ b/target/ppc/translate/vmx-impl.c.inc
+@@ -1607,6 +1607,121 @@ static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
+ return true;
+ }
+
++static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
++{
++ const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
++ uint64_t c;
++ int i, j;
++ TCGv_i64 hi, lo, t0, t1;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
++ REQUIRE_VECTOR(ctx);
++
++ hi = tcg_temp_new_i64();
++ lo = tcg_temp_new_i64();
++ t0 = tcg_temp_new_i64();
++ t1 = tcg_temp_new_i64();
++
++ tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
++ tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
++ tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
++
++ /*
++ * Spread the bits into their respective elements.
++ * E.g. for bytes:
++ * 00000000000000000000000000000000000000000000000000000000abcdefgh
++ * << 32 - 4
++ * 0000000000000000000000000000abcdefgh0000000000000000000000000000
++ * |
++ * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
++ * << 16 - 2
++ * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
++ * |
++ * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
++ * << 8 - 1
++ * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
++ * |
++ * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
++ * & dup(1)
++ * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
++ * * 0xff
++ * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
++ */
++ for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
++ tcg_gen_shli_i64(t0, hi, j - i);
++ tcg_gen_shli_i64(t1, lo, j - i);
++ tcg_gen_or_i64(hi, hi, t0);
++ tcg_gen_or_i64(lo, lo, t1);
++ }
++
++ c = dup_const(vece, 1);
++ tcg_gen_andi_i64(hi, hi, c);
++ tcg_gen_andi_i64(lo, lo, c);
++
++ c = MAKE_64BIT_MASK(0, elem_width);
++ tcg_gen_muli_i64(hi, hi, c);
++ tcg_gen_muli_i64(lo, lo, c);
++
++ set_avr64(a->vrt, lo, false);
++ set_avr64(a->vrt, hi, true);
++
++ tcg_temp_free_i64(hi);
++ tcg_temp_free_i64(lo);
++ tcg_temp_free_i64(t0);
++ tcg_temp_free_i64(t1);
++
++ return true;
++}
++
++TRANS(MTVSRBM, do_mtvsrm, MO_8)
++TRANS(MTVSRHM, do_mtvsrm, MO_16)
++TRANS(MTVSRWM, do_mtvsrm, MO_32)
++TRANS(MTVSRDM, do_mtvsrm, MO_64)
++
++static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
++{
++ TCGv_i64 tmp;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
++ REQUIRE_VECTOR(ctx);
++
++ tmp = tcg_temp_new_i64();
++
++ tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
++ tcg_gen_sextract_i64(tmp, tmp, 0, 1);
++ set_avr64(a->vrt, tmp, false);
++ set_avr64(a->vrt, tmp, true);
++
++ tcg_temp_free_i64(tmp);
++
++ return true;
++}
++
++static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
++{
++ const uint64_t mask = dup_const(MO_8, 1);
++ uint64_t hi, lo;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
++ REQUIRE_VECTOR(ctx);
++
++ hi = extract16(a->b, 8, 8);
++ lo = extract16(a->b, 0, 8);
++
++ for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
++ hi |= hi << (j - i);
++ lo |= lo << (j - i);
++ }
++
++ hi = (hi & mask) * 0xFF;
++ lo = (lo & mask) * 0xFF;
++
++ set_avr64(a->vrt, tcg_constant_i64(hi), true);
++ set_avr64(a->vrt, tcg_constant_i64(lo), false);
++
++ return true;
++}
++
+ #define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+ { \
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0019_let_st_pointer_pci_dma_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0019_let_st_pointer_pci_dma_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..69101f308d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0019_let_st_pointer_pci_dma_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,303 @@
+From a423a1b523296f8798a5851aaaba64dd166c0a74 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 22:39:42 +0100
+Subject: [PATCH] pci: Let st*_pci_dma() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling st*_pci_dma().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=a423a1b523296f8798a5851aaaba64dd166c0a74]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-21-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/audio/intel-hda.c | 10 ++++++----
+ hw/net/eepro100.c | 29 ++++++++++++++++++-----------
+ hw/net/tulip.c | 18 ++++++++++--------
+ hw/scsi/megasas.c | 15 ++++++++++-----
+ hw/scsi/vmw_pvscsi.c | 3 ++-
+ include/hw/pci/pci.h | 11 ++++++-----
+ 6 files changed, 52 insertions(+), 34 deletions(-)
+
+diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
+index fb3d34a..3309ae0 100644
+--- a/hw/audio/intel-hda.c
++++ b/hw/audio/intel-hda.c
+@@ -345,6 +345,7 @@ static void intel_hda_corb_run(IntelHDAState *d)
+
+ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t response)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus);
+ IntelHDAState *d = container_of(bus, IntelHDAState, codecs);
+ hwaddr addr;
+@@ -367,8 +368,8 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res
+ ex = (solicited ? 0 : (1 << 4)) | dev->cad;
+ wp = (d->rirb_wp + 1) & 0xff;
+ addr = intel_hda_addr(d->rirb_lbase, d->rirb_ubase);
+- stl_le_pci_dma(&d->pci, addr + 8*wp, response);
+- stl_le_pci_dma(&d->pci, addr + 8*wp + 4, ex);
++ stl_le_pci_dma(&d->pci, addr + 8 * wp, response, attrs);
++ stl_le_pci_dma(&d->pci, addr + 8 * wp + 4, ex, attrs);
+ d->rirb_wp = wp;
+
+ dprint(d, 2, "%s: [wp 0x%x] response 0x%x, extra 0x%x\n",
+@@ -394,6 +395,7 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res
+ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
+ uint8_t *buf, uint32_t len)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus);
+ IntelHDAState *d = container_of(bus, IntelHDAState, codecs);
+ hwaddr addr;
+@@ -428,7 +430,7 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
+ st->be, st->bp, st->bpl[st->be].len, copy);
+
+ pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output,
+- MEMTXATTRS_UNSPECIFIED);
++ attrs);
+ st->lpib += copy;
+ st->bp += copy;
+ buf += copy;
+@@ -451,7 +453,7 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
+ if (d->dp_lbase & 0x01) {
+ s = st - d->st;
+ addr = intel_hda_addr(d->dp_lbase & ~0x01, d->dp_ubase);
+- stl_le_pci_dma(&d->pci, addr + 8*s, st->lpib);
++ stl_le_pci_dma(&d->pci, addr + 8 * s, st->lpib, attrs);
+ }
+ dprint(d, 3, "dma: --\n");
+
+diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
+index 16e95ef..83c4431 100644
+--- a/hw/net/eepro100.c
++++ b/hw/net/eepro100.c
+@@ -700,6 +700,8 @@ static void set_ru_state(EEPRO100State * s, ru_state_t state)
+
+ static void dump_statistics(EEPRO100State * s)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++
+ /* Dump statistical data. Most data is never changed by the emulation
+ * and always 0, so we first just copy the whole block and then those
+ * values which really matter.
+@@ -707,16 +709,18 @@ static void dump_statistics(EEPRO100State * s)
+ */
+ pci_dma_write(&s->dev, s->statsaddr, &s->statistics, s->stats_size);
+ stl_le_pci_dma(&s->dev, s->statsaddr + 0,
+- s->statistics.tx_good_frames);
++ s->statistics.tx_good_frames, attrs);
+ stl_le_pci_dma(&s->dev, s->statsaddr + 36,
+- s->statistics.rx_good_frames);
++ s->statistics.rx_good_frames, attrs);
+ stl_le_pci_dma(&s->dev, s->statsaddr + 48,
+- s->statistics.rx_resource_errors);
++ s->statistics.rx_resource_errors, attrs);
+ stl_le_pci_dma(&s->dev, s->statsaddr + 60,
+- s->statistics.rx_short_frame_errors);
++ s->statistics.rx_short_frame_errors, attrs);
+ #if 0
+- stw_le_pci_dma(&s->dev, s->statsaddr + 76, s->statistics.xmt_tco_frames);
+- stw_le_pci_dma(&s->dev, s->statsaddr + 78, s->statistics.rcv_tco_frames);
++ stw_le_pci_dma(&s->dev, s->statsaddr + 76,
++ s->statistics.xmt_tco_frames, attrs);
++ stw_le_pci_dma(&s->dev, s->statsaddr + 78,
++ s->statistics.rcv_tco_frames, attrs);
+ missing("CU dump statistical counters");
+ #endif
+ }
+@@ -833,6 +837,7 @@ static void set_multicast_list(EEPRO100State *s)
+
+ static void action_command(EEPRO100State *s)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ /* The loop below won't stop if it gets special handcrafted data.
+ Therefore we limit the number of iterations. */
+ unsigned max_loop_count = 16;
+@@ -911,7 +916,7 @@ static void action_command(EEPRO100State *s)
+ }
+ /* Write new status. */
+ stw_le_pci_dma(&s->dev, s->cb_address,
+- s->tx.status | ok_status | STATUS_C);
++ s->tx.status | ok_status | STATUS_C, attrs);
+ if (bit_i) {
+ /* CU completed action. */
+ eepro100_cx_interrupt(s);
+@@ -937,6 +942,7 @@ static void action_command(EEPRO100State *s)
+
+ static void eepro100_cu_command(EEPRO100State * s, uint8_t val)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ cu_state_t cu_state;
+ switch (val) {
+ case CU_NOP:
+@@ -986,7 +992,7 @@ static void eepro100_cu_command(EEPRO100State * s, uint8_t val)
+ /* Dump statistical counters. */
+ TRACE(OTHER, logout("val=0x%02x (dump stats)\n", val));
+ dump_statistics(s);
+- stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005);
++ stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005, attrs);
+ break;
+ case CU_CMD_BASE:
+ /* Load CU base. */
+@@ -997,7 +1003,7 @@ static void eepro100_cu_command(EEPRO100State * s, uint8_t val)
+ /* Dump and reset statistical counters. */
+ TRACE(OTHER, logout("val=0x%02x (dump stats and reset)\n", val));
+ dump_statistics(s);
+- stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007);
++ stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007, attrs);
+ memset(&s->statistics, 0, sizeof(s->statistics));
+ break;
+ case CU_SRESUME:
+@@ -1612,6 +1618,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
+ * - Magic packets should set bit 30 in power management driver register.
+ * - Interesting packets should set bit 29 in power management driver register.
+ */
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ EEPRO100State *s = qemu_get_nic_opaque(nc);
+ uint16_t rfd_status = 0xa000;
+ #if defined(CONFIG_PAD_RECEIVED_FRAMES)
+@@ -1726,9 +1733,9 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
+ TRACE(OTHER, logout("command 0x%04x, link 0x%08x, addr 0x%08x, size %u\n",
+ rfd_command, rx.link, rx.rx_buf_addr, rfd_size));
+ stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset +
+- offsetof(eepro100_rx_t, status), rfd_status);
++ offsetof(eepro100_rx_t, status), rfd_status, attrs);
+ stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset +
+- offsetof(eepro100_rx_t, count), size);
++ offsetof(eepro100_rx_t, count), size, attrs);
+ /* Early receive interrupt not supported. */
+ #if 0
+ eepro100_er_interrupt(s);
+diff --git a/hw/net/tulip.c b/hw/net/tulip.c
+index ca69f7e..1f2c79d 100644
+--- a/hw/net/tulip.c
++++ b/hw/net/tulip.c
+@@ -86,16 +86,18 @@ static void tulip_desc_read(TULIPState *s, hwaddr p,
+ static void tulip_desc_write(TULIPState *s, hwaddr p,
+ struct tulip_descriptor *desc)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++
+ if (s->csr[0] & CSR0_DBO) {
+- stl_be_pci_dma(&s->dev, p, desc->status);
+- stl_be_pci_dma(&s->dev, p + 4, desc->control);
+- stl_be_pci_dma(&s->dev, p + 8, desc->buf_addr1);
+- stl_be_pci_dma(&s->dev, p + 12, desc->buf_addr2);
++ stl_be_pci_dma(&s->dev, p, desc->status, attrs);
++ stl_be_pci_dma(&s->dev, p + 4, desc->control, attrs);
++ stl_be_pci_dma(&s->dev, p + 8, desc->buf_addr1, attrs);
++ stl_be_pci_dma(&s->dev, p + 12, desc->buf_addr2, attrs);
+ } else {
+- stl_le_pci_dma(&s->dev, p, desc->status);
+- stl_le_pci_dma(&s->dev, p + 4, desc->control);
+- stl_le_pci_dma(&s->dev, p + 8, desc->buf_addr1);
+- stl_le_pci_dma(&s->dev, p + 12, desc->buf_addr2);
++ stl_le_pci_dma(&s->dev, p, desc->status, attrs);
++ stl_le_pci_dma(&s->dev, p + 4, desc->control, attrs);
++ stl_le_pci_dma(&s->dev, p + 8, desc->buf_addr1, attrs);
++ stl_le_pci_dma(&s->dev, p + 12, desc->buf_addr2, attrs);
+ }
+ }
+
+diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
+index 091a350..b5e8b14 100644
+--- a/hw/scsi/megasas.c
++++ b/hw/scsi/megasas.c
+@@ -168,14 +168,16 @@ static void megasas_frame_set_cmd_status(MegasasState *s,
+ unsigned long frame, uint8_t v)
+ {
+ PCIDevice *pci = &s->parent_obj;
+- stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status), v);
++ stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status),
++ v, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static void megasas_frame_set_scsi_status(MegasasState *s,
+ unsigned long frame, uint8_t v)
+ {
+ PCIDevice *pci = &s->parent_obj;
+- stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status), v);
++ stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status),
++ v, MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static inline const char *mfi_frame_desc(unsigned int cmd)
+@@ -542,6 +544,7 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
+
+ static void megasas_complete_frame(MegasasState *s, uint64_t context)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ PCIDevice *pci_dev = PCI_DEVICE(s);
+ int tail, queue_offset;
+
+@@ -555,10 +558,12 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
+ */
+ if (megasas_use_queue64(s)) {
+ queue_offset = s->reply_queue_head * sizeof(uint64_t);
+- stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context);
++ stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset,
++ context, attrs);
+ } else {
+ queue_offset = s->reply_queue_head * sizeof(uint32_t);
+- stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context);
++ stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset,
++ context, attrs);
+ }
+ s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa);
+ trace_megasas_qf_complete(context, s->reply_queue_head,
+@@ -572,7 +577,7 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
+ s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds);
+ trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail,
+ s->busy);
+- stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head);
++ stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head, attrs);
+ /* Notify HBA */
+ if (msix_enabled(pci_dev)) {
+ trace_megasas_msix_raise(0);
+diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
+index cd76bd6..59c3e8b 100644
+--- a/hw/scsi/vmw_pvscsi.c
++++ b/hw/scsi/vmw_pvscsi.c
+@@ -55,7 +55,8 @@
+ (m)->rs_pa + offsetof(struct PVSCSIRingsState, field)))
+ #define RS_SET_FIELD(m, field, val) \
+ (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
+- (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val))
++ (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val, \
++ MEMTXATTRS_UNSPECIFIED))
+
+ struct PVSCSIClass {
+ PCIDeviceClass parent_class;
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index 8c5f2ed..9f51ef2 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -859,11 +859,12 @@ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ MEMTXATTRS_UNSPECIFIED); \
+ return val; \
+ } \
+- static inline void st##_s##_pci_dma(PCIDevice *dev, \
+- dma_addr_t addr, uint##_bits##_t val) \
+- { \
+- st##_s##_dma(pci_get_address_space(dev), addr, val, \
+- MEMTXATTRS_UNSPECIFIED); \
++ static inline void st##_s##_pci_dma(PCIDevice *dev, \
++ dma_addr_t addr, \
++ uint##_bits##_t val, \
++ MemTxAttrs attrs) \
++ { \
++ st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \
+ }
+
+ PCI_DMA_DEFINE_LDST(ub, b, 8);
+--
+1.8.3.1
+
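The patch above makes the generated st*_pci_dma() helpers take the transaction
attributes from the caller. As a rough sketch of the resulting store-side calling
convention (illustration only; the device name and descriptor layout are invented,
and MEMTXATTRS_UNSPECIFIED keeps the previous behaviour):

    /* Hypothetical device completing a descriptor in guest memory with the
     * post-patch helpers that take an explicit MemTxAttrs argument. */
    static void mydev_complete_descriptor(PCIDevice *dev, dma_addr_t desc_pa,
                                          uint16_t status, uint32_t length)
    {
        const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;

        stw_le_pci_dma(dev, desc_pa, status, attrs);      /* 16-bit LE store */
        stl_le_pci_dma(dev, desc_pa + 4, length, attrs);  /* 32-bit LE store */
    }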
diff --git a/meta/recipes-devtools/qemu/qemu/0020-target-ppc-move-xs-n-madd-am-ds-p-xs-n-msub-am-ds-p-.patch b/meta/recipes-devtools/qemu/qemu/0020-target-ppc-move-xs-n-madd-am-ds-p-xs-n-msub-am-ds-p-.patch
new file mode 100644
index 0000000000..7e747298a9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0020-target-ppc-move-xs-n-madd-am-ds-p-xs-n-msub-am-ds-p-.patch
@@ -0,0 +1,258 @@
+From a3c7553efdec661a8f7d7dfc0c0618a35fab005c Mon Sep 17 00:00:00 2001
+From: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Date: Wed, 2 Mar 2022 06:51:38 +0100
+Subject: [PATCH 20/21] target/ppc: move xs[n]madd[am][ds]p/xs[n]msub[am][ds]p
+ to decodetree
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=e4318ab2e423c4caf9a88a4e99b5e234096b81a9]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Message-Id: <20220225210936.1749575-37-matheus.ferst@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 23 ++++++------
+ target/ppc/helper.h | 16 ++++-----
+ target/ppc/insn32.decode | 22 ++++++++++++
+ target/ppc/translate/vsx-impl.c.inc | 56 ++++++++++++++++++++++++-----
+ target/ppc/translate/vsx-ops.c.inc | 16 ---------
+ 5 files changed, 90 insertions(+), 43 deletions(-)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index 5cc7fb1dcb..853e5f6029 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -2036,10 +2036,11 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
+ * maddflgs - flags for the float*muladd routine that control the
+ * various forms (madd, msub, nmadd, nmsub)
+ * sfprf - set FPRF
++ * r2sp - round intermediate double precision result to single precision
+ */
+ #define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
+ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+- ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
++ ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3) \
+ { \
+ ppc_vsr_t t = *xt; \
+ int i; \
+@@ -2055,12 +2056,12 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ * result to odd. \
+ */ \
+ set_float_rounding_mode(float_round_to_zero, &tstat); \
+- t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
++ t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, \
+ maddflgs, &tstat); \
+ t.fld |= (get_float_exception_flags(&tstat) & \
+ float_flag_inexact) != 0; \
+ } else { \
+- t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
++ t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, \
+ maddflgs, &tstat); \
+ } \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+@@ -2082,14 +2083,14 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ do_float_check_status(env, GETPC()); \
+ }
+
+-VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
+-VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
+-VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
+-VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
+-VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
+-VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
+-VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
+-VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
++VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
++VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
++VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
++VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
++VSX_MADD(XSMADDSP, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
++VSX_MADD(XSMSUBSP, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
++VSX_MADD(XSNMADDSP, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
++VSX_MADD(XSNMSUBSP, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
+
+ VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
+ VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
+diff --git a/target/ppc/helper.h b/target/ppc/helper.h
+index ef5bdd38a7..e147b37644 100644
+--- a/target/ppc/helper.h
++++ b/target/ppc/helper.h
+@@ -376,10 +376,10 @@ DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr)
+ DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr)
+ DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr)
+ DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr)
+-DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr)
+-DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr)
+-DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr)
+-DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSMADDDP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSMSUBDP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMADDDP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMSUBDP, void, env, vsr, vsr, vsr, vsr)
+ DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr)
+ DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr)
+ DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr)
+@@ -439,10 +439,10 @@ DEF_HELPER_3(xsresp, void, env, vsr, vsr)
+ DEF_HELPER_2(xsrsp, i64, env, i64)
+ DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr)
+ DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr)
+-DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr)
+-DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr)
+-DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr)
+-DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSMADDSP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSMSUBSP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMADDSP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMSUBSP, void, env, vsr, vsr, vsr, vsr)
+
+ DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
+ DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
+diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
+index 8bdc059a4c..0ff8818084 100644
+--- a/target/ppc/insn32.decode
++++ b/target/ppc/insn32.decode
+@@ -451,6 +451,28 @@ STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX
+ LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP
+ STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP
+
++## VSX Scalar Multiply-Add Instructions
++
++XSMADDADP 111100 ..... ..... ..... 00100001 . . . @XX3
++XSMADDMDP 111100 ..... ..... ..... 00101001 . . . @XX3
++XSMADDASP 111100 ..... ..... ..... 00000001 . . . @XX3
++XSMADDMSP 111100 ..... ..... ..... 00001001 . . . @XX3
++
++XSMSUBADP 111100 ..... ..... ..... 00110001 . . . @XX3
++XSMSUBMDP 111100 ..... ..... ..... 00111001 . . . @XX3
++XSMSUBASP 111100 ..... ..... ..... 00010001 . . . @XX3
++XSMSUBMSP 111100 ..... ..... ..... 00011001 . . . @XX3
++
++XSNMADDASP 111100 ..... ..... ..... 10000001 . . . @XX3
++XSNMADDMSP 111100 ..... ..... ..... 10001001 . . . @XX3
++XSNMADDADP 111100 ..... ..... ..... 10100001 . . . @XX3
++XSNMADDMDP 111100 ..... ..... ..... 10101001 . . . @XX3
++
++XSNMSUBASP 111100 ..... ..... ..... 10010001 . . . @XX3
++XSNMSUBMSP 111100 ..... ..... ..... 10011001 . . . @XX3
++XSNMSUBADP 111100 ..... ..... ..... 10110001 . . . @XX3
++XSNMSUBMDP 111100 ..... ..... ..... 10111001 . . . @XX3
++
+ ## VSX splat instruction
+
+ XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8
+diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
+index 99c8a57e50..90d3ac665b 100644
+--- a/target/ppc/translate/vsx-impl.c.inc
++++ b/target/ppc/translate/vsx-impl.c.inc
+@@ -1201,6 +1201,54 @@ GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX)
+ GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
+ GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
+
++static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3,
++ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
++{
++ TCGv_ptr t, s1, s2, s3;
++
++ t = gen_vsr_ptr(tgt);
++ s1 = gen_vsr_ptr(src1);
++ s2 = gen_vsr_ptr(src2);
++ s3 = gen_vsr_ptr(src3);
++
++ gen_helper(cpu_env, t, s1, s2, s3);
++
++ tcg_temp_free_ptr(t);
++ tcg_temp_free_ptr(s1);
++ tcg_temp_free_ptr(s2);
++ tcg_temp_free_ptr(s3);
++
++ return true;
++}
++
++static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a,
++ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
++{
++ REQUIRE_VSX(ctx);
++
++ if (type_a) {
++ return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
++ }
++ return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
++}
++
++TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP)
++TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP)
++TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP)
++TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP)
++TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP)
++TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP)
++TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP)
++TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP)
++TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP)
++TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP)
++TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP)
++TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP)
++TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP)
++TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
++TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
++TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)
++
+ #define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
+ static void gen_##name(DisasContext *ctx) \
+ { \
+@@ -1231,14 +1279,6 @@ static void gen_##name(DisasContext *ctx) \
+ tcg_temp_free_ptr(c); \
+ }
+
+-GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX)
+-GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX)
+-GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX)
+-GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX)
+-GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207)
+-GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207)
+-GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207)
+-GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207)
+ GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
+ GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
+ GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
+diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc
+index c974324c4c..ef0200eead 100644
+--- a/target/ppc/translate/vsx-ops.c.inc
++++ b/target/ppc/translate/vsx-ops.c.inc
+@@ -186,14 +186,6 @@ GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
+ GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
+ GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
+ GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX),
+-GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX),
+ GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
+ GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
+ GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
+@@ -235,14 +227,6 @@ GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
+ GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
+ GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
+ GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207),
+-GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207),
+ GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
+ GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
+
+--
+2.17.1
+
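For readers tracing the operand routing in do_xsmadd_XX3() above: with
float64_muladd(a, b, c) computing a * b + c, the A-form/M-form split decodes to
the usual PowerISA semantics. A compact summary, assuming the standard
MADD/MSUB/NMADD/NMSUB flag meanings (negate addend, negate result):

    /* A-form (XSMADDADP, XSMSUBADP, ...): xT <- (xA * xB) +/- xT
     * M-form (XSMADDMDP, XSMSUBMDP, ...): xT <- (xA * xT) +/- xB
     * The xsn* variants additionally negate the final result. */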
diff --git a/meta/recipes-devtools/qemu/qemu/0020_let_ld_pointer_pci_dma_function_take_MemTxAttrs_argument.patch b/meta/recipes-devtools/qemu/qemu/0020_let_ld_pointer_pci_dma_function_take_MemTxAttrs_argument.patch
new file mode 100644
index 0000000000..7f9de244be
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0020_let_ld_pointer_pci_dma_function_take_MemTxAttrs_argument.patch
@@ -0,0 +1,271 @@
+From 398f9a84ac7132e38caf7b066273734b3bf619ff Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 23:45:06 +0100
+Subject: [PATCH] pci: Let ld*_pci_dma() take MemTxAttrs argument
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Let devices specify transaction attributes when calling ld*_pci_dma().
+
+Keep the default MEMTXATTRS_UNSPECIFIED in the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=398f9a84ac7132e38caf7b066273734b3bf619ff]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-22-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/audio/intel-hda.c | 2 +-
+ hw/net/eepro100.c | 19 +++++++++++++------
+ hw/net/tulip.c | 18 ++++++++++--------
+ hw/scsi/megasas.c | 16 ++++++++++------
+ hw/scsi/mptsas.c | 10 ++++++----
+ hw/scsi/vmw_pvscsi.c | 3 ++-
+ hw/usb/hcd-xhci.c | 1 +
+ include/hw/pci/pci.h | 6 +++---
+ 8 files changed, 46 insertions(+), 29 deletions(-)
+
+diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
+index 3309ae0..e34b7ab 100644
+--- a/hw/audio/intel-hda.c
++++ b/hw/audio/intel-hda.c
+@@ -335,7 +335,7 @@ static void intel_hda_corb_run(IntelHDAState *d)
+
+ rp = (d->corb_rp + 1) & 0xff;
+ addr = intel_hda_addr(d->corb_lbase, d->corb_ubase);
+- verb = ldl_le_pci_dma(&d->pci, addr + 4*rp);
++ verb = ldl_le_pci_dma(&d->pci, addr + 4 * rp, MEMTXATTRS_UNSPECIFIED);
+ d->corb_rp = rp;
+
+ dprint(d, 2, "%s: [rp 0x%x] verb 0x%08x\n", __func__, rp, verb);
+diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
+index 83c4431..eb82e9c 100644
+--- a/hw/net/eepro100.c
++++ b/hw/net/eepro100.c
+@@ -737,6 +737,7 @@ static void read_cb(EEPRO100State *s)
+
+ static void tx_command(EEPRO100State *s)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ uint32_t tbd_array = s->tx.tbd_array_addr;
+ uint16_t tcb_bytes = s->tx.tcb_bytes & 0x3fff;
+ /* Sends larger than MAX_ETH_FRAME_SIZE are allowed, up to 2600 bytes. */
+@@ -772,11 +773,14 @@ static void tx_command(EEPRO100State *s)
+ /* Extended Flexible TCB. */
+ for (; tbd_count < 2; tbd_count++) {
+ uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev,
+- tbd_address);
++ tbd_address,
++ attrs);
+ uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev,
+- tbd_address + 4);
++ tbd_address + 4,
++ attrs);
+ uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev,
+- tbd_address + 6);
++ tbd_address + 6,
++ attrs);
+ tbd_address += 8;
+ TRACE(RXTX, logout
+ ("TBD (extended flexible mode): buffer address 0x%08x, size 0x%04x\n",
+@@ -792,9 +796,12 @@ static void tx_command(EEPRO100State *s)
+ }
+ tbd_address = tbd_array;
+ for (; tbd_count < s->tx.tbd_count; tbd_count++) {
+- uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address);
+- uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4);
+- uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6);
++ uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address,
++ attrs);
++ uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4,
++ attrs);
++ uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6,
++ attrs);
+ tbd_address += 8;
+ TRACE(RXTX, logout
+ ("TBD (flexible mode): buffer address 0x%08x, size 0x%04x\n",
+diff --git a/hw/net/tulip.c b/hw/net/tulip.c
+index 1f2c79d..c76e486 100644
+--- a/hw/net/tulip.c
++++ b/hw/net/tulip.c
+@@ -70,16 +70,18 @@ static const VMStateDescription vmstate_pci_tulip = {
+ static void tulip_desc_read(TULIPState *s, hwaddr p,
+ struct tulip_descriptor *desc)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++
+ if (s->csr[0] & CSR0_DBO) {
+- desc->status = ldl_be_pci_dma(&s->dev, p);
+- desc->control = ldl_be_pci_dma(&s->dev, p + 4);
+- desc->buf_addr1 = ldl_be_pci_dma(&s->dev, p + 8);
+- desc->buf_addr2 = ldl_be_pci_dma(&s->dev, p + 12);
++ desc->status = ldl_be_pci_dma(&s->dev, p, attrs);
++ desc->control = ldl_be_pci_dma(&s->dev, p + 4, attrs);
++ desc->buf_addr1 = ldl_be_pci_dma(&s->dev, p + 8, attrs);
++ desc->buf_addr2 = ldl_be_pci_dma(&s->dev, p + 12, attrs);
+ } else {
+- desc->status = ldl_le_pci_dma(&s->dev, p);
+- desc->control = ldl_le_pci_dma(&s->dev, p + 4);
+- desc->buf_addr1 = ldl_le_pci_dma(&s->dev, p + 8);
+- desc->buf_addr2 = ldl_le_pci_dma(&s->dev, p + 12);
++ desc->status = ldl_le_pci_dma(&s->dev, p, attrs);
++ desc->control = ldl_le_pci_dma(&s->dev, p + 4, attrs);
++ desc->buf_addr1 = ldl_le_pci_dma(&s->dev, p + 8, attrs);
++ desc->buf_addr2 = ldl_le_pci_dma(&s->dev, p + 12, attrs);
+ }
+ }
+
+diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
+index b5e8b14..98b1370 100644
+--- a/hw/scsi/megasas.c
++++ b/hw/scsi/megasas.c
+@@ -202,7 +202,9 @@ static uint64_t megasas_frame_get_context(MegasasState *s,
+ unsigned long frame)
+ {
+ PCIDevice *pci = &s->parent_obj;
+- return ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context));
++ return ldq_le_pci_dma(pci,
++ frame + offsetof(struct mfi_frame_header, context),
++ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd)
+@@ -534,7 +536,8 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
+ s->busy++;
+
+ if (s->consumer_pa) {
+- s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa);
++ s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+ trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context,
+ s->reply_queue_head, s->reply_queue_tail, s->busy);
+@@ -565,14 +568,14 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
+ stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset,
+ context, attrs);
+ }
+- s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa);
++ s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa, attrs);
+ trace_megasas_qf_complete(context, s->reply_queue_head,
+ s->reply_queue_tail, s->busy);
+ }
+
+ if (megasas_intr_enabled(s)) {
+ /* Update reply queue pointer */
+- s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa);
++ s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa, attrs);
+ tail = s->reply_queue_head;
+ s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds);
+ trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail,
+@@ -637,6 +640,7 @@ static void megasas_abort_command(MegasasCmd *cmd)
+
+ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ PCIDevice *pcid = PCI_DEVICE(s);
+ uint32_t pa_hi, pa_lo;
+ hwaddr iq_pa, initq_size = sizeof(struct mfi_init_qinfo);
+@@ -675,9 +679,9 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
+ pa_lo = le32_to_cpu(initq->pi_addr_lo);
+ pa_hi = le32_to_cpu(initq->pi_addr_hi);
+ s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo;
+- s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa);
++ s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa, attrs);
+ s->reply_queue_head %= MEGASAS_MAX_FRAMES;
+- s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa);
++ s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa, attrs);
+ s->reply_queue_tail %= MEGASAS_MAX_FRAMES;
+ flags = le32_to_cpu(initq->flags);
+ if (flags & MFI_QUEUE_FLAG_CONTEXT64) {
+diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
+index f6c7765..ac9f4df 100644
+--- a/hw/scsi/mptsas.c
++++ b/hw/scsi/mptsas.c
+@@ -172,14 +172,15 @@ static const int mpi_request_sizes[] = {
+ static dma_addr_t mptsas_ld_sg_base(MPTSASState *s, uint32_t flags_and_length,
+ dma_addr_t *sgaddr)
+ {
++ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ PCIDevice *pci = (PCIDevice *) s;
+ dma_addr_t addr;
+
+ if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+- addr = ldq_le_pci_dma(pci, *sgaddr + 4);
++ addr = ldq_le_pci_dma(pci, *sgaddr + 4, attrs);
+ *sgaddr += 12;
+ } else {
+- addr = ldl_le_pci_dma(pci, *sgaddr + 4);
++ addr = ldl_le_pci_dma(pci, *sgaddr + 4, attrs);
+ *sgaddr += 8;
+ }
+ return addr;
+@@ -203,7 +204,7 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
+ dma_addr_t addr, len;
+ uint32_t flags_and_length;
+
+- flags_and_length = ldl_le_pci_dma(pci, sgaddr);
++ flags_and_length = ldl_le_pci_dma(pci, sgaddr, MEMTXATTRS_UNSPECIFIED);
+ len = flags_and_length & MPI_SGE_LENGTH_MASK;
+ if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK)
+ != MPI_SGE_FLAGS_SIMPLE_ELEMENT ||
+@@ -234,7 +235,8 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
+ break;
+ }
+
+- flags_and_length = ldl_le_pci_dma(pci, next_chain_addr);
++ flags_and_length = ldl_le_pci_dma(pci, next_chain_addr,
++ MEMTXATTRS_UNSPECIFIED);
+ if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK)
+ != MPI_SGE_FLAGS_CHAIN_ELEMENT) {
+ return MPI_IOCSTATUS_INVALID_SGL;
+diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
+index 59c3e8b..33e16f9 100644
+--- a/hw/scsi/vmw_pvscsi.c
++++ b/hw/scsi/vmw_pvscsi.c
+@@ -52,7 +52,8 @@
+
+ #define RS_GET_FIELD(m, field) \
+ (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
+- (m)->rs_pa + offsetof(struct PVSCSIRingsState, field)))
++ (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), \
++ MEMTXATTRS_UNSPECIFIED))
+ #define RS_SET_FIELD(m, field, val) \
+ (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
+ (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val, \
+diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
+index da5a407..14bdb89 100644
+--- a/hw/usb/hcd-xhci.c
++++ b/hw/usb/hcd-xhci.c
+@@ -3440,6 +3440,7 @@ static int usb_xhci_post_load(void *opaque, int version_id)
+ }
+ ldq_le_dma(xhci->as, dcbaap + 8 * slotid, &addr, MEMTXATTRS_UNSPECIFIED);
+ slot->ctx = xhci_mask64(addr);
++
+ xhci_dma_read_u32s(xhci, slot->ctx, slot_ctx, sizeof(slot_ctx));
+ slot->uport = xhci_lookup_uport(xhci, slot_ctx);
+ if (!slot->uport) {
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index 9f51ef2..7a46c1f 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -852,11 +852,11 @@ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+
+ #define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
+ static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \
+- dma_addr_t addr) \
++ dma_addr_t addr, \
++ MemTxAttrs attrs) \
+ { \
+ uint##_bits##_t val; \
+- ld##_l##_dma(pci_get_address_space(dev), addr, &val, \
+- MEMTXATTRS_UNSPECIFIED); \
++ ld##_l##_dma(pci_get_address_space(dev), addr, &val, attrs); \
+ return val; \
+ } \
+ static inline void st##_s##_pci_dma(PCIDevice *dev, \
+--
+1.8.3.1
+
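Mirroring the store-side change earlier in this series, the ld*_pci_dma() helpers
now take an explicit MemTxAttrs argument while still returning the loaded value.
A minimal load-side sketch at this stage (hypothetical device and field offset):

    /* Read a 32-bit little-endian guest descriptor field with explicit
     * transaction attributes; the value is still returned directly here. */
    static uint32_t mydev_read_buf_addr(PCIDevice *dev, dma_addr_t desc_pa)
    {
        const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;

        return ldl_le_pci_dma(dev, desc_pa, attrs);
    }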
diff --git a/meta/recipes-devtools/qemu/qemu/0021-target-ppc-implement-xs-n-maddqp-o-xs-n-msubqp-o.patch b/meta/recipes-devtools/qemu/qemu/0021-target-ppc-implement-xs-n-maddqp-o-xs-n-msubqp-o.patch
new file mode 100644
index 0000000000..11d732ac13
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0021-target-ppc-implement-xs-n-maddqp-o-xs-n-msubqp-o.patch
@@ -0,0 +1,174 @@
+From 1c1f82fbf0a434948b041eb35c671137628d5538 Mon Sep 17 00:00:00 2001
+From: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Date: Wed, 2 Mar 2022 06:51:38 +0100
+Subject: [PATCH 21/21] target/ppc: implement xs[n]maddqp[o]/xs[n]msubqp[o]
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Implement the following PowerISA v3.0 instructions:
+xsmaddqp[o]: VSX Scalar Multiply-Add Quad-Precision [using round to Odd]
+xsmsubqp[o]: VSX Scalar Multiply-Subtract Quad-Precision [using round
+ to Odd]
+xsnmaddqp[o]: VSX Scalar Negative Multiply-Add Quad-Precision [using
+ round to Odd]
+xsnmsubqp[o]: VSX Scalar Negative Multiply-Subtract Quad-Precision
+ [using round to Odd]
+
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=3bb1aed246d7b59ceee625a82628f7369d492a8f]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
+Message-Id: <20220225210936.1749575-38-matheus.ferst@eldorado.org.br>
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ target/ppc/fpu_helper.c | 42 +++++++++++++++++++++++++++++
+ target/ppc/helper.h | 9 +++++++
+ target/ppc/insn32.decode | 4 +++
+ target/ppc/translate/vsx-impl.c.inc | 25 +++++++++++++++++
+ 4 files changed, 80 insertions(+)
+
+diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
+index 853e5f6029..bdbbdb3b11 100644
+--- a/target/ppc/fpu_helper.c
++++ b/target/ppc/fpu_helper.c
+@@ -2102,6 +2102,48 @@ VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
+ VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
+ VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
+
++/*
++ * VSX_MADDQ - VSX floating point quad-precision multiply/add
++ * op - instruction mnemonic
++ * maddflgs - flags for the float*muladd routine that control the
++ * various forms (madd, msub, nmadd, nmsub)
++ * ro - round to odd
++ */
++#define VSX_MADDQ(op, maddflgs, ro) \
++void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\
++ ppc_vsr_t *s3) \
++{ \
++ ppc_vsr_t t = *xt; \
++ \
++ helper_reset_fpstatus(env); \
++ \
++ float_status tstat = env->fp_status; \
++ set_float_exception_flags(0, &tstat); \
++ if (ro) { \
++ tstat.float_rounding_mode = float_round_to_odd; \
++ } \
++ t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat); \
++ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
++ \
++ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
++ float_invalid_op_madd(env, tstat.float_exception_flags, \
++ false, GETPC()); \
++ } \
++ \
++ helper_compute_fprf_float128(env, t.f128); \
++ *xt = t; \
++ do_float_check_status(env, GETPC()); \
++}
++
++VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
++VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
++VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
++VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
++VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
++VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
++VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
++VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 0)
++
+ /*
+ * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+ * op - instruction mnemonic
+diff --git a/target/ppc/helper.h b/target/ppc/helper.h
+index e147b37644..b5080c4955 100644
+--- a/target/ppc/helper.h
++++ b/target/ppc/helper.h
+@@ -444,6 +444,15 @@ DEF_HELPER_5(XSMSUBSP, void, env, vsr, vsr, vsr, vsr)
+ DEF_HELPER_5(XSNMADDSP, void, env, vsr, vsr, vsr, vsr)
+ DEF_HELPER_5(XSNMSUBSP, void, env, vsr, vsr, vsr, vsr)
+
++DEF_HELPER_5(XSMADDQP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSMADDQPO, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSMSUBQP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSMSUBQPO, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMADDQP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMADDQPO, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMSUBQP, void, env, vsr, vsr, vsr, vsr)
++DEF_HELPER_5(XSNMSUBQPO, void, env, vsr, vsr, vsr, vsr)
++
+ DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
+ DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
+ DEF_HELPER_4(xvmuldp, void, env, vsr, vsr, vsr)
+diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
+index 0ff8818084..6bcb1e6804 100644
+--- a/target/ppc/insn32.decode
++++ b/target/ppc/insn32.decode
+@@ -457,21 +457,25 @@ XSMADDADP 111100 ..... ..... ..... 00100001 . . . @XX3
+ XSMADDMDP 111100 ..... ..... ..... 00101001 . . . @XX3
+ XSMADDASP 111100 ..... ..... ..... 00000001 . . . @XX3
+ XSMADDMSP 111100 ..... ..... ..... 00001001 . . . @XX3
++XSMADDQP 111111 ..... ..... ..... 0110000100 . @X_rc
+
+ XSMSUBADP 111100 ..... ..... ..... 00110001 . . . @XX3
+ XSMSUBMDP 111100 ..... ..... ..... 00111001 . . . @XX3
+ XSMSUBASP 111100 ..... ..... ..... 00010001 . . . @XX3
+ XSMSUBMSP 111100 ..... ..... ..... 00011001 . . . @XX3
++XSMSUBQP 111111 ..... ..... ..... 0110100100 . @X_rc
+
+ XSNMADDASP 111100 ..... ..... ..... 10000001 . . . @XX3
+ XSNMADDMSP 111100 ..... ..... ..... 10001001 . . . @XX3
+ XSNMADDADP 111100 ..... ..... ..... 10100001 . . . @XX3
+ XSNMADDMDP 111100 ..... ..... ..... 10101001 . . . @XX3
++XSNMADDQP 111111 ..... ..... ..... 0111000100 . @X_rc
+
+ XSNMSUBASP 111100 ..... ..... ..... 10010001 . . . @XX3
+ XSNMSUBMSP 111100 ..... ..... ..... 10011001 . . . @XX3
+ XSNMSUBADP 111100 ..... ..... ..... 10110001 . . . @XX3
+ XSNMSUBMDP 111100 ..... ..... ..... 10111001 . . . @XX3
++XSNMSUBQP 111111 ..... ..... ..... 0111100100 . @X_rc
+
+ ## VSX splat instruction
+
+diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
+index 90d3ac665b..4253f01319 100644
+--- a/target/ppc/translate/vsx-impl.c.inc
++++ b/target/ppc/translate/vsx-impl.c.inc
+@@ -1249,6 +1249,31 @@ TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
+ TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
+ TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)
+
++static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a,
++ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr),
++ void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
++{
++ int vrt, vra, vrb;
++
++ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
++ REQUIRE_VSX(ctx);
++
++ vrt = a->rt + 32;
++ vra = a->ra + 32;
++ vrb = a->rb + 32;
++
++ if (a->rc) {
++ return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro);
++ }
++
++ return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper);
++}
++
++TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO)
++TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO)
++TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO)
++TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO)
++
+ #define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
+ static void gen_##name(DisasContext *ctx) \
+ { \
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0021_let_st_pointer_pci_dma_function_propagate_MemTxResult.patch b/meta/recipes-devtools/qemu/qemu/0021_let_st_pointer_pci_dma_function_propagate_MemTxResult.patch
new file mode 100644
index 0000000000..e52a45b90f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0021_let_st_pointer_pci_dma_function_propagate_MemTxResult.patch
@@ -0,0 +1,47 @@
+From 6bebb270731758fae3114b7d24c2b12b7c325cc5 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 23:47:30 +0100
+Subject: [PATCH] pci: Let st*_pci_dma() propagate MemTxResult
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+st*_dma() returns a MemTxResult type. Do not discard
+it, return it to the caller.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=6bebb270731758fae3114b7d24c2b12b7c325cc5]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-23-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ include/hw/pci/pci.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index 7a46c1f..c90cecc 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -859,12 +859,12 @@ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ ld##_l##_dma(pci_get_address_space(dev), addr, &val, attrs); \
+ return val; \
+ } \
+- static inline void st##_s##_pci_dma(PCIDevice *dev, \
+- dma_addr_t addr, \
+- uint##_bits##_t val, \
+- MemTxAttrs attrs) \
++ static inline MemTxResult st##_s##_pci_dma(PCIDevice *dev, \
++ dma_addr_t addr, \
++ uint##_bits##_t val, \
++ MemTxAttrs attrs) \
+ { \
+- st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \
++ return st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \
+ }
+
+ PCI_DMA_DEFINE_LDST(ub, b, 8);
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0022_let_ld_pointer_pci_dma_function_propagate_MemTxResult.patch b/meta/recipes-devtools/qemu/qemu/0022_let_ld_pointer_pci_dma_function_propagate_MemTxResult.patch
new file mode 100644
index 0000000000..6bd6350f44
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0022_let_ld_pointer_pci_dma_function_propagate_MemTxResult.patch
@@ -0,0 +1,296 @@
+From 4a63054bce23982b99f4d3c65528e47e614086b2 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Fri, 17 Dec 2021 23:49:30 +0100
+Subject: [PATCH] pci: Let ld*_pci_dma() propagate MemTxResult
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+ld*_dma() returns a MemTxResult type. Do not discard
+it, return it to the caller.
+
+Update the few callers.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=4a63054bce23982b99f4d3c65528e47e614086b2]
+
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211223115554.3155328-24-philmd@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/audio/intel-hda.c | 2 +-
+ hw/net/eepro100.c | 25 ++++++++++---------------
+ hw/net/tulip.c | 16 ++++++++--------
+ hw/scsi/megasas.c | 21 ++++++++++++---------
+ hw/scsi/mptsas.c | 16 +++++++++++-----
+ hw/scsi/vmw_pvscsi.c | 16 ++++++++++------
+ include/hw/pci/pci.h | 17 ++++++++---------
+ 7 files changed, 60 insertions(+), 53 deletions(-)
+
+diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
+index e34b7ab..2b55d52 100644
+--- a/hw/audio/intel-hda.c
++++ b/hw/audio/intel-hda.c
+@@ -335,7 +335,7 @@ static void intel_hda_corb_run(IntelHDAState *d)
+
+ rp = (d->corb_rp + 1) & 0xff;
+ addr = intel_hda_addr(d->corb_lbase, d->corb_ubase);
+- verb = ldl_le_pci_dma(&d->pci, addr + 4 * rp, MEMTXATTRS_UNSPECIFIED);
++ ldl_le_pci_dma(&d->pci, addr + 4 * rp, &verb, MEMTXATTRS_UNSPECIFIED);
+ d->corb_rp = rp;
+
+ dprint(d, 2, "%s: [rp 0x%x] verb 0x%08x\n", __func__, rp, verb);
+diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
+index eb82e9c..679f52f 100644
+--- a/hw/net/eepro100.c
++++ b/hw/net/eepro100.c
+@@ -769,18 +769,16 @@ static void tx_command(EEPRO100State *s)
+ } else {
+ /* Flexible mode. */
+ uint8_t tbd_count = 0;
++ uint32_t tx_buffer_address;
++ uint16_t tx_buffer_size;
++ uint16_t tx_buffer_el;
++
+ if (s->has_extended_tcb_support && !(s->configuration[6] & BIT(4))) {
+ /* Extended Flexible TCB. */
+ for (; tbd_count < 2; tbd_count++) {
+- uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev,
+- tbd_address,
+- attrs);
+- uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev,
+- tbd_address + 4,
+- attrs);
+- uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev,
+- tbd_address + 6,
+- attrs);
++ ldl_le_pci_dma(&s->dev, tbd_address, &tx_buffer_address, attrs);
++ lduw_le_pci_dma(&s->dev, tbd_address + 4, &tx_buffer_size, attrs);
++ lduw_le_pci_dma(&s->dev, tbd_address + 6, &tx_buffer_el, attrs);
+ tbd_address += 8;
+ TRACE(RXTX, logout
+ ("TBD (extended flexible mode): buffer address 0x%08x, size 0x%04x\n",
+@@ -796,12 +794,9 @@ static void tx_command(EEPRO100State *s)
+ }
+ tbd_address = tbd_array;
+ for (; tbd_count < s->tx.tbd_count; tbd_count++) {
+- uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address,
+- attrs);
+- uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4,
+- attrs);
+- uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6,
+- attrs);
++ ldl_le_pci_dma(&s->dev, tbd_address, &tx_buffer_address, attrs);
++ lduw_le_pci_dma(&s->dev, tbd_address + 4, &tx_buffer_size, attrs);
++ lduw_le_pci_dma(&s->dev, tbd_address + 6, &tx_buffer_el, attrs);
+ tbd_address += 8;
+ TRACE(RXTX, logout
+ ("TBD (flexible mode): buffer address 0x%08x, size 0x%04x\n",
+diff --git a/hw/net/tulip.c b/hw/net/tulip.c
+index c76e486..d5b6cc5 100644
+--- a/hw/net/tulip.c
++++ b/hw/net/tulip.c
+@@ -73,15 +73,15 @@ static void tulip_desc_read(TULIPState *s, hwaddr p,
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+
+ if (s->csr[0] & CSR0_DBO) {
+- desc->status = ldl_be_pci_dma(&s->dev, p, attrs);
+- desc->control = ldl_be_pci_dma(&s->dev, p + 4, attrs);
+- desc->buf_addr1 = ldl_be_pci_dma(&s->dev, p + 8, attrs);
+- desc->buf_addr2 = ldl_be_pci_dma(&s->dev, p + 12, attrs);
++ ldl_be_pci_dma(&s->dev, p, &desc->status, attrs);
++ ldl_be_pci_dma(&s->dev, p + 4, &desc->control, attrs);
++ ldl_be_pci_dma(&s->dev, p + 8, &desc->buf_addr1, attrs);
++ ldl_be_pci_dma(&s->dev, p + 12, &desc->buf_addr2, attrs);
+ } else {
+- desc->status = ldl_le_pci_dma(&s->dev, p, attrs);
+- desc->control = ldl_le_pci_dma(&s->dev, p + 4, attrs);
+- desc->buf_addr1 = ldl_le_pci_dma(&s->dev, p + 8, attrs);
+- desc->buf_addr2 = ldl_le_pci_dma(&s->dev, p + 12, attrs);
++ ldl_le_pci_dma(&s->dev, p, &desc->status, attrs);
++ ldl_le_pci_dma(&s->dev, p + 4, &desc->control, attrs);
++ ldl_le_pci_dma(&s->dev, p + 8, &desc->buf_addr1, attrs);
++ ldl_le_pci_dma(&s->dev, p + 12, &desc->buf_addr2, attrs);
+ }
+ }
+
+diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
+index 98b1370..dc9bbdb 100644
+--- a/hw/scsi/megasas.c
++++ b/hw/scsi/megasas.c
+@@ -202,9 +202,12 @@ static uint64_t megasas_frame_get_context(MegasasState *s,
+ unsigned long frame)
+ {
+ PCIDevice *pci = &s->parent_obj;
+- return ldq_le_pci_dma(pci,
+- frame + offsetof(struct mfi_frame_header, context),
+- MEMTXATTRS_UNSPECIFIED);
++ uint64_t val;
++
++ ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context),
++ &val, MEMTXATTRS_UNSPECIFIED);
++
++ return val;
+ }
+
+ static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd)
+@@ -536,8 +539,8 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
+ s->busy++;
+
+ if (s->consumer_pa) {
+- s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa,
+- MEMTXATTRS_UNSPECIFIED);
++ ldl_le_pci_dma(pcid, s->consumer_pa, &s->reply_queue_tail,
++ MEMTXATTRS_UNSPECIFIED);
+ }
+ trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context,
+ s->reply_queue_head, s->reply_queue_tail, s->busy);
+@@ -568,14 +571,14 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
+ stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset,
+ context, attrs);
+ }
+- s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa, attrs);
++ ldl_le_pci_dma(pci_dev, s->consumer_pa, &s->reply_queue_tail, attrs);
+ trace_megasas_qf_complete(context, s->reply_queue_head,
+ s->reply_queue_tail, s->busy);
+ }
+
+ if (megasas_intr_enabled(s)) {
+ /* Update reply queue pointer */
+- s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa, attrs);
++ ldl_le_pci_dma(pci_dev, s->consumer_pa, &s->reply_queue_tail, attrs);
+ tail = s->reply_queue_head;
+ s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds);
+ trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail,
+@@ -679,9 +682,9 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
+ pa_lo = le32_to_cpu(initq->pi_addr_lo);
+ pa_hi = le32_to_cpu(initq->pi_addr_hi);
+ s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo;
+- s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa, attrs);
++ ldl_le_pci_dma(pcid, s->producer_pa, &s->reply_queue_head, attrs);
+ s->reply_queue_head %= MEGASAS_MAX_FRAMES;
+- s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa, attrs);
++ ldl_le_pci_dma(pcid, s->consumer_pa, &s->reply_queue_tail, attrs);
+ s->reply_queue_tail %= MEGASAS_MAX_FRAMES;
+ flags = le32_to_cpu(initq->flags);
+ if (flags & MFI_QUEUE_FLAG_CONTEXT64) {
+diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
+index ac9f4df..5181b0c 100644
+--- a/hw/scsi/mptsas.c
++++ b/hw/scsi/mptsas.c
+@@ -177,10 +177,16 @@ static dma_addr_t mptsas_ld_sg_base(MPTSASState *s, uint32_t flags_and_length,
+ dma_addr_t addr;
+
+ if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+- addr = ldq_le_pci_dma(pci, *sgaddr + 4, attrs);
++ uint64_t addr64;
++
++ ldq_le_pci_dma(pci, *sgaddr + 4, &addr64, attrs);
++ addr = addr64;
+ *sgaddr += 12;
+ } else {
+- addr = ldl_le_pci_dma(pci, *sgaddr + 4, attrs);
++ uint32_t addr32;
++
++ ldl_le_pci_dma(pci, *sgaddr + 4, &addr32, attrs);
++ addr = addr32;
+ *sgaddr += 8;
+ }
+ return addr;
+@@ -204,7 +210,7 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
+ dma_addr_t addr, len;
+ uint32_t flags_and_length;
+
+- flags_and_length = ldl_le_pci_dma(pci, sgaddr, MEMTXATTRS_UNSPECIFIED);
++ ldl_le_pci_dma(pci, sgaddr, &flags_and_length, MEMTXATTRS_UNSPECIFIED);
+ len = flags_and_length & MPI_SGE_LENGTH_MASK;
+ if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK)
+ != MPI_SGE_FLAGS_SIMPLE_ELEMENT ||
+@@ -235,8 +241,8 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
+ break;
+ }
+
+- flags_and_length = ldl_le_pci_dma(pci, next_chain_addr,
+- MEMTXATTRS_UNSPECIFIED);
++ ldl_le_pci_dma(pci, next_chain_addr, &flags_and_length,
++ MEMTXATTRS_UNSPECIFIED);
+ if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK)
+ != MPI_SGE_FLAGS_CHAIN_ELEMENT) {
+ return MPI_IOCSTATUS_INVALID_SGL;
+diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
+index 33e16f9..4d9969f 100644
+--- a/hw/scsi/vmw_pvscsi.c
++++ b/hw/scsi/vmw_pvscsi.c
+@@ -50,10 +50,10 @@
+ #define PVSCSI_MAX_CMD_DATA_WORDS \
+ (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
+
+-#define RS_GET_FIELD(m, field) \
+- (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
++#define RS_GET_FIELD(pval, m, field) \
++ ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
+ (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), \
+- MEMTXATTRS_UNSPECIFIED))
++ pval, MEMTXATTRS_UNSPECIFIED)
+ #define RS_SET_FIELD(m, field, val) \
+ (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
+ (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val, \
+@@ -249,10 +249,11 @@ pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
+ static hwaddr
+ pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
+ {
+- uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx);
++ uint32_t ready_ptr;
+ uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING
+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+
++ RS_GET_FIELD(&ready_ptr, mgr, reqProdIdx);
+ if (ready_ptr != mgr->consumed_ptr
+ && ready_ptr - mgr->consumed_ptr < ring_size) {
+ uint32_t next_ready_ptr =
+@@ -323,8 +324,11 @@ pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
+ static bool
+ pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
+ {
+- uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx);
+- uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx);
++ uint32_t prodIdx;
++ uint32_t consIdx;
++
++ RS_GET_FIELD(&prodIdx, mgr, msgProdIdx);
++ RS_GET_FIELD(&consIdx, mgr, msgConsIdx);
+
+ return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
+ }
+diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
+index c90cecc..5b36334 100644
+--- a/include/hw/pci/pci.h
++++ b/include/hw/pci/pci.h
+@@ -850,15 +850,14 @@ static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
+ }
+
+-#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
+- static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \
+- dma_addr_t addr, \
+- MemTxAttrs attrs) \
+- { \
+- uint##_bits##_t val; \
+- ld##_l##_dma(pci_get_address_space(dev), addr, &val, attrs); \
+- return val; \
+- } \
++#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
++ static inline MemTxResult ld##_l##_pci_dma(PCIDevice *dev, \
++ dma_addr_t addr, \
++ uint##_bits##_t *val, \
++ MemTxAttrs attrs) \
++ { \
++ return ld##_l##_dma(pci_get_address_space(dev), addr, val, attrs); \
++ } \
+ static inline MemTxResult st##_s##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, \
+ uint##_bits##_t val, \
+--
+1.8.3.1
+
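After this final interface change both helper families report DMA failures: the
loads return a MemTxResult and deliver the value through a pointer, the stores
return a MemTxResult directly. A rough sketch of the resulting caller pattern,
loosely modelled on the intel-hda and megasas hunks above (the device and ring
layout are hypothetical):

    /* Accumulate MemTxResult across a ring update so the device can react to a
     * failed DMA access instead of silently ignoring it. */
    static bool mydev_push_completion(PCIDevice *dev, dma_addr_t ring_pa,
                                      uint32_t entry)
    {
        const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
        MemTxResult res = MEMTX_OK;
        uint32_t head;

        res |= ldl_le_pci_dma(dev, ring_pa, &head, attrs);
        res |= stl_le_pci_dma(dev, ring_pa + 4 + head * sizeof(uint32_t),
                              entry, attrs);
        return res == MEMTX_OK;
    }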
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-14394.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-14394.patch
new file mode 100644
index 0000000000..aff91a7355
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-14394.patch
@@ -0,0 +1,79 @@
+From effaf5a240e03020f4ae953e10b764622c3e87cc Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Tue, 8 Aug 2023 10:44:51 +0000
+Subject: [PATCH] hw/usb/hcd-xhci: Fix unbounded loop in
+ xhci_ring_chain_length() (CVE-2020-14394)
+
+The loop condition in xhci_ring_chain_length() is under control of
+the guest, and additionally the code does not check for failed DMA
+transfers (e.g. if reaching the end of the RAM), so the loop there
+could run for a very long time or even forever. Fix it by checking
+the return value of dma_memory_read() and by introducing a maximum
+loop length.
+
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/646
+Message-Id: <20220804131300.96368-1-thuth@redhat.com>
+Reviewed-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Acked-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+
+CVE: CVE-2020-14394
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/effaf5a240e03020f4ae953e10b764622c3e87cc]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ hw/usb/hcd-xhci.c | 23 +++++++++++++++++++----
+ 1 file changed, 19 insertions(+), 4 deletions(-)
+
+diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
+index 14bdb8967..c63a36dcc 100644
+--- a/hw/usb/hcd-xhci.c
++++ b/hw/usb/hcd-xhci.c
+@@ -21,6 +21,7 @@
+
+ #include "qemu/osdep.h"
+ #include "qemu/timer.h"
++#include "qemu/log.h"
+ #include "qemu/module.h"
+ #include "qemu/queue.h"
+ #include "migration/vmstate.h"
+@@ -725,10 +726,14 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring)
+ bool control_td_set = 0;
+ uint32_t link_cnt = 0;
+
+- while (1) {
++ do {
+ TRBType type;
+- dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE,
+- MEMTXATTRS_UNSPECIFIED);
++ if (dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE,
++ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n",
++ __func__);
++ return -1;
++ }
+ le64_to_cpus(&trb.parameter);
+ le32_to_cpus(&trb.status);
+ le32_to_cpus(&trb.control);
+@@ -762,7 +767,17 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring)
+ if (!control_td_set && !(trb.control & TRB_TR_CH)) {
+ return length;
+ }
+- }
++
++ /*
++ * According to the xHCI spec, Transfer Ring segments should have
++ * a maximum size of 64 kB (see chapter "6 Data Structures")
++ */
++ } while (length < TRB_LINK_LIMIT * 65536 / TRB_SIZE);
++
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: exceeded maximum tranfer ring size!\n",
++ __func__);
++
++ return -1;
+ }
+
+ static void xhci_er_reset(XHCIState *xhci, int v)
+--
+2.35.5
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3611_1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3611_1.patch
new file mode 100644
index 0000000000..dc7990d1b7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3611_1.patch
@@ -0,0 +1,74 @@
+From be5a8cf347d0c47ee3e933dde075526fd8bd5c40 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Sat, 18 Dec 2021 17:09:10 +0100
+Subject: [PATCH] hw/audio/intel-hda: Do not ignore DMA overrun errors
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Per the "High Definition Audio Specification" manual (rev. 1.0a),
+section "3.3.30 Offset 5Dh: RIRBSTS - RIRB Status":
+
+ Response Overrun Interrupt Status (RIRBOIS):
+
+ Hardware sets this bit to a 1 when an overrun occurs in the RIRB.
+ An interrupt may be generated if the Response Overrun Interrupt
+ Control bit is set.
+
+ This bit will be set if the RIRB DMA engine is not able to write
+ the incoming responses to memory before additional incoming
+ responses overrun the internal FIFO.
+
+ When hardware detects an overrun, it will drop the responses which
+ overrun the buffer and set the RIRBOIS status bit to indicate the
+ error condition. Optionally, if the RIRBOIC is set, the hardware
+ will also generate an error to alert software to the problem.
+
+QEMU emulates the DMA engine with the stl_le_pci_dma() calls. This
+function returns a MemTxResult indicating whether the DMA access
+was successful.
+Handle any MemTxResult error as "DMA engine is not able to write the
+incoming responses to memory" and raise the Overrun Interrupt flag
+when this case occurs.
+
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=patch;h=be5a8cf347d0c47ee3e933dde075526fd8bd5c40]
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211218160912.1591633-2-philmd@redhat.com>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/audio/intel-hda.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
+index 5f8a878..47a36ac 100644
+--- a/hw/audio/intel-hda.c
++++ b/hw/audio/intel-hda.c
+@@ -350,6 +350,7 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res
+ IntelHDAState *d = container_of(bus, IntelHDAState, codecs);
+ hwaddr addr;
+ uint32_t wp, ex;
++ MemTxResult res = MEMTX_OK;
+
+ if (d->ics & ICH6_IRS_BUSY) {
+ dprint(d, 2, "%s: [irr] response 0x%x, cad 0x%x\n",
+@@ -368,8 +369,12 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res
+ ex = (solicited ? 0 : (1 << 4)) | dev->cad;
+ wp = (d->rirb_wp + 1) & 0xff;
+ addr = intel_hda_addr(d->rirb_lbase, d->rirb_ubase);
+- stl_le_pci_dma(&d->pci, addr + 8 * wp, response, attrs);
+- stl_le_pci_dma(&d->pci, addr + 8 * wp + 4, ex, attrs);
++ res |= stl_le_pci_dma(&d->pci, addr + 8 * wp, response, attrs);
++ res |= stl_le_pci_dma(&d->pci, addr + 8 * wp + 4, ex, attrs);
++ if (res != MEMTX_OK && (d->rirb_ctl & ICH6_RBCTL_OVERRUN_EN)) {
++ d->rirb_sts |= ICH6_RBSTS_OVERRUN;
++ intel_hda_update_irq(d);
++ }
+ d->rirb_wp = wp;
+
+ dprint(d, 2, "%s: [wp 0x%x] response 0x%x, extra 0x%x\n",
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3611_2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3611_2.patch
new file mode 100644
index 0000000000..b79fadf3f6
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3611_2.patch
@@ -0,0 +1,43 @@
+From 79fa99831debc9782087e834382c577215f2f511 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Sat, 18 Dec 2021 17:09:11 +0100
+Subject: [PATCH] hw/audio/intel-hda: Restrict DMA engine to memories (not MMIO
+ devices)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Issue #542 reports a reentrancy problem when the DMA engine accesses
+the HDA controller I/O registers. Fix by restricting the DMA engine
+to memory regions (forbidding MMIO devices such as the HDA controller).
+
+Reported-by: OSS-Fuzz (Issue 28435)
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Reviewed-by: Thomas Huth <thuth@redhat.com>
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/542
+CVE: CVE-2021-3611
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=patch;h=79fa99831debc9782087e834382c577215f2f511]
+
+Message-Id: <20211218160912.1591633-3-philmd@redhat.com>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/audio/intel-hda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
+index 47a36ac..78a47bc 100644
+--- a/hw/audio/intel-hda.c
++++ b/hw/audio/intel-hda.c
+@@ -345,7 +345,7 @@ static void intel_hda_corb_run(IntelHDAState *d)
+
+ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t response)
+ {
+- const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++ const MemTxAttrs attrs = { .memory = true };
+ HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus);
+ IntelHDAState *d = container_of(bus, IntelHDAState, codecs);
+ hwaddr addr;
+--
+1.8.3.1
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch
new file mode 100644
index 0000000000..3cbb34c54c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch
@@ -0,0 +1,88 @@
+From 205ccfd7a5ec86bd9a5678b8bd157562fc9a1643 Mon Sep 17 00:00:00 2001
+From: Philippe Mathieu-Daudé <philmd@redhat.com>
+Date: Thu, 10 Aug 2023 07:30:54 +0000
+Subject: [PATCH] hw/display/ati_2d: Fix buffer overflow in ati_2d_blt
+ (CVE-2021-3638) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8
+ Content-Transfer-Encoding: 8bit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When building QEMU with DEBUG_ATI defined then running with
+'-device ati-vga,romfile="" -d unimp,guest_errors -trace ati\*'
+we get:
+
+ ati_mm_write 4 0x16c0 DP_CNTL <- 0x1
+ ati_mm_write 4 0x146c DP_GUI_MASTER_CNTL <- 0x2
+ ati_mm_write 4 0x16c8 DP_MIX <- 0xff0000
+ ati_mm_write 4 0x16c4 DP_DATATYPE <- 0x2
+ ati_mm_write 4 0x224 CRTC_OFFSET <- 0x0
+ ati_mm_write 4 0x142c DST_PITCH_OFFSET <- 0xfe00000
+ ati_mm_write 4 0x1420 DST_Y <- 0x3fff
+ ati_mm_write 4 0x1410 DST_HEIGHT <- 0x3fff
+ ati_mm_write 4 0x1588 DST_WIDTH_X <- 0x3fff3fff
+ ati_2d_blt: vram:0x7fff5fa00000 addr:0 ds:0x7fff61273800 stride:2560 bpp:32 rop:0xff
+ ati_2d_blt: 0 0 0, 0 127 0, (0,0) -> (16383,16383) 16383x16383 > ^
+ ati_2d_blt: pixman_fill(dst:0x7fff5fa00000, stride:254, bpp:8, x:16383, y:16383, w:16383, h:16383, xor:0xff000000)
+ Thread 3 "qemu-system-i38" received signal SIGSEGV, Segmentation fault.
+ (gdb) bt
+ #0 0x00007ffff7f62ce0 in sse2_fill.lto_priv () at /lib64/libpixman-1.so.0
+ #1 0x00007ffff7f09278 in pixman_fill () at /lib64/libpixman-1.so.0
+ #2 0x0000555557b5a9af in ati_2d_blt (s=0x631000028800) at hw/display/ati_2d.c:196
+ #3 0x0000555557b4b5a2 in ati_mm_write (opaque=0x631000028800, addr=5512, data=1073692671, size=4) at hw/display/ati.c:843
+ #4 0x0000555558b90ec4 in memory_region_write_accessor (mr=0x631000039cc0, addr=5512, ..., size=4, ...) at softmmu/memory.c:492
+
+Commit 584acf34cb0 ("ati-vga: Fix reverse bit blts") introduced
+the local dst_x and dst_y which adjust the (x, y) coordinates
+depending on the direction in the SRCCOPY ROP3 operation, but
+forgot to address the same issue for the PATCOPY, BLACKNESS and
+WHITENESS operations, which also call pixman_fill().
+
+Fix that now by using the adjusted coordinates in the pixman_fill
+call, and update the related debug printf().
+
+Reported-by: Qiang Liu <qiangliu@zju.edu.cn>
+Fixes: 584acf34cb0 ("ati-vga: Fix reverse bit blts")
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Tested-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Message-Id: <20210906153103.1661195-1-philmd@redhat.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+CVE: CVE-2021-3638
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/205ccfd7a5ec86bd9a5678b8bd157562fc9a1643]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ hw/display/ati_2d.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c
+index 4dc10ea79..692bec91d 100644
+--- a/hw/display/ati_2d.c
++++ b/hw/display/ati_2d.c
+@@ -84,7 +84,7 @@ void ati_2d_blt(ATIVGAState *s)
+ DPRINTF("%d %d %d, %d %d %d, (%d,%d) -> (%d,%d) %dx%d %c %c\n",
+ s->regs.src_offset, s->regs.dst_offset, s->regs.default_offset,
+ s->regs.src_pitch, s->regs.dst_pitch, s->regs.default_pitch,
+- s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y,
++ s->regs.src_x, s->regs.src_y, dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? '>' : '<'),
+ (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? 'v' : '^'));
+@@ -180,11 +180,11 @@ void ati_2d_blt(ATIVGAState *s)
+ dst_stride /= sizeof(uint32_t);
+ DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n",
+ dst_bits, dst_stride, bpp,
+- s->regs.dst_x, s->regs.dst_y,
++ dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ filler);
+ pixman_fill((uint32_t *)dst_bits, dst_stride, bpp,
+- s->regs.dst_x, s->regs.dst_y,
++ dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ filler);
+ if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr &&
+--
+2.40.0
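The numbers in the trace above make the overrun concrete. As a rough, illustrative check in C (the 16 MiB VRAM size and pixman's stride-in-32-bit-words convention are assumptions here, not values taken from the patch), the last byte an unclamped fill with those parameters would touch lies far past the end of the framebuffer:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Values taken from the ati_2d_blt trace above. */
    uint64_t stride_words = 254;                 /* pixman stride, in 32-bit words */
    uint64_t bpp          = 8;
    uint64_t x = 16383, y = 16383, w = 16383, h = 16383;
    uint64_t vram_size = 16ull << 20;            /* assumed 16 MiB of VRAM */

    uint64_t last_row  = y + h - 1;              /* 32765 */
    uint64_t last_col  = x + w - 1;              /* 32765 */
    uint64_t last_byte = last_row * stride_words * 4 + last_col * (bpp / 8);

    printf("last byte written: %llu, vram size: %llu\n",
           (unsigned long long)last_byte, (unsigned long long)vram_size);
    /* ~33 MB written into a 16 MiB buffer: the SIGSEGV seen in sse2_fill(). */
    return 0;
}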
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-1.patch
new file mode 100644
index 0000000000..e898c20767
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-1.patch
@@ -0,0 +1,59 @@
+From b9d383ab797f54ae5fa8746117770709921dc529 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 19:24:19 +0100
+Subject: [PATCH] hw/intc/arm_gicv3: Check for !MEMTX_OK instead of MEMTX_ERROR
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Quoting Peter Maydell:
+
+ "These MEMTX_* aren't from the memory transaction
+ API functions; they're just being used by gicd_readl() and
+ friends as a way to indicate a success/failure so that the
+ actual MemoryRegionOps read/write fns like gicv3_dist_read()
+ can log a guest error."
+
+We are going to introduce more MemTxResult bits, so it is
+safer to check for !MEMTX_OK rather than MEMTX_ERROR.
+
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+
+CVE: CVE-2021-3750
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=b9d383ab797f54ae5fa8746117770709921dc529]
+---
+ hw/intc/arm_gicv3_redist.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
+index c8ff3ec..99b11ca 100644
+--- a/hw/intc/arm_gicv3_redist.c
++++ b/hw/intc/arm_gicv3_redist.c
+@@ -462,7 +462,7 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
+ break;
+ }
+
+- if (r == MEMTX_ERROR) {
++ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read at offset " TARGET_FMT_plx
+ " size %u\n", __func__, offset, size);
+@@ -521,7 +521,7 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
+ break;
+ }
+
+- if (r == MEMTX_ERROR) {
++ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write at offset " TARGET_FMT_plx
+ " size %u\n", __func__, offset, size);
+--
+1.8.3.1
+
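Since MemTxResult is a bit mask rather than an enum, comparing against MEMTX_ERROR alone misses any other failure bit. A minimal sketch of the difference, reusing the flag values defined in this series:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t MemTxResult;
#define MEMTX_OK            0
#define MEMTX_ERROR         (1U << 0)   /* device returned an error */
#define MEMTX_DECODE_ERROR  (1U << 1)   /* nothing at that address */
#define MEMTX_ACCESS_ERROR  (1U << 2)   /* access denied (added in part 3) */

int main(void)
{
    MemTxResult r = MEMTX_ACCESS_ERROR;  /* e.g. DMA rejected by attrs.memory */

    if (r == MEMTX_ERROR) {
        printf("old check: error logged\n");   /* never reached for this result */
    }
    if (r != MEMTX_OK) {
        printf("new check: error logged\n");   /* catches every failure bit */
    }
    return 0;
}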
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-2.patch
new file mode 100644
index 0000000000..f163b4fab3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-2.patch
@@ -0,0 +1,65 @@
+From 58e74682baf4e1ad26b064d8c02e5bc99c75c5d9 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 19:24:20 +0100
+Subject: [PATCH] softmmu/physmem: Simplify flatview_write and
+ address_space_access_valid
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Remove the unnecessary local 'result' variables.
+
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Alexander Bulekov <alxndr@bu.edu>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211215182421.418374-3-philmd@redhat.com>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+
+CVE: CVE-2021-3750
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=58e74682baf4e1ad26b064d8c02e5bc99c75c5d9]
+---
+ softmmu/physmem.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/softmmu/physmem.c b/softmmu/physmem.c
+index 43ae70f..3d968ca 100644
+--- a/softmmu/physmem.c
++++ b/softmmu/physmem.c
+@@ -2826,14 +2826,11 @@ static MemTxResult flatview_write(FlatVi
+ hwaddr l;
+ hwaddr addr1;
+ MemoryRegion *mr;
+- MemTxResult result = MEMTX_OK;
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
+- result = flatview_write_continue(fv, addr, attrs, buf, len,
+- addr1, l, mr);
+-
+- return result;
++ return flatview_write_continue(fv, addr, attrs, buf, len,
++ addr1, l, mr);
+ }
+
+ /* Called within RCU critical section. */
+@@ -3130,12 +3127,10 @@ bool address_space_access_valid(AddressS
+ MemTxAttrs attrs)
+ {
+ FlatView *fv;
+- bool result;
+
+ RCU_READ_LOCK_GUARD();
+ fv = address_space_to_flatview(as);
+- result = flatview_access_valid(fv, addr, len, is_write, attrs);
+- return result;
++ return flatview_access_valid(fv, addr, len, is_write, attrs);
+ }
+
+ static hwaddr
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-3.patch
new file mode 100644
index 0000000000..24668ad1a5
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750-3.patch
@@ -0,0 +1,156 @@
+From 3ab6fdc91b72e156da22848f0003ff4225690ced Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Wed, 15 Dec 2021 19:24:21 +0100
+Subject: [PATCH] softmmu/physmem: Introduce MemTxAttrs::memory field and
+ MEMTX_ACCESS_ERROR
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Add the 'memory' bit to the memory attributes to restrict bus
+controller accesses to memories.
+
+Introduce flatview_access_allowed() to check bus permission
+before running any bus transaction.
+
+Have read/write accessors return MEMTX_ACCESS_ERROR if an access is
+restricted.
+
+There is no change for the default case where 'memory' is not set.
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211215182421.418374-4-philmd@redhat.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+[thuth: Replaced MEMTX_BUS_ERROR with MEMTX_ACCESS_ERROR, remove "inline"]
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+
+CVE: CVE-2021-3750
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=3ab6fdc91b72e156da22848f0003ff4225690ced]
+---
+ include/exec/memattrs.h | 9 +++++++++
+ softmmu/physmem.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 51 insertions(+), 2 deletions(-)
+
+diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
+index 95f2d20..9fb98bc 100644
+--- a/include/exec/memattrs.h
++++ b/include/exec/memattrs.h
+@@ -35,6 +35,14 @@ typedef struct MemTxAttrs {
+ unsigned int secure:1;
+ /* Memory access is usermode (unprivileged) */
+ unsigned int user:1;
++ /*
++ * Bus interconnect and peripherals can access anything (memories,
++ * devices) by default. By setting the 'memory' bit, bus transactions
++ * are restricted to "normal" memories (per the AMBA documentation)
++ * versus devices. Access to devices will be logged and rejected
++ * (see MEMTX_ACCESS_ERROR).
++ */
++ unsigned int memory:1;
+ /* Requester ID (for MSI for example) */
+ unsigned int requester_id:16;
+ /* Invert endianness for this page */
+@@ -66,6 +74,7 @@ typedef struct MemTxAttrs {
+ #define MEMTX_OK 0
+ #define MEMTX_ERROR (1U << 0) /* device returned an error */
+ #define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
++#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */
+ typedef uint32_t MemTxResult;
+
+ #endif
+diff --git a/softmmu/physmem.c b/softmmu/physmem.c
+index 3d968ca..4e1b27a 100644
+--- a/softmmu/physmem.c
++++ b/softmmu/physmem.c
+@@ -41,6 +41,7 @@
+ #include "qemu/config-file.h"
+ #include "qemu/error-report.h"
+ #include "qemu/qemu-print.h"
++#include "qemu/log.h"
+ #include "exec/memory.h"
+ #include "exec/ioport.h"
+ #include "sysemu/dma.h"
+@@ -2759,6 +2760,33 @@ static bool prepare_mmio_access(MemoryRe
+ return release_lock;
+ }
+
++/**
++ * flatview_access_allowed
++ * @mr: #MemoryRegion to be accessed
++ * @attrs: memory transaction attributes
++ * @addr: address within that memory region
++ * @len: the number of bytes to access
++ *
++ * Check if a memory transaction is allowed.
++ *
++ * Returns: true if transaction is allowed, false if denied.
++ */
++static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
++ hwaddr addr, hwaddr len)
++{
++ if (likely(!attrs.memory)) {
++ return true;
++ }
++ if (memory_region_is_ram(mr)) {
++ return true;
++ }
++ qemu_log_mask(LOG_GUEST_ERROR,
++ "Invalid access to non-RAM device at "
++ "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", "
++ "region '%s'\n", addr, len, memory_region_name(mr));
++ return false;
++}
++
+ /* Called within RCU critical section. */
+ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs,
+@@ -2773,7 +2801,10 @@ static MemTxResult flatview_write_contin
+ const uint8_t *buf = ptr;
+
+ for (;;) {
+- if (!memory_access_is_direct(mr, true)) {
++ if (!flatview_access_allowed(mr, attrs, addr1, l)) {
++ result |= MEMTX_ACCESS_ERROR;
++ /* Keep going. */
++ } else if (!memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+ /* XXX: could force current_cpu to NULL to avoid
+@@ -2818,6 +2849,9 @@ static MemTxResult flatview_write(FlatVi
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
++ if (!flatview_access_allowed(mr, attrs, addr, len)) {
++ return MEMTX_ACCESS_ERROR;
++ }
+ return flatview_write_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
+ }
+@@ -2836,7 +2870,10 @@ MemTxResult flatview_read_continue(FlatV
+
+ fuzz_dma_read_cb(addr, len, mr);
+ for (;;) {
+- if (!memory_access_is_direct(mr, false)) {
++ if (!flatview_access_allowed(mr, attrs, addr1, l)) {
++ result |= MEMTX_ACCESS_ERROR;
++ /* Keep going. */
++ } else if (!memory_access_is_direct(mr, false)) {
+ /* I/O case */
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+@@ -2879,6 +2916,9 @@ static MemTxResult flatview_read(FlatVie
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
++ if (!flatview_access_allowed(mr, attrs, addr, len)) {
++ return MEMTX_ACCESS_ERROR;
++ }
+ return flatview_read_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
+ }
+--
+1.8.3.1
+
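Once the 'memory' attribute exists, a device model opts in by setting the bit on the attributes it hands to the DMA helpers, which is exactly what the intel-hda and nvme fixes in this series do. A minimal usage sketch against the QEMU API (the device's AddressSpace and the guest address are placeholders, not code from the patch):

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Sketch only: 'dev_as' and 'guest_addr' stand in for a real device's
 * AddressSpace and a guest-programmed DMA address. */
static MemTxResult device_dma_read(AddressSpace *dev_as, hwaddr guest_addr,
                                   void *buf, hwaddr len)
{
    /* Restrict this transaction to RAM: an MMIO target now fails with
     * MEMTX_ACCESS_ERROR instead of re-entering another device model. */
    const MemTxAttrs attrs = { .memory = true };

    return address_space_read(dev_as, guest_addr, attrs, buf, len);
}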
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
new file mode 100644
index 0000000000..7555e5bc40
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
@@ -0,0 +1,70 @@
+From 12daeafc9868c1ebe482d580494f9e6d3d5c260f Mon Sep 17 00:00:00 2001
+From: Klaus Jensen <k.jensen@samsung.com>
+Date: Fri, 17 Dec 2021 10:44:01 +0100
+Subject: [PATCH] hw/nvme: fix CVE-2021-3929
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
+device itself. This still allows DMA to MMIO regions of other devices
+(e.g. doing P2P DMA to the controller memory buffer of another NVMe
+device).
+
+Fixes: CVE-2021-3929
+Reported-by: Qiuhao Li <Qiuhao.Li@outlook.com>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
+
+Upstream-Status: Backport [736b01642d85be832385063f278fe7cd4ffb5221]
+CVE: CVE-2021-3929
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/nvme/ctrl.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
+index 5f573c417..eda52c6ac 100644
+--- a/hw/nvme/ctrl.c
++++ b/hw/nvme/ctrl.c
+@@ -357,6 +357,24 @@ static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr)
+ return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba);
+ }
+
++static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr)
++{
++ hwaddr hi, lo;
++
++ /*
++ * The purpose of this check is to guard against invalid "local" access to
++ * the iomem (i.e. controller registers). Thus, we check against the range
++ * covered by the 'bar0' MemoryRegion since that is currently composed of
++ * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however,
++ * that if the device model is ever changed to allow the CMB to be located
++ * in BAR0 as well, then this must be changed.
++ */
++ lo = n->bar0.addr;
++ hi = lo + int128_get64(n->bar0.size);
++
++ return addr >= lo && addr < hi;
++}
++
+ static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ {
+ hwaddr hi = addr + size - 1;
+@@ -614,6 +632,10 @@ static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
+
+ trace_pci_nvme_map_addr(addr, len);
+
++ if (nvme_addr_is_iomem(n, addr)) {
++ return NVME_DATA_TRAS_ERROR;
++ }
++
+ if (nvme_addr_is_cmb(n, addr)) {
+ cmb = true;
+ } else if (nvme_addr_is_pmr(n, addr)) {
+--
+2.33.0
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-4158.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-4158.patch
new file mode 100644
index 0000000000..f6de53244f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-4158.patch
@@ -0,0 +1,46 @@
+From a0b64c6d078acb9bcfae600e22bf99a9a7deca7c Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Tue, 21 Dec 2021 09:45:44 -0500
+Subject: [PATCH] acpi: validate hotplug selector on access
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When the bus is looked up on a PCI write, we didn't
+validate that the lookup succeeded.
+Fuzzers can thus trigger a QEMU crash by dereferencing the NULL
+bus pointer.
+
+Fixes: b32bd763a1 ("pci: introduce acpi-index property for PCI device")
+Fixes: CVE-2021-4158
+Cc: "Igor Mammedov" <imammedo@redhat.com>
+Fixes: https://gitlab.com/qemu-project/qemu/-/issues/770
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Reviewed-by: Ani Sinha <ani@anisinha.ca>
+
+Upstream-Status: Backport [9bd6565ccee68f72d5012e24646e12a1c662827e]
+CVE: CVE-2021-4158
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/acpi/pcihp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c
+index 30405b511..a5e182dd3 100644
+--- a/hw/acpi/pcihp.c
++++ b/hw/acpi/pcihp.c
+@@ -491,6 +491,9 @@ static void pci_write(void *opaque, hwaddr addr, uint64_t data,
+ }
+
+ bus = acpi_pcihp_find_hotplug_bus(s, s->hotplug_select);
++ if (!bus) {
++ break;
++ }
+ QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) {
+ Object *o = OBJECT(kid->child);
+ PCIDevice *dev = PCI_DEVICE(o);
+--
+2.33.0
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-0216_1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216_1.patch
new file mode 100644
index 0000000000..de7458fc72
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216_1.patch
@@ -0,0 +1,42 @@
+From 1cedc914b2c4b4e0c9dfcd1b0e02917af35b5eb6 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Tue, 5 Jul 2022 22:05:43 +0200
+Subject: [PATCH 1/3] scsi/lsi53c895a: fix use-after-free in lsi_do_msgout
+ (CVE-2022-0216)
+
+Set current_req->req to NULL to prevent reusing a free'd buffer in case of
+repeated SCSI cancel requests. Thanks to Thomas Huth for suggesting the patch.
+
+Fixes: CVE-2022-0216
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/972
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Thomas Huth <thuth@redhat.com>
+Message-Id: <20220705200543.2366809-1-mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Upstream-Status: Backport [6c8fa961da5e60f574bb52fd3ad44b1e9e8ad4b8]
+CVE: CVE-2022-0216
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/scsi/lsi53c895a.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
+index 85e907a78..8033cf050 100644
+--- a/hw/scsi/lsi53c895a.c
++++ b/hw/scsi/lsi53c895a.c
+@@ -1029,8 +1029,9 @@ static void lsi_do_msgout(LSIState *s)
+ case 0x0d:
+ /* The ABORT TAG message clears the current I/O process only. */
+ trace_lsi_do_msgout_abort(current_tag);
+- if (current_req) {
++ if (current_req && current_req->req) {
+ scsi_req_cancel(current_req->req);
++ current_req->req = NULL;
+ }
+ lsi_disconnect(s);
+ break;
+--
+2.33.0
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-0216_2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216_2.patch
new file mode 100644
index 0000000000..12f5a602da
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216_2.patch
@@ -0,0 +1,52 @@
+From 8f2c2cb908758192d5ebc00605cbf0989b8a507c Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Mon, 11 Jul 2022 14:33:16 +0200
+Subject: [PATCH 3/3] scsi/lsi53c895a: really fix use-after-free in
+ lsi_do_msgout (CVE-2022-0216)
+
+Set current_req to NULL, not current_req->req, to prevent reusing a free'd
+buffer in case of repeated SCSI cancel requests. Also apply the fix to
+CLEAR QUEUE and BUS DEVICE RESET messages as well, since they also cancel
+the request.
+
+Thanks to Alexander Bulekov for providing a reproducer.
+
+Fixes: CVE-2022-0216
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/972
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20220711123316.421279-1-mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Upstream-Status: Backport [4367a20cc442c56b05611b4224de9a61908f9eac]
+CVE: CVE-2022-0216
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/scsi/lsi53c895a.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
+index 8033cf050..fbe3fa3dd 100644
+--- a/hw/scsi/lsi53c895a.c
++++ b/hw/scsi/lsi53c895a.c
+@@ -1031,7 +1031,7 @@ static void lsi_do_msgout(LSIState *s)
+ trace_lsi_do_msgout_abort(current_tag);
+ if (current_req && current_req->req) {
+ scsi_req_cancel(current_req->req);
+- current_req->req = NULL;
++ current_req = NULL;
+ }
+ lsi_disconnect(s);
+ break;
+@@ -1057,6 +1057,7 @@ static void lsi_do_msgout(LSIState *s)
+ /* clear the current I/O process */
+ if (s->current) {
+ scsi_req_cancel(s->current->req);
++ current_req = NULL;
+ }
+
+ /* As the current implemented devices scsi_disk and scsi_generic
+--
+2.33.0
+
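Both halves of this fix follow the usual rule for cancellation paths: release the request once, then clear every handle that could reach it again. A generic C sketch of the hazard (the structure names are illustrative, not the QEMU ones):

#include <stdlib.h>

struct req { int tag; };

struct queue_entry {
    struct req *req;          /* owned request, freed by cancel */
};

static void cancel(struct queue_entry *e)
{
    free(e->req);             /* request object released here */
    e->req = NULL;            /* without this, a repeated ABORT reuses freed memory */
}

int main(void)
{
    struct queue_entry current = { .req = malloc(sizeof(struct req)) };

    cancel(&current);         /* first ABORT TAG message */
    if (current.req) {        /* second ABORT TAG: the guard now fails safely */
        cancel(&current);
    }
    return 0;
}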
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-0358.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-0358.patch
new file mode 100644
index 0000000000..8eb1475638
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-0358.patch
@@ -0,0 +1,106 @@
+From 4d2558ec9336d3614a43f7437c9cf74793ae3a87 Mon Sep 17 00:00:00 2001
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Tue, 25 Jan 2022 13:51:14 -0500
+Subject: [PATCH] virtiofsd: Drop membership of all supplementary groups
+ (CVE-2022-0358)
+
+At the start, drop membership of all supplementary groups. This is
+not required.
+
+If we have membership of "root" supplementary group and when we switch
+uid/gid using setresuid/setsgid, we still retain membership of existing
+supplemntary groups. And that can allow some operations which are not
+normally allowed.
+
+For example, if root in guest creates a dir as follows.
+
+$ mkdir -m 03777 test_dir
+
+This sets SGID on dir as well as allows unprivileged users to write into
+this dir.
+
+And now, as an unprivileged user, open a file as follows.
+
+$ su test
+$ fd = open("test_dir/priviledge_id", O_RDWR|O_CREAT|O_EXCL, 02755);
+
+This will create an SGID-set executable in test_dir/.
+
+And that's a problem because now an unprivileged user can execute it,
+get egid=0 and get access to resources owned by "root" group. This is
+privilege escalation.
+
+Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=2044863
+Fixes: CVE-2022-0358
+Reported-by: JIETAO XIAO <shawtao1125@gmail.com>
+Suggested-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+Message-Id: <YfBGoriS38eBQrAb@redhat.com>
+Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
+ dgilbert: Fixed missing {}'s style nit
+
+Upstream-Status: Backport [449e8171f96a6a944d1f3b7d3627ae059eae21ca]
+CVE: CVE-2022-0358
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ tools/virtiofsd/passthrough_ll.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+diff --git a/tools/virtiofsd/passthrough_ll.c b/tools/virtiofsd/passthrough_ll.c
+index 64b5b4fbb..b3d0674f6 100644
+--- a/tools/virtiofsd/passthrough_ll.c
++++ b/tools/virtiofsd/passthrough_ll.c
+@@ -54,6 +54,7 @@
+ #include <sys/wait.h>
+ #include <sys/xattr.h>
+ #include <syslog.h>
++#include <grp.h>
+
+ #include "qemu/cutils.h"
+ #include "passthrough_helpers.h"
+@@ -1161,6 +1162,30 @@ static void lo_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
+ #define OURSYS_setresuid SYS_setresuid
+ #endif
+
++static void drop_supplementary_groups(void)
++{
++ int ret;
++
++ ret = getgroups(0, NULL);
++ if (ret == -1) {
++ fuse_log(FUSE_LOG_ERR, "getgroups() failed with error=%d:%s\n",
++ errno, strerror(errno));
++ exit(1);
++ }
++
++ if (!ret) {
++ return;
++ }
++
++ /* Drop all supplementary groups. We should not need it */
++ ret = setgroups(0, NULL);
++ if (ret == -1) {
++ fuse_log(FUSE_LOG_ERR, "setgroups() failed with error=%d:%s\n",
++ errno, strerror(errno));
++ exit(1);
++ }
++}
++
+ /*
+ * Change to uid/gid of caller so that file is created with
+ * ownership of caller.
+@@ -3926,6 +3951,8 @@ int main(int argc, char *argv[])
+
+ qemu_init_exec_dir(argv[0]);
+
++ drop_supplementary_groups();
++
+ pthread_mutex_init(&lo.mutex, NULL);
+ lo.inodes = g_hash_table_new(lo_key_hash, lo_key_equal);
+ lo.root.fd = -1;
+--
+2.33.0
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-3165.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-3165.patch
new file mode 100644
index 0000000000..a7d061eb99
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-3165.patch
@@ -0,0 +1,61 @@
+From a15f7d9913d050fb72a79bbbefa5c2329d92e71d Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 8 Nov 2022 17:10:00 +0530
+Subject: [PATCH] CVE-2022-3165
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/d307040b18]
+CVE: CVE-2022-3165
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+ui/vnc-clipboard: fix integer underflow in vnc_client_cut_text_ext
+
+Extended ClientCutText messages start with a 4-byte header. If len < 4,
+an integer underflow occurs in vnc_client_cut_text_ext. The result is
+used to decompress data in a while loop in inflate_buffer, leading to
+CPU consumption and denial of service. Prevent this by checking dlen in
+protocol_client_msg.
+
+Fixes: CVE-2022-3165
+
+("ui/vnc: clipboard support")
+Reported-by: TangPeng <tangpeng@qianxin.com>
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Message-Id: <20220925204511.1103214-1-mcascell@redhat.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+---
+ ui/vnc.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/ui/vnc.c b/ui/vnc.c
+index af02522e8..a14b6861b 100644
+--- a/ui/vnc.c
++++ b/ui/vnc.c
+@@ -2442,8 +2442,8 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len)
+ if (len == 1) {
+ return 8;
+ }
++ uint32_t dlen = abs(read_s32(data, 4));
+ if (len == 8) {
+- uint32_t dlen = abs(read_s32(data, 4));
+ if (dlen > (1 << 20)) {
+ error_report("vnc: client_cut_text msg payload has %u bytes"
+ " which exceeds our limit of 1MB.", dlen);
+@@ -2456,8 +2456,13 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len)
+ }
+
+ if (read_s32(data, 4) < 0) {
+- vnc_client_cut_text_ext(vs, abs(read_s32(data, 4)),
+- read_u32(data, 8), data + 12);
++ if (dlen < 4) {
++ error_report("vnc: malformed payload (header less than 4 bytes)"
++ " in extended clipboard pseudo-encoding.");
++ vnc_client_error(vs);
++ break;
++ }
++ vnc_client_cut_text_ext(vs, dlen, read_u32(data, 8), data + 12);
+ break;
+ }
+ vnc_client_cut_text(vs, read_u32(data, 4), data + 8);
+--
+2.25.1
+
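The underflow itself is plain unsigned arithmetic. A small sketch, assuming the callee subtracts the 4-byte extended-clipboard header from the attacker-supplied length before passing it to inflate_buffer():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t dlen = 1;              /* guest-controlled, smaller than the header */
    uint32_t payload = dlen - 4;    /* unsigned wrap-around: 0xFFFFFFFD */

    printf("length handed to inflate_buffer: %u\n", payload);
    /* A near-4G length keeps the Z_BUF_ERROR retry loop spinning (CPU DoS),
     * which is what the dlen < 4 check in protocol_client_msg() prevents. */
    return 0;
}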
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch
new file mode 100644
index 0000000000..3786497f01
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch
@@ -0,0 +1,53 @@
+From ee76e64ee1cb232b77652b21cc94ec6b6c7e4b13 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 27 Jul 2022 10:49:47 +0530
+Subject: [PATCH] CVE-2022-35414
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/418ade7849ce7641c0f7333718caf5091a02fd4c]
+CVE: CVE-2022-35414
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ softmmu/physmem.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/softmmu/physmem.c b/softmmu/physmem.c
+index 3524c04c2..3c467527d 100644
+--- a/softmmu/physmem.c
++++ b/softmmu/physmem.c
+@@ -667,7 +667,7 @@ void tcg_iommu_init_notifier_list(CPUState *cpu)
+
+ /* Called from RCU critical section */
+ MemoryRegionSection *
+-address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
++address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
+ hwaddr *xlat, hwaddr *plen,
+ MemTxAttrs attrs, int *prot)
+ {
+@@ -676,6 +676,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ IOMMUMemoryRegionClass *imrc;
+ IOMMUTLBEntry iotlb;
+ int iommu_idx;
++ hwaddr addr = orig_addr;
+ AddressSpaceDispatch *d =
+ qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+
+@@ -720,6 +721,16 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ return section;
+
+ translate_fail:
++ /*
++ * We should be given a page-aligned address -- certainly
++ * tlb_set_page_with_attrs() does so. The page offset of xlat
++ * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
++ * The page portion of xlat will be logged by memory_region_access_valid()
++ * when this memory access is rejected, so use the original untranslated
++ * physical address.
++ */
++ assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
++ *xlat = orig_addr;
+ return &d->map.sections[PHYS_SECTION_UNASSIGNED];
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch
new file mode 100644
index 0000000000..96052a19e8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch
@@ -0,0 +1,99 @@
+From 6dbbf055148c6f1b7d8a3251a65bd6f3d1e1f622 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@linaro.org>
+Date: Mon, 28 Nov 2022 21:27:40 +0100
+Subject: [PATCH] hw/display/qxl: Avoid buffer overrun in qxl_phys2virt
+ (CVE-2022-4144)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Have qxl_get_check_slot_offset() return false if the requested
+buffer size does not fit within the slot memory region.
+
+Similarly qxl_phys2virt() now returns NULL in such case, and
+qxl_dirty_one_surface() aborts.
+
+This avoids buffer overrun in the host pointer returned by
+memory_region_get_ram_ptr().
+
+Fixes: CVE-2022-4144 (out-of-bounds read)
+Reported-by: Wenxu Yin (@awxylitol)
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1336
+
+CVE: CVE-2022-4144
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/6dbbf055148c6f1b7d8a3251a65bd6f3d1e1f622]
+Comments: Deleted the patch hunk in qxl.h, as it contains a change
+in comments which is not present in the current version of qemu
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20221128202741.4945-5-philmd@linaro.org>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ hw/display/qxl.c | 27 +++++++++++++++++++++++----
+ 1 file changed, 23 insertions(+), 4 deletions(-)
+
+diff --git a/hw/display/qxl.c b/hw/display/qxl.c
+index 231d733250..0b21626aad 100644
+--- a/hw/display/qxl.c
++++ b/hw/display/qxl.c
+@@ -1424,11 +1424,13 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
+
+ /* can be also called from spice server thread context */
+ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+- uint32_t *s, uint64_t *o)
++ uint32_t *s, uint64_t *o,
++ size_t size_requested)
+ {
+ uint64_t phys = le64_to_cpu(pqxl);
+ uint32_t slot = (phys >> (64 - 8)) & 0xff;
+ uint64_t offset = phys & 0xffffffffffff;
++ uint64_t size_available;
+
+ if (slot >= NUM_MEMSLOTS) {
+ qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot,
+@@ -1452,6 +1454,23 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ slot, offset, qxl->guest_slots[slot].size);
+ return false;
+ }
++ size_available = memory_region_size(qxl->guest_slots[slot].mr);
++ if (qxl->guest_slots[slot].offset + offset >= size_available) {
++ qxl_set_guest_bug(qxl,
++ "slot %d offset %"PRIu64" > region size %"PRIu64"\n",
++ slot, qxl->guest_slots[slot].offset + offset,
++ size_available);
++ return false;
++ }
++ size_available -= qxl->guest_slots[slot].offset + offset;
++ if (size_requested > size_available) {
++ qxl_set_guest_bug(qxl,
++ "slot %d offset %"PRIu64" size %zu: "
++ "overrun by %"PRIu64" bytes\n",
++ slot, offset, size_requested,
++ size_requested - size_available);
++ return false;
++ }
+
+ *s = slot;
+ *o = offset;
+@@ -1471,7 +1490,7 @@ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id,
+ offset = le64_to_cpu(pqxl) & 0xffffffffffff;
+ return (void *)(intptr_t)offset;
+ case MEMSLOT_GROUP_GUEST:
+- if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset)) {
++ if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) {
+ return NULL;
+ }
+ ptr = memory_region_get_ram_ptr(qxl->guest_slots[slot].mr);
+@@ -1937,9 +1956,9 @@ static void qxl_dirty_one_surface(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ uint32_t slot;
+ bool rc;
+
+- rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset);
+- assert(rc == true);
+ size = (uint64_t)height * abs(stride);
++ rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size);
++ assert(rc == true);
+ trace_qxl_surfaces_dirty(qxl->id, offset, size);
+ qxl_set_dirty(qxl->guest_slots[slot].mr,
+ qxl->guest_slots[slot].offset + offset,
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch
new file mode 100644
index 0000000000..025075fd6d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch
@@ -0,0 +1,75 @@
+[Ubuntu note: remove fuzz-lsi53c895a-test.c changes since the file does not
+ exist for this release]
+From b987718bbb1d0eabf95499b976212dd5f0120d75 Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Mon, 22 May 2023 11:10:11 +0200
+Subject: [PATCH] hw/scsi/lsi53c895a: Fix reentrancy issues in the LSI
+ controller (CVE-2023-0330)
+
+We cannot use the generic reentrancy guard in the LSI code, so
+we have to manually prevent endless reentrancy here. The problematic
+lsi_execute_script() function has already a way to detect whether
+too many instructions have been executed - we just have to slightly
+change the logic here that it also takes into account if the function
+has been called too often in a reentrant way.
+
+The code in fuzz-lsi53c895a-test.c has been taken from an earlier
+patch by Mauro Matteo Cascella.
+
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1563
+Message-Id: <20230522091011.1082574-1-thuth@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2023-0330.patch?h=ubuntu/jammy-security
+Upstream commit https://gitlab.com/qemu-project/qemu/-/commit/b987718bbb1d0eabf95499b976212dd5f0120d75]
+CVE: CVE-2023-0330
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/scsi/lsi53c895a.c | 23 +++++++++++++++------
+ tests/qtest/fuzz-lsi53c895a-test.c | 33 ++++++++++++++++++++++++++++++
+ 2 files changed, 50 insertions(+), 6 deletions(-)
+
+--- qemu-6.2+dfsg.orig/hw/scsi/lsi53c895a.c
++++ qemu-6.2+dfsg/hw/scsi/lsi53c895a.c
+@@ -1135,15 +1135,24 @@ static void lsi_execute_script(LSIState
+ uint32_t addr, addr_high;
+ int opcode;
+ int insn_processed = 0;
++ static int reentrancy_level;
++
++ reentrancy_level++;
+
+ s->istat1 |= LSI_ISTAT1_SRUN;
+ again:
+- if (++insn_processed > LSI_MAX_INSN) {
+- /* Some windows drivers make the device spin waiting for a memory
+- location to change. If we have been executed a lot of code then
+- assume this is the case and force an unexpected device disconnect.
+- This is apparently sufficient to beat the drivers into submission.
+- */
++ /*
++ * Some windows drivers make the device spin waiting for a memory location
++ * to change. If we have executed more than LSI_MAX_INSN instructions then
++ * assume this is the case and force an unexpected device disconnect. This
++ * is apparently sufficient to beat the drivers into submission.
++ *
++ * Another issue (CVE-2023-0330) can occur if the script is programmed to
++ * trigger itself again and again. Avoid this problem by stopping after
++ * being called multiple times in a reentrant way (8 is an arbitrary value
++ * which should be enough for all valid use cases).
++ */
++ if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) {
+ if (!(s->sien0 & LSI_SIST0_UDC)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "lsi_scsi: inf. loop with UDC masked");
+@@ -1597,6 +1606,8 @@ again:
+ }
+ }
+ trace_lsi_execute_script_stop();
++
++ reentrancy_level--;
+ }
+
+ static uint8_t lsi_reg_readb(LSIState *s, int offset)
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-1544.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-1544.patch
new file mode 100644
index 0000000000..b4781e1c18
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-1544.patch
@@ -0,0 +1,70 @@
+From e7d6e37675e422cfab2fe8c6bd411d2097228760 Mon Sep 17 00:00:00 2001
+From: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Date: Wed, 1 Mar 2023 16:29:26 +0200
+Subject: [PATCH] hw/pvrdma: Protect against buggy or malicious guest driver
+
+The guest driver allocates and initializes page tables to be used as a ring
+of descriptors for CQ and async events.
+The page table that represents the ring, along with the number of pages
+in the page table, is passed to the device.
+Currently our device supports only one page table for a ring.
+
+Let's make sure that the number of page table entries the driver
+reports does not exceed the size of a single page table.
+
+CVE: CVE-2023-1544
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/85fc35afa93c]
+
+Reported-by: Soul Chen <soulchen8650@gmail.com>
+Signed-off-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Fixes: CVE-2023-1544
+Message-ID: <20230301142926.18686-1-yuval.shaia.ml@gmail.com>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+(cherry picked from commit 85fc35afa93c7320d1641d344d0c5dfbe341d087)
+Signed-off-by: Niranjan Pradhan <nirpradh@cisco.com>
+---
+ hw/rdma/vmw/pvrdma_main.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
+index 4fc6712025..55b338046e 100644
+--- a/hw/rdma/vmw/pvrdma_main.c
++++ b/hw/rdma/vmw/pvrdma_main.c
+@@ -91,19 +91,33 @@ static int init_dev_ring(PvrdmaRing *ring, PvrdmaRingState **ring_state,
+ dma_addr_t dir_addr, uint32_t num_pages)
+ {
+ uint64_t *dir, *tbl;
+- int rc = 0;
++ int max_pages, rc = 0;
+
+ if (!num_pages) {
+ rdma_error_report("Ring pages count must be strictly positive");
+ return -EINVAL;
+ }
+
++ /*
++ * Make sure we can satisfy the requested number of pages in a single
++ * TARGET_PAGE_SIZE sized page table (taking into account that first entry
++ * is reserved for ring-state)
++ */
++ max_pages = TARGET_PAGE_SIZE / sizeof(dma_addr_t) - 1;
++ if (num_pages > max_pages) {
++ rdma_error_report("Maximum pages on a single directory must not exceed %d\n",
++ max_pages);
++ return -EINVAL;
++ }
++
+ dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to page directory (ring %s)", name);
+ rc = -ENOMEM;
+ goto out;
+ }
++
++ /* We support only one page table for a ring */
+ tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
+ if (!tbl) {
+ rdma_error_report("Failed to map to page table (ring %s)", name);
+--
+2.35.6
+
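The limit enforced by the patch follows directly from the layout it describes: one TARGET_PAGE_SIZE page of DMA-address-sized entries, minus the entry reserved for the ring state. A worked sketch with assumed values (4 KiB pages, 8-byte dma_addr_t):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t target_page_size = 4096;             /* assumed page size */
    const uint64_t dma_addr_size    = sizeof(uint64_t); /* assumed dma_addr_t size */

    /* First entry of the single page-table page holds the ring state. */
    uint64_t max_pages = target_page_size / dma_addr_size - 1;   /* 511 */

    printf("max ring pages per directory: %llu\n",
           (unsigned long long)max_pages);
    /* A guest-reported num_pages above this would index past the one mapped table. */
    return 0;
}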
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch
new file mode 100644
index 0000000000..a86413fbad
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch
@@ -0,0 +1,180 @@
+From f6b0de53fb87ddefed348a39284c8e2f28dc4eda Mon Sep 17 00:00:00 2001
+From: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Date: Wed, 7 Jun 2023 18:29:33 +0200
+Subject: [PATCH] 9pfs: prevent opening special files (CVE-2023-2861)
+
+The 9p protocol does not specifically define how server shall behave when
+client tries to open a special file, however from security POV it does
+make sense for 9p server to prohibit opening any special file on host side
+in general. A sane Linux 9p client for instance would never attempt to
+open a special file on host side, it would always handle those exclusively
+on its guest side. A malicious client however could potentially escape
+from the exported 9p tree by creating and opening a device file on host
+side.
+
+With QEMU this could only be exploited in the following unsafe setups:
+
+ - Running QEMU binary as root AND 9p 'local' fs driver AND 'passthrough'
+ security model.
+
+or
+
+ - Using 9p 'proxy' fs driver (which is running its helper daemon as
+ root).
+
+These setups were already discouraged for safety reasons before,
+however for obvious reasons we are now tightening behaviour on this.
+
+Fixes: CVE-2023-2861
+Reported-by: Yanwu Shen <ywsPlz@gmail.com>
+Reported-by: Jietao Xiao <shawtao1125@gmail.com>
+Reported-by: Jinku Li <jkli@xidian.edu.cn>
+Reported-by: Wenbo Shen <shenwenbo@zju.edu.cn>
+Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Reviewed-by: Michael Tokarev <mjt@tls.msk.ru>
+Message-Id: <E1q6w7r-0000Q0-NM@lizzy.crudebyte.com>
+
+Upstream-Status: Backport from [https://github.com/qemu/qemu/commit/10fad73a2bf1c76c8aa9d6322755e5f877d83ce5]
+CVE: CVE-2023-2861
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ fsdev/virtfs-proxy-helper.c | 27 +++++++++++++++++++++++--
+ hw/9pfs/9p-util.h | 40 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 65 insertions(+), 2 deletions(-)
+
+diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
+index 15c0e79b0..f9e4669a5 100644
+--- a/fsdev/virtfs-proxy-helper.c
++++ b/fsdev/virtfs-proxy-helper.c
+@@ -26,6 +26,7 @@
+ #include "qemu/xattr.h"
+ #include "9p-iov-marshal.h"
+ #include "hw/9pfs/9p-proxy.h"
++#include "hw/9pfs/9p-util.h"
+ #include "fsdev/9p-iov-marshal.h"
+
+ #define PROGNAME "virtfs-proxy-helper"
+@@ -338,6 +339,28 @@ static void resetugid(int suid, int sgid)
+ }
+ }
+
++/*
++ * Open regular file or directory. Attempts to open any special file are
++ * rejected.
++ *
++ * returns file descriptor or -1 on error
++ */
++static int open_regular(const char *pathname, int flags, mode_t mode)
++{
++ int fd;
++
++ fd = open(pathname, flags, mode);
++ if (fd < 0) {
++ return fd;
++ }
++
++ if (close_if_special_file(fd) < 0) {
++ return -1;
++ }
++
++ return fd;
++}
++
+ /*
+ * send response in two parts
+ * 1) ProxyHeader
+@@ -682,7 +705,7 @@ static int do_create(struct iovec *iovec)
+ if (ret < 0) {
+ goto unmarshal_err_out;
+ }
+- ret = open(path.data, flags, mode);
++ ret = open_regular(path.data, flags, mode);
+ if (ret < 0) {
+ ret = -errno;
+ }
+@@ -707,7 +730,7 @@ static int do_open(struct iovec *iovec)
+ if (ret < 0) {
+ goto err_out;
+ }
+- ret = open(path.data, flags);
++ ret = open_regular(path.data, flags, 0);
+ if (ret < 0) {
+ ret = -errno;
+ }
+diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h
+index 546f46dc7..23000e917 100644
+--- a/hw/9pfs/9p-util.h
++++ b/hw/9pfs/9p-util.h
+@@ -13,12 +13,16 @@
+ #ifndef QEMU_9P_UTIL_H
+ #define QEMU_9P_UTIL_H
+
++#include "qemu/error-report.h"
++
+ #ifdef O_PATH
+ #define O_PATH_9P_UTIL O_PATH
+ #else
+ #define O_PATH_9P_UTIL 0
+ #endif
+
++#define qemu_fstat fstat
++
+ static inline void close_preserve_errno(int fd)
+ {
+ int serrno = errno;
+@@ -26,6 +30,38 @@ static inline void close_preserve_errno(int fd)
+ errno = serrno;
+ }
+
++/**
++ * close_if_special_file() - Close @fd if neither regular file nor directory.
++ *
++ * @fd: file descriptor of open file
++ * Return: 0 on regular file or directory, -1 otherwise
++ *
++ * CVE-2023-2861: Prohibit opening any special file directly on host
++ * (especially device files), as a compromised client could potentially gain
++ * access outside exported tree under certain, unsafe setups. We expect
++ * client to handle I/O on special files exclusively on guest side.
++ */
++static inline int close_if_special_file(int fd)
++{
++ struct stat stbuf;
++
++ if (qemu_fstat(fd, &stbuf) < 0) {
++ close_preserve_errno(fd);
++ return -1;
++ }
++ if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) {
++ error_report_once(
++ "9p: broken or compromised client detected; attempt to open "
++ "special file (i.e. neither regular file, nor directory)"
++ );
++ close(fd);
++ errno = ENXIO;
++ return -1;
++ }
++
++ return 0;
++}
++
+ static inline int openat_dir(int dirfd, const char *name)
+ {
+ return openat(dirfd, name,
+@@ -56,6 +92,10 @@ again:
+ return -1;
+ }
+
++ if (close_if_special_file(fd) < 0) {
++ return -1;
++ }
++
+ serrno = errno;
+ /* O_NONBLOCK was only needed to open the file. Let's drop it. We don't
+ * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat()
+--
+2.35.7
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch
new file mode 100644
index 0000000000..30080924c8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch
@@ -0,0 +1,50 @@
+From 49f1e02bac166821c712534aaa775f50e1afe17f Mon Sep 17 00:00:00 2001
+From: zhenwei pi <pizhenwei@bytedance.com>
+Date: Thu, 3 Aug 2023 10:43:13 +0800
+Subject: [PATCH] virtio-crypto: verify src&dst buffer length for sym request
+
+For symmetric algorithms, the length of the ciphertext must be the same
+as that of the plaintext.
+The missing verification of the src_len and the dst_len in
+virtio_crypto_sym_op_helper() may lead buffer overflow/divulged.
+
+This patch is originally written by Yiming Tao for QEMU-SECURITY,
+resend it(a few changes of error message) in qemu-devel.
+
+Fixes: CVE-2023-3180
+Fixes: 04b9b37edda("virtio-crypto: add data queue processing handler")
+Cc: Gonglei <arei.gonglei@huawei.com>
+Cc: Mauro Matteo Cascella <mcascell@redhat.com>
+Cc: Yiming Tao <taoym@zju.edu.cn>
+Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
+Message-Id: <20230803024314.29962-2-pizhenwei@bytedance.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+(cherry picked from commit 9d38a8434721a6479fe03fb5afb150ca793d3980)
+Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/49f1e02bac166821c712534aaa775f50e1afe17f]
+CVE: CVE-2023-3180
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ hw/virtio/virtio-crypto.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
+index a1d122b9aa..ccaa704530 100644
+--- a/hw/virtio/virtio-crypto.c
++++ b/hw/virtio/virtio-crypto.c
+@@ -635,6 +635,11 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
+ return NULL;
+ }
+
++ if (unlikely(src_len != dst_len)) {
++ virtio_error(vdev, "sym request src len is different from dst len");
++ return NULL;
++ }
++
+ max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
+ if (unlikely(max_len > vcrypto->conf.max_size)) {
+ virtio_error(vdev, "virtio-crypto too big length");
+--
+2.40.0
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3255.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3255.patch
new file mode 100644
index 0000000000..f030df111f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3255.patch
@@ -0,0 +1,64 @@
+From d921fea338c1059a27ce7b75309d7a2e485f710b Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Tue, 4 Jul 2023 10:41:22 +0200
+Subject: [PATCH] ui/vnc-clipboard: fix infinite loop in inflate_buffer
+ (CVE-2023-3255)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+A wrong exit condition may lead to an infinite loop when inflating a
+valid zlib buffer containing some extra bytes in the `inflate_buffer`
+function. The bug only occurs post-authentication. Return the buffer
+immediately if the end of the compressed data has been reached
+(Z_STREAM_END).
+
+Fixes: CVE-2023-3255
+Fixes: 0bf41cab ("ui/vnc: clipboard support")
+Reported-by: Kevin Denis <kevin.denis@synacktiv.com>
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Tested-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-ID: <20230704084210.101822-1-mcascell@redhat.com>
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/d921fea338c1059a27ce7b75309d7a2e485f710b]
+
+CVE: CVE-2023-3255
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+
+---
+ ui/vnc-clipboard.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/ui/vnc-clipboard.c b/ui/vnc-clipboard.c
+index 8aeadfaa21..c759be3438 100644
+--- a/ui/vnc-clipboard.c
++++ b/ui/vnc-clipboard.c
+@@ -50,8 +50,11 @@ static uint8_t *inflate_buffer(uint8_t *in, uint32_t in_len, uint32_t *size)
+ ret = inflate(&stream, Z_FINISH);
+ switch (ret) {
+ case Z_OK:
+- case Z_STREAM_END:
+ break;
++ case Z_STREAM_END:
++ *size = stream.total_out;
++ inflateEnd(&stream);
++ return out;
+ case Z_BUF_ERROR:
+ out_len <<= 1;
+ if (out_len > (1 << 20)) {
+@@ -66,11 +69,6 @@ static uint8_t *inflate_buffer(uint8_t *in, uint32_t in_len, uint32_t *size)
+ }
+ }
+
+- *size = stream.total_out;
+- inflateEnd(&stream);
+-
+- return out;
+-
+ err_end:
+ inflateEnd(&stream);
+ err:
+--
+2.40.0
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3301.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3301.patch
new file mode 100644
index 0000000000..ffb5cd3861
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3301.patch
@@ -0,0 +1,60 @@
+From a0d7215e339b61c7d7a7b3fcf754954d80d93eb8 Mon Sep 17 00:00:00 2001
+From: Ani Sinha <anisinha@redhat.com>
+Date: Mon, 19 Jun 2023 12:22:09 +0530
+Subject: [PATCH] vhost-vdpa: do not cleanup the vdpa/vhost-net structures if
+ peer nic is present
+
+When a peer nic is still attached to the vdpa backend, it is too early to free
+up the vhost-net and vdpa structures. If these structures are freed here, then
+QEMU crashes when the guest is being shut down. The following call chain
+would result in an assertion failure since the pointer returned from
+vhost_vdpa_get_vhost_net() would be NULL:
+
+do_vm_stop() -> vm_state_notify() -> virtio_set_status() ->
+virtio_net_vhost_status() -> get_vhost_net().
+
+Therefore, we defer freeing up the structures until guest shutdown
+time, when qemu_cleanup() calls net_cleanup(), which then calls
+qemu_del_net_client() which would eventually call vhost_vdpa_cleanup()
+again to free up the structures. This time, the loop in net_cleanup()
+ensures that vhost_vdpa_cleanup() will be called one last time when
+all the peer nics are detached and freed.
+
+All unit tests pass with this change.
+
+CC: imammedo@redhat.com
+CC: jusual@redhat.com
+CC: mst@redhat.com
+Fixes: CVE-2023-3301
+Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2128929
+Signed-off-by: Ani Sinha <anisinha@redhat.com>
+Message-Id: <20230619065209.442185-1-anisinha@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/a0d7215e339b61c7d7a7b3fcf754954d80d93eb8]
+CVE: CVE-2023-3301
+
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ net/vhost-vdpa.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/net/vhost-vdpa.c
++++ b/net/vhost-vdpa.c
+@@ -140,6 +140,14 @@ static void vhost_vdpa_cleanup(NetClient
+ {
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+
++ /*
++ * If a peer NIC is attached, do not cleanup anything.
++ * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
++ * when the guest is shutting down.
++ */
++ if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
++ return;
++ }
+ if (s->vhost_net) {
+ vhost_net_cleanup(s->vhost_net);
+ g_free(s->vhost_net);
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch
new file mode 100644
index 0000000000..250716fcfc
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch
@@ -0,0 +1,87 @@
+From 10be627d2b5ec2d6b3dce045144aa739eef678b4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Daniel=20P=2E=20Berrang=C3=A9?= <berrange@redhat.com>
+Date: Tue, 20 Jun 2023 09:45:34 +0100
+Subject: [PATCH] io: remove io watch if TLS channel is closed during handshake
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The TLS handshake may take some time to complete, during which time an
+I/O watch might be registered with the main loop. If the owner of the
+I/O channel invokes qio_channel_close() while the handshake is waiting
+to continue, the I/O watch must be removed. Failing to remove it will
+later trigger the completion callback which the owner is not expecting
+to receive. In the case of the VNC server, this results in a SEGV as
+vnc_disconnect_start() tries to shutdown a client connection that is
+already gone / NULL.
+
+CVE-2023-3354
+Reported-by: jiangyegen <jiangyegen@huawei.com>
+Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/10be627d2b5ec2d6b3dce045144aa739eef678b4]
+CVE: CVE-2023-3354
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ include/io/channel-tls.h | 1 +
+ io/channel-tls.c | 18 ++++++++++++------
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
+index 5672479e9..26c67f17e 100644
+--- a/include/io/channel-tls.h
++++ b/include/io/channel-tls.h
+@@ -48,6 +48,7 @@ struct QIOChannelTLS {
+ QIOChannel *master;
+ QCryptoTLSSession *session;
+ QIOChannelShutdown shutdown;
++ guint hs_ioc_tag;
+ };
+
+ /**
+diff --git a/io/channel-tls.c b/io/channel-tls.c
+index 2ae1b92fc..34476e6b7 100644
+--- a/io/channel-tls.c
++++ b/io/channel-tls.c
+@@ -195,12 +195,13 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
+ }
+
+ trace_qio_channel_tls_handshake_pending(ioc, status);
+- qio_channel_add_watch_full(ioc->master,
+- condition,
+- qio_channel_tls_handshake_io,
+- data,
+- NULL,
+- context);
++ ioc->hs_ioc_tag =
++ qio_channel_add_watch_full(ioc->master,
++ condition,
++ qio_channel_tls_handshake_io,
++ data,
++ NULL,
++ context);
+ }
+ }
+
+@@ -215,6 +216,7 @@ static gboolean qio_channel_tls_handshake_io(QIOChannel *ioc,
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(
+ qio_task_get_source(task));
+
++ tioc->hs_ioc_tag = 0;
+ g_free(data);
+ qio_channel_tls_handshake_task(tioc, task, context);
+
+@@ -373,6 +375,10 @@ static int qio_channel_tls_close(QIOChannel *ioc,
+ {
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc);
+
++ if (tioc->hs_ioc_tag) {
++ g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove);
++ }
++
+ return qio_channel_close(tioc->master, errp);
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-42467.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-42467.patch
new file mode 100644
index 0000000000..d53683faa7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-42467.patch
@@ -0,0 +1,46 @@
+From 7cfcc79b0ab800959716738aff9419f53fc68c9c Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Mon, 25 Sep 2023 11:18:54 +0200
+Subject: [PATCH] hw/scsi/scsi-disk: Disallow block sizes smaller than 512
+ [CVE-2023-42467]
+
+We are doing things like
+
+ nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
+
+in the code here (e.g. in scsi_disk_emulate_mode_sense()), so if
+the blocksize is smaller than BDRV_SECTOR_SIZE (=512), this crashes
+with a division by 0 exception. Thus disallow block sizes of 256
+bytes to avoid this situation.
+
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1813
+CVE: 2023-42467
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Message-ID: <20230925091854.49198-1-thuth@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2023-42467
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/7cfcc79b0ab800959716738aff9419f53fc68c9c]
+Signed-off-by: Poonam Jadhav <poonam.jadhav@kpit.com>
+---
+ hw/scsi/scsi-disk.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
+index e0d79c7966c..477ee2bcd47 100644
+--- a/hw/scsi/scsi-disk.c
++++ b/hw/scsi/scsi-disk.c
+@@ -1628,9 +1628,10 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
+ * Since the existing code only checks/updates bits 8-15 of the block
+ * size, restrict ourselves to the same requirement for now to ensure
+ * that a block size set by a block descriptor and then read back by
+- * a subsequent SCSI command will be the same
++ * a subsequent SCSI command will be the same. Also disallow a block
++ * size of 256 since we cannot handle anything below BDRV_SECTOR_SIZE.
+ */
+- if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
++ if (bs && !(bs & ~0xfe00) && bs != s->qdev.blocksize) {
+ s->qdev.blocksize = bs;
+ trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
+ }
+--
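(For illustration only, not QEMU code: a sketch of the failure mode the
message above describes, assuming the same integer-division pattern. With a
256-byte block size, blocksize / BDRV_SECTOR_SIZE is 0 under integer
division, and the subsequent division faults.)

    #include <stdio.h>
    #include <stdint.h>

    #define BDRV_SECTOR_SIZE 512

    int main(void)
    {
        uint32_t blocksize  = 256;    /* guest-chosen, smaller than 512 */
        uint64_t nb_sectors = 1024;

        uint32_t sectors_per_block = blocksize / BDRV_SECTOR_SIZE;  /* == 0 */
        if (sectors_per_block == 0) {
            fprintf(stderr, "blocksize %u < %u would divide by zero\n",
                    blocksize, BDRV_SECTOR_SIZE);
            return 1;
        }
        nb_sectors /= sectors_per_block;  /* the crash in the unpatched code */
        printf("%llu sectors\n", (unsigned long long)nb_sectors);
        return 0;
    }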
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch
new file mode 100644
index 0000000000..c5ea9d739a
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch
@@ -0,0 +1,112 @@
+From 7d7512019fc40c577e2bdd61f114f31a9eb84a8e Mon Sep 17 00:00:00 2001
+From: Fiona Ebner <f.ebner@proxmox.com>
+Date: Wed, 6 Sep 2023 15:09:21 +0200
+Subject: [PATCH] hw/ide: reset: cancel async DMA operation before resetting
+ state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If there is a pending DMA operation during ide_bus_reset(), the fact
+that the IDEState is already reset before the operation is canceled
+can be problematic. In particular, ide_dma_cb() might be called and
+then use the reset IDEState which contains the signature after the
+reset. When used to construct the IO operation this leads to
+ide_get_sector() returning 0 and nsector being 1. This is particularly
+bad, because a write command will thus destroy the first sector which
+often contains a partition table or similar.
+
+Traces showing the unsolicited write happening with IDEState
+0x5595af6949d0 being used after reset:
+
+> ahci_port_write ahci(0x5595af6923f0)[0]: port write [reg:PxSCTL] @ 0x2c: 0x00000300
+> ahci_reset_port ahci(0x5595af6923f0)[0]: reset port
+> ide_reset IDEstate 0x5595af6949d0
+> ide_reset IDEstate 0x5595af694da8
+> ide_bus_reset_aio aio_cancel
+> dma_aio_cancel dbs=0x7f64600089a0
+> dma_blk_cb dbs=0x7f64600089a0 ret=0
+> dma_complete dbs=0x7f64600089a0 ret=0 cb=0x5595acd40b30
+> ahci_populate_sglist ahci(0x5595af6923f0)[0]
+> ahci_dma_prepare_buf ahci(0x5595af6923f0)[0]: prepare buf limit=512 prepared=512
+> ide_dma_cb IDEState 0x5595af6949d0; sector_num=0 n=1 cmd=DMA WRITE
+> dma_blk_io dbs=0x7f6420802010 bs=0x5595ae2c6c30 offset=0 to_dev=1
+> dma_blk_cb dbs=0x7f6420802010 ret=0
+
+> (gdb) p *qiov
+> $11 = {iov = 0x7f647c76d840, niov = 1, {{nalloc = 1, local_iov = {iov_base = 0x0,
+> iov_len = 512}}, {__pad = "\001\000\000\000\000\000\000\000\000\000\000",
+> size = 512}}}
+> (gdb) bt
+> #0 blk_aio_pwritev (blk=0x5595ae2c6c30, offset=0, qiov=0x7f6420802070, flags=0,
+> cb=0x5595ace6f0b0 <dma_blk_cb>, opaque=0x7f6420802010)
+> at ../block/block-backend.c:1682
+> #1 0x00005595ace6f185 in dma_blk_cb (opaque=0x7f6420802010, ret=<optimized out>)
+> at ../softmmu/dma-helpers.c:179
+> #2 0x00005595ace6f778 in dma_blk_io (ctx=0x5595ae0609f0,
+> sg=sg@entry=0x5595af694d00, offset=offset@entry=0, align=align@entry=512,
+> io_func=io_func@entry=0x5595ace6ee30 <dma_blk_write_io_func>,
+> io_func_opaque=io_func_opaque@entry=0x5595ae2c6c30,
+> cb=0x5595acd40b30 <ide_dma_cb>, opaque=0x5595af6949d0,
+> dir=DMA_DIRECTION_TO_DEVICE) at ../softmmu/dma-helpers.c:244
+> #3 0x00005595ace6f90a in dma_blk_write (blk=0x5595ae2c6c30,
+> sg=sg@entry=0x5595af694d00, offset=offset@entry=0, align=align@entry=512,
+> cb=cb@entry=0x5595acd40b30 <ide_dma_cb>, opaque=opaque@entry=0x5595af6949d0)
+> at ../softmmu/dma-helpers.c:280
+> #4 0x00005595acd40e18 in ide_dma_cb (opaque=0x5595af6949d0, ret=<optimized out>)
+> at ../hw/ide/core.c:953
+> #5 0x00005595ace6f319 in dma_complete (ret=0, dbs=0x7f64600089a0)
+> at ../softmmu/dma-helpers.c:107
+> #6 dma_blk_cb (opaque=0x7f64600089a0, ret=0) at ../softmmu/dma-helpers.c:127
+> #7 0x00005595ad12227d in blk_aio_complete (acb=0x7f6460005b10)
+> at ../block/block-backend.c:1527
+> #8 blk_aio_complete (acb=0x7f6460005b10) at ../block/block-backend.c:1524
+> #9 blk_aio_write_entry (opaque=0x7f6460005b10) at ../block/block-backend.c:1594
+> #10 0x00005595ad258cfb in coroutine_trampoline (i0=<optimized out>,
+> i1=<optimized out>) at ../util/coroutine-ucontext.c:177
+
+CVE: CVE-2023-5088
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/7d7512019fc40c577e2bdd61f114f31a9eb84a8e]
+
+Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Tested-by: simon.rowe@nutanix.com
+Message-ID: <20230906130922.142845-1-f.ebner@proxmox.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Sourav Pramanik <sourav.pramanik@kpit.com>
+---
+ hw/ide/core.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/hw/ide/core.c b/hw/ide/core.c
+index b5e0dcd29b2..63ba665f3d2 100644
+--- a/hw/ide/core.c
++++ b/hw/ide/core.c
+@@ -2515,19 +2515,19 @@ static void ide_dummy_transfer_stop(IDEState *s)
+
+ void ide_bus_reset(IDEBus *bus)
+ {
+- bus->unit = 0;
+- bus->cmd = 0;
+- ide_reset(&bus->ifs[0]);
+- ide_reset(&bus->ifs[1]);
+- ide_clear_hob(bus);
+-
+- /* pending async DMA */
++ /* pending async DMA - needs the IDEState before it is reset */
+ if (bus->dma->aiocb) {
+ trace_ide_bus_reset_aio();
+ blk_aio_cancel(bus->dma->aiocb);
+ bus->dma->aiocb = NULL;
+ }
+
++ bus->unit = 0;
++ bus->cmd = 0;
++ ide_reset(&bus->ifs[0]);
++ ide_reset(&bus->ifs[1]);
++ ide_clear_hob(bus);
++
+ /* reset dma provider too */
+ if (bus->dma->ops->reset) {
+ bus->dma->ops->reset(bus->dma);
+--
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-6683.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-6683.patch
new file mode 100644
index 0000000000..e528574076
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-6683.patch
@@ -0,0 +1,92 @@
+From 405484b29f6548c7b86549b0f961b906337aa68a Mon Sep 17 00:00:00 2001
+From: Fiona Ebner <f.ebner@proxmox.com>
+Date: Wed, 24 Jan 2024 11:57:48 +0100
+Subject: [PATCH] ui/clipboard: mark type as not available when there is no
+ data
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+With VNC, a client can send a non-extended VNC_MSG_CLIENT_CUT_TEXT
+message with len=0. In qemu_clipboard_set_data(), the clipboard info
+will be updated setting data to NULL (because g_memdup(data, size)
+returns NULL when size is 0). If the client does not set the
+VNC_ENCODING_CLIPBOARD_EXT feature when setting up the encodings, then
+the 'request' callback for the clipboard peer is not initialized.
+Later, because data is NULL, qemu_clipboard_request() can be reached
+via vdagent_chr_write() and vdagent_clipboard_recv_request() and
+there, the code attempts to call the clipboard owner's 'request'
+callback, but that callback is a NULL pointer.
+
+In particular, this can happen when using the KRDC (22.12.3) VNC
+client.
+
+Another scenario leading to the same issue is with two clients (say
+noVNC and KRDC):
+
+The noVNC client sets the extension VNC_FEATURE_CLIPBOARD_EXT and
+initializes its cbpeer.
+
+The KRDC client does not, but triggers a vnc_client_cut_text() (note
+it's not the _ext variant). There, a new clipboard info with it as
+the 'owner' is created and passed to qemu_clipboard_set_data(),
+which in turn calls qemu_clipboard_update() with that info.
+
+In qemu_clipboard_update(), the notifier for the noVNC client will be
+called, i.e. vnc_clipboard_notify() and also set vs->cbinfo for the
+noVNC client. The 'owner' in that clipboard info is the clipboard peer
+for the KRDC client, which did not initialize the 'request' function.
+That sounds correct to me; it is the owner of that clipboard info.
+
+Then when noVNC sends a VNC_MSG_CLIENT_CUT_TEXT message (it did set
+the VNC_FEATURE_CLIPBOARD_EXT feature correctly, so a check for it
+passes), that clipboard info is passed to qemu_clipboard_request() and
+the original segfault still happens.
+
+Fix the issue by handling updates with size 0 differently. In
+particular, mark in the clipboard info that the type is not available.
+
+While at it, switch to g_memdup2(), because g_memdup() is deprecated.
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2023-6683
+Reported-by: Markus Frank <m.frank@proxmox.com>
+Suggested-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Tested-by: Markus Frank <m.frank@proxmox.com>
+Message-ID: <20240124105749.204610-1-f.ebner@proxmox.com>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/405484b29f6548c7b86549b0f961b906337aa68a]
+CVE: CVE-2023-6683
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ui/clipboard.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/ui/clipboard.c b/ui/clipboard.c
+index 3d14bffaf80..b3f6fa3c9e1 100644
+--- a/ui/clipboard.c
++++ b/ui/clipboard.c
+@@ -163,9 +163,15 @@ void qemu_clipboard_set_data(QemuClipboardPeer *peer,
+ }
+
+ g_free(info->types[type].data);
+- info->types[type].data = g_memdup(data, size);
+- info->types[type].size = size;
+- info->types[type].available = true;
++ if (size) {
++ info->types[type].data = g_memdup2(data, size);
++ info->types[type].size = size;
++ info->types[type].available = true;
++ } else {
++ info->types[type].data = NULL;
++ info->types[type].size = 0;
++ info->types[type].available = false;
++ }
+
+ if (update) {
+ qemu_clipboard_update(info);
+--
+GitLab
+
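(Illustrative sketch, not QEMU code: g_memdup()/g_memdup2() return NULL for a
zero-byte copy, which is why a len=0 update has to be recorded as "type not
available" rather than "available with data == NULL".)

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        const char text[] = "hello";
        gpointer copy = g_memdup2(text, 0);  /* size 0 -> NULL, not an empty buffer */
        printf("copy = %p\n", copy);
        g_free(copy);                        /* g_free(NULL) is a no-op */
        return 0;
    }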
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-6693.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-6693.patch
new file mode 100644
index 0000000000..b91f2e6902
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-6693.patch
@@ -0,0 +1,74 @@
+From 2220e8189fb94068dbad333228659fbac819abb0 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Tue, 2 Jan 2024 11:29:01 +0800
+Subject: [PATCH] virtio-net: correctly copy vnet header when flushing TX
+
+When HASH_REPORT is negotiated, the guest_hdr_len might be larger than
+the size of the mergeable rx buffer header. Using
+virtio_net_hdr_mrg_rxbuf during the header swap might lead a stack
+overflow in this case. Fixing this by using virtio_net_hdr_v1_hash
+instead.
+
+Reported-by: Xiao Lei <leixiao.nop@zju.edu.cn>
+Cc: Yuri Benditovich <yuri.benditovich@daynix.com>
+Cc: qemu-stable@nongnu.org
+Cc: Mauro Matteo Cascella <mcascell@redhat.com>
+Fixes: CVE-2023-6693
+Fixes: e22f0603fb2f ("virtio-net: reference implementation of hash report")
+Reviewed-by: Michael Tokarev <mjt@tls.msk.ru>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/2220e8189fb94068dbad333228659fbac819abb0]
+CVE: CVE-2023-6693
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ hw/net/virtio-net.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
+index e1f474883..42e66697f 100644
+--- a/hw/net/virtio-net.c
++++ b/hw/net/virtio-net.c
+@@ -600,6 +600,11 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
+
+ n->mergeable_rx_bufs = mergeable_rx_bufs;
+
++ /*
++ * Note: when extending the vnet header, please make sure to
++ * change the vnet header copying logic in virtio_net_flush_tx()
++ * as well.
++ */
+ if (version_1) {
+ n->guest_hdr_len = hash_report ?
+ sizeof(struct virtio_net_hdr_v1_hash) :
+@@ -2520,7 +2525,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
+ ssize_t ret;
+ unsigned int out_num;
+ struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
+- struct virtio_net_hdr_mrg_rxbuf mhdr;
++ struct virtio_net_hdr_v1_hash vhdr;
+
+ elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
+ if (!elem) {
+@@ -2537,7 +2542,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
+ }
+
+ if (n->has_vnet_hdr) {
+- if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
++ if (iov_to_buf(out_sg, out_num, 0, &vhdr, n->guest_hdr_len) <
+ n->guest_hdr_len) {
+ virtio_error(vdev, "virtio-net header incorrect");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+@@ -2545,8 +2550,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
+ return -EINVAL;
+ }
+ if (n->needs_vnet_hdr_swap) {
+- virtio_net_hdr_swap(vdev, (void *) &mhdr);
+- sg2[0].iov_base = &mhdr;
++ virtio_net_hdr_swap(vdev, (void *) &vhdr);
++ sg2[0].iov_base = &vhdr;
+ sg2[0].iov_len = n->guest_hdr_len;
+ out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+ out_sg, out_num,
+--
+2.34.1
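(A minimal sketch of the hazard described above; the struct layouts here are
assumptions for illustration, not the QEMU/kernel definitions. Copying
guest_hdr_len bytes into a local sized for the smaller mergeable-rxbuf header
can overflow the stack; sizing the local for the largest header, the v1_hash
variant, is what the fix does.)

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* assumed layouts, for illustration only */
    struct hdr_mrg_rxbuf { uint8_t bytes[12]; };
    struct hdr_v1_hash   { struct hdr_mrg_rxbuf base;
                           uint32_t hash_value;
                           uint16_t hash_report;
                           uint16_t padding; };

    int main(void)
    {
        uint8_t guest_data[sizeof(struct hdr_v1_hash)] = { 0 };
        size_t guest_hdr_len = sizeof(struct hdr_v1_hash); /* HASH_REPORT on */

        struct hdr_v1_hash local;                   /* large enough either way */
        memcpy(&local, guest_data, guest_hdr_len);  /* safe: no overflow */
        printf("copied %zu bytes into a %zu-byte local\n",
               guest_hdr_len, sizeof(local));
        return 0;
    }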
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2024-24474.patch b/meta/recipes-devtools/qemu/qemu/CVE-2024-24474.patch
new file mode 100644
index 0000000000..e890fe56cf
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2024-24474.patch
@@ -0,0 +1,44 @@
+From 77668e4b9bca03a856c27ba899a2513ddf52bb52 Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Wed, 13 Sep 2023 21:44:09 +0100
+Subject: [PATCH] esp: restrict non-DMA transfer length to that of available
+ data
+
+In the case where a SCSI layer transfer is incorrectly terminated, it is
+possible for a TI command to cause a SCSI buffer overflow due to the
+expected transfer data length being less than the available data in the
+FIFO. When this occurs the unsigned async_len variable underflows and
+becomes a large offset which writes past the end of the allocated SCSI
+buffer.
+
+Restrict the non-DMA transfer length to be the smallest of the expected
+transfer length and the available FIFO data to ensure that it is no longer
+possible for the SCSI buffer overflow to occur.
+
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1810
+Reviewed-by: Thomas Huth <thuth@redhat.com>
+Message-ID: <20230913204410.65650-3-mark.cave-ayland@ilande.co.uk>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/77668e4b9bca03a856c27ba899a2513ddf52bb52]
+CVE: CVE-2024-24474
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/scsi/esp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
+index 4218a6a96054..9b11d8c5738a 100644
+--- a/hw/scsi/esp.c
++++ b/hw/scsi/esp.c
+@@ -759,7 +759,8 @@ static void esp_do_nodma(ESPState *s)
+ }
+
+ if (to_device) {
+- len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
++ len = MIN(s->async_len, ESP_FIFO_SZ);
++ len = MIN(len, fifo8_num_used(&s->fifo));
+ esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
+ s->async_buf += len;
+ s->async_len -= len;
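(Illustration of the underflow described above, with made-up values rather
than ESP code: consuming more FIFO data than the expected transfer length
makes the unsigned length wrap to a huge value; clamping to the smaller of
the two, as the hunk above does, prevents the wrap.)

    #include <stdio.h>
    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint32_t async_len = 16;    /* bytes the SCSI layer still expects */
        uint32_t fifo_used = 64;    /* bytes available in the FIFO */

        uint32_t bad_len  = fifo_used;                 /* unpatched */
        uint32_t good_len = MIN(async_len, fifo_used); /* patched */

        printf("unpatched: async_len becomes %u\n", async_len - bad_len);
        printf("patched:   async_len becomes %u\n", async_len - good_len);
        return 0;
    }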
diff --git a/meta/recipes-devtools/qemu/qemu/scsi-disk-allow-MODE-SELECT-block-desriptor-to-set-the-block-size.patch b/meta/recipes-devtools/qemu/qemu/scsi-disk-allow-MODE-SELECT-block-desriptor-to-set-the-block-size.patch
new file mode 100644
index 0000000000..d8e48d07dd
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/scsi-disk-allow-MODE-SELECT-block-desriptor-to-set-the-block-size.patch
@@ -0,0 +1,54 @@
+From 356c4c441ec01910314c5867c680bef80d1dd373 Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Wed, 22 Jun 2022 11:53:12 +0100
+Subject: [PATCH] scsi-disk: allow MODE SELECT block descriptor to set the
+ block size
+
+The MODE SELECT command can contain an optional block descriptor that can be used
+to set the device block size. If the block descriptor is present then update the
+block size on the SCSI device accordingly.
+
+This allows CDROMs to be used with A/UX, which requires a CDROM drive
+capable of switching from a 2048 byte sector size to a 512 byte sector size.
+
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Message-Id: <20220622105314.802852-13-mark.cave-ayland@ilande.co.uk>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Comment: Patch is refreshed
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/356c4c441ec01910314c5867c680bef80d1dd373]
+Signed-off-by: Poonam Jadhav <poonam.jadhav@kpit.com>
+---
+ hw/scsi/scsi-disk.c | 6 ++++++
+ hw/scsi/trace-events | 1 +
+ 2 files changed, 7 insertions(+)
+
+diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
+index db27e834dae3..f5cdb9ad4b54 100644
+--- a/hw/scsi/scsi-disk.c
++++ b/hw/scsi/scsi-disk.c
+@@ -1616,6 +1616,12 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
+ goto invalid_param;
+ }
+
++ /* Allow changing the block size */
++ if (bd_len && p[6] != (s->qdev.blocksize >> 8)) {
++ s->qdev.blocksize = p[6] << 8;
++ trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
++ }
++
+ len -= bd_len;
+ p += bd_len;
+
+diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
+index 8e927ff62de1..ab238293f0da 100644
+--- a/hw/scsi/trace-events
++++ b/hw/scsi/trace-events
+@@ -338,6 +338,7 @@scsi_disk_dma_command_READ(uint64_t lba, uint32_t len) "Read (sector %" PRId64 ", count %u)"
+ scsi_disk_dma_command_WRITE(const char *cmd, uint64_t lba, int len) "Write %s(sector %" PRId64 ", count %u)"
+ scsi_disk_new_request(uint32_t lun, uint32_t tag, const char *line) "Command: lun=%d tag=0x%x data=%s"
+ scsi_disk_aio_sgio_command(uint32_t tag, uint8_t cmd, uint64_t lba, int len, uint32_t timeout) "disk aio sgio: tag=0x%x cmd=0x%x (sector %" PRId64 ", count %d) timeout=%u"
++scsi_disk_mode_select_set_blocksize(int blocksize) "set block size to %d"
+
+ # scsi-generic.c
+ scsi_generic_command_complete_noio(void *req, uint32_t tag, int statuc) "Command complete %p tag=0x%x status=%d"
diff --git a/meta/recipes-devtools/qemu/qemu/scsi-disk-ensure-block-size-is-non-zero-and-changes-limited-to-bits-8-15.patch b/meta/recipes-devtools/qemu/qemu/scsi-disk-ensure-block-size-is-non-zero-and-changes-limited-to-bits-8-15.patch
new file mode 100644
index 0000000000..1e1be683fc
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/scsi-disk-ensure-block-size-is-non-zero-and-changes-limited-to-bits-8-15.patch
@@ -0,0 +1,67 @@
+From 55794c904df723109b228da28b5db778e0df3110 Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Sat, 30 Jul 2022 13:26:56 +0100
+Subject: [PATCH] scsi-disk: ensure block size is non-zero and changes limited
+ to bits 8-15
+
+The existing code assumes that the block size can be generated from p[1] << 8
+in multiple places which ignores the top and bottom 8 bits. If the block size
+is allowed to be set to an arbitrary value then this causes a mismatch
+between the value written by the guest in the block descriptor and the value
+subsequently read back using READ CAPACITY causing the guest to generate
+requests that can crash QEMU.
+
+For now restrict block size changes to bits 8-15 and also ignore requests to
+set the block size to 0 which causes the SCSI emulation to crash in at least
+one place with a divide by zero error.
+
+Fixes: 356c4c441e ("scsi-disk: allow MODE SELECT block descriptor to set the block size")
+Closes: https://gitlab.com/qemu-project/qemu/-/issues/1112
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Message-Id: <20220730122656.253448-3-mark.cave-ayland@ilande.co.uk>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Comment: Patch is refreshed
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/55794c904df723109b228da28b5db778e0df3110]
+Signed-off-by: Poonam Jadhav <poonam.jadhav@kpit.com>
+---
+ hw/scsi/scsi-disk.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
+index 3027ac3b1ed6..efee6739f9ad 100644
+--- a/hw/scsi/scsi-disk.c
++++ b/hw/scsi/scsi-disk.c
+@@ -1532,7 +1532,7 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
+ int cmd = r->req.cmd.buf[0];
+ int len = r->req.cmd.xfer;
+ int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
+- int bd_len;
++ int bd_len, bs;
+ int pass;
+
+ /* We only support PF=1, SP=0. */
+@@ -1617,9 +1617,19 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
+ }
+
+ /* Allow changing the block size */
+- if (bd_len && p[6] != (s->qdev.blocksize >> 8)) {
+- s->qdev.blocksize = p[6] << 8;
+- trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
++ if (bd_len) {
++ bs = p[5] << 16 | p[6] << 8 | p[7];
++
++ /*
++ * Since the existing code only checks/updates bits 8-15 of the block
++ * size, restrict ourselves to the same requirement for now to ensure
++ * that a block size set by a block descriptor and then read back by
++ * a subsequent SCSI command will be the same
++ */
++ if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
++ s->qdev.blocksize = bs;
++ trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
++ }
+ }
+
+ len -= bd_len;
+
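(A small sketch of the acceptance check introduced above; illustrative code,
not the QEMU implementation. The 24-bit block size from the block descriptor
is accepted only when it is non-zero and only bits 8-15 are set.)

    #include <stdio.h>
    #include <stdint.h>

    static int blocksize_acceptable(uint32_t bs)
    {
        return bs != 0 && (bs & ~0xff00u) == 0;
    }

    int main(void)
    {
        printf("512  -> %d\n", blocksize_acceptable(512));   /* 1: 0x0200 */
        printf("2048 -> %d\n", blocksize_acceptable(2048));  /* 1: 0x0800 */
        printf("0    -> %d\n", blocksize_acceptable(0));     /* 0 */
        printf("513  -> %d\n", blocksize_acceptable(513));   /* 0: low bits set */
        return 0;
    }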
diff --git a/meta/recipes-devtools/qemu/qemu_6.2.0.bb b/meta/recipes-devtools/qemu/qemu_6.2.0.bb
index 9f7fad9886..42e133967e 100644
--- a/meta/recipes-devtools/qemu/qemu_6.2.0.bb
+++ b/meta/recipes-devtools/qemu/qemu_6.2.0.bb
@@ -15,12 +15,12 @@ EXTRA_OECONF:append:class-target:mipsarcho32 = "${@bb.utils.contains('BBEXTENDCU
EXTRA_OECONF:append:class-nativesdk = " --target-list=${@get_qemu_target_list(d)}"
PACKAGECONFIG ??= " \
- fdt sdl kvm pie \
+ fdt sdl kvm pie slirp \
${@bb.utils.filter('DISTRO_FEATURES', 'alsa xen', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virglrenderer epoxy', '', d)} \
${@bb.utils.filter('DISTRO_FEATURES', 'seccomp', d)} \
"
-PACKAGECONFIG:class-nativesdk ??= "fdt sdl kvm pie \
+PACKAGECONFIG:class-nativesdk ??= "fdt sdl kvm pie slirp \
${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virglrenderer epoxy', '', d)} \
"
# ppc32 hosts are no longer supported in qemu
diff --git a/meta/recipes-devtools/quilt/quilt.inc b/meta/recipes-devtools/quilt/quilt.inc
index 07611e6d85..72deb24915 100644
--- a/meta/recipes-devtools/quilt/quilt.inc
+++ b/meta/recipes-devtools/quilt/quilt.inc
@@ -12,6 +12,9 @@ SRC_URI = "${SAVANNAH_GNU_MIRROR}/quilt/quilt-${PV}.tar.gz \
file://Makefile \
file://test.sh \
file://0001-tests-Allow-different-output-from-mv.patch \
+ file://fix-grep-3.8.patch \
+ file://faildiff-order.patch \
+ file://0001-test-Fix-a-race-condition-in-merge.test.patch \
"
SRC_URI:append:class-target = " file://gnu_patch_test_fix_target.patch"
diff --git a/meta/recipes-devtools/quilt/quilt/0001-test-Fix-a-race-condition-in-merge.test.patch b/meta/recipes-devtools/quilt/quilt/0001-test-Fix-a-race-condition-in-merge.test.patch
new file mode 100644
index 0000000000..01d4c8befc
--- /dev/null
+++ b/meta/recipes-devtools/quilt/quilt/0001-test-Fix-a-race-condition-in-merge.test.patch
@@ -0,0 +1,48 @@
+From c1ce964f3e9312100a60f03c1e1fdd601e1911f2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=C4=90o=C3=A0n=20Tr=E1=BA=A7n=20C=C3=B4ng=20Danh?=
+ <congdanhqx@gmail.com>
+Date: Tue, 28 Feb 2023 18:45:15 +0100
+Subject: [PATCH] test: Fix a race condition in merge.test
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Just like commit 4dfe7f9 (test: Fix a race condition, 2023-01-20),
+this fixes a test race when stdout and stderr output can arrive in any order.
+
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/quilt.git/commit/?id=c1ce964f3e9312100a60f03c1e1fdd601e1911f2]
+Signed-off-by: Đoàn Trần Công Danh <congdanhqx@gmail.com>
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+---
+ test/merge.test | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/test/merge.test b/test/merge.test
+index c64b33d..2e67d4f 100644
+--- a/test/merge.test
++++ b/test/merge.test
+@@ -39,8 +39,9 @@ Test the patch merging functionality of `quilt diff'.
+ > Applying patch %{P}c.diff
+ > Now at patch %{P}c.diff
+
+- $ quilt diff -P b.diff | grep -v "^\\(---\\|+++\\)"
++ $ quilt diff -P b.diff >/dev/null
+ > Warning: more recent patches modify files in patch %{P}b.diff
++ $ quilt diff -P b.diff 2>/dev/null | grep -v "^\\(---\\|+++\\)"
+ >~ Index: [^/]+/abc\.txt
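(A minimal, stand-alone illustration of the warning class this patch fixes,
not rsync code: in C, an empty parameter list "()" is a declaration without a
prototype and triggers -Wstrict-prototypes under Clang 15+, while "(void)" is
a proper prototype.)

    #include <stdio.h>

    /* static int answer();   would warn: declaration without a prototype */
    static int answer(void)   /* prototyped: accepted with -Wstrict-prototypes */
    {
        return 42;
    }

    int main(void)
    {
        printf("%d\n", answer());
        return 0;
    }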
+ > ===================================================================
+ > @@ -1,3 +1,3 @@
+@@ -49,8 +50,9 @@ Test the patch merging functionality of `quilt diff'.
+ > +b+
+ > c
+
+- $ quilt diff --combine a.diff -P b.diff | grep -v "^\\(---\\|+++\\)"
++ $ quilt diff --combine a.diff -P b.diff >/dev/null
+ > Warning: more recent patches modify files in patch %{P}b.diff
++ $ quilt diff --combine a.diff -P b.diff 2>/dev/null | grep -v "^\\(---\\|+++\\)"
+ >~ Index: [^/]+/abc\.txt
+ > ===================================================================
+ > @@ -1,3 +1,3 @@
+--
+2.40.0
+
diff --git a/meta/recipes-devtools/quilt/quilt/faildiff-order.patch b/meta/recipes-devtools/quilt/quilt/faildiff-order.patch
new file mode 100644
index 0000000000..f22065a250
--- /dev/null
+++ b/meta/recipes-devtools/quilt/quilt/faildiff-order.patch
@@ -0,0 +1,41 @@
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 4dfe7f9e702c85243a71e4de267a13e434b6d6c2 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Fri, 20 Jan 2023 12:56:08 +0100
+Subject: [PATCH] test: Fix a race condition
+
+The test suite does not differentiate between stdout and stderr. When
+messages are printed to both, the order in which they will reach us
+is apparently not guaranteed. Ideally this would be deterministic, but
+until then, explicitly test stdout and stderr separately in the test
+case itself. Otherwise the test suite fails randomly, which is a pain
+for distribution package maintainers.
+
+This fixes bug #63651 reported by Ross Burton:
+https://savannah.nongnu.org/bugs/index.php?63651
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+---
+ test/faildiff.test | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/test/faildiff.test b/test/faildiff.test
+index 5afb8e3..0444c15 100644
+--- a/test/faildiff.test
++++ b/test/faildiff.test
+@@ -27,8 +27,9 @@ What happens on binary files?
+ > File test.bin added to patch %{P}test.diff
+
+ $ printf "\\003\\000\\001" > test.bin
+- $ quilt diff -pab --no-index
++ $ quilt diff -pab --no-index 2>/dev/null
+ >~ (Files|Binary files) a/test\.bin and b/test\.bin differ
++ $ quilt diff -pab --no-index >/dev/null
+ > Diff failed on file 'test.bin', aborting
+ $ echo %{?}
+ > 1
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/quilt/quilt/fix-grep-3.8.patch b/meta/recipes-devtools/quilt/quilt/fix-grep-3.8.patch
new file mode 100644
index 0000000000..68a4b4c195
--- /dev/null
+++ b/meta/recipes-devtools/quilt/quilt/fix-grep-3.8.patch
@@ -0,0 +1,144 @@
+From f73f8d7f71de2878d3f92881a5fcb8eafd78cb5f Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Fri, 9 Sep 2022 10:10:37 +0200
+Subject: Avoid warnings with grep 3.8
+
+GNU grep version 3.8 became more strict about needless quoting in
+patterns. We have one occurrence of that in quilt, where "/"
+characters are being quoted by default. There are cases where they
+indeed need to be quoted (typically when used in a sed s/// command)
+but most of the time they do not, and this results in the following
+warning:
+
+grep: warning: stray \ before /
+
+So rename quote_bre() to quote_sed_re(), and introduce
+quote_grep_re() which does not quote "/".
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/quilt.git/commit/?id=f73f8d7f71de2878d3f92881a5fcb8eafd78cb5f]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+---
+ quilt/diff.in | 2 +-
+ quilt/patches.in | 2 +-
+ quilt/scripts/patchfns.in | 20 +++++++++++++-------
+ quilt/upgrade.in | 4 ++--
+ 4 files changed, 17 insertions(+), 11 deletions(-)
+
+diff --git a/quilt/diff.in b/quilt/diff.in
+index e90dc33..07788ff 100644
+--- a/quilt/diff.in
++++ b/quilt/diff.in
+@@ -255,7 +255,7 @@ then
+ # Add all files in the snapshot into the file list (they may all
+ # have changed).
+ files=( $(find $QUILT_PC/$snap_subdir -type f \
+- | sed -e "s/^$(quote_bre $QUILT_PC/$snap_subdir/)//" \
++ | sed -e "s/^$(quote_sed_re $QUILT_PC/$snap_subdir/)//" \
+ | sort) )
+ printf "%s\n" "${files[@]}" >&4
+ unset files
+diff --git a/quilt/patches.in b/quilt/patches.in
+index bb17a46..eac45a9 100644
+--- a/quilt/patches.in
++++ b/quilt/patches.in
+@@ -60,7 +60,7 @@ scan_unapplied()
+ # Quote each file name only once
+ for file in "${opt_files[@]}"
+ do
+- files_bre[${#files_bre[@]}]=$(quote_bre "$file")
++ files_bre[${#files_bre[@]}]=$(quote_grep_re "$file")
+ done
+
+ # "Or" all files in a single pattern
+diff --git a/quilt/scripts/patchfns.in b/quilt/scripts/patchfns.in
+index c2d5f9d..1bd7233 100644
+--- a/quilt/scripts/patchfns.in
++++ b/quilt/scripts/patchfns.in
+@@ -78,8 +78,14 @@ array_join()
+ done
+ }
+
+-# Quote a string for use in a basic regular expression.
+-quote_bre()
++# Quote a string for use in a regular expression for a grep pattern.
++quote_grep_re()
++{
++ echo "$1" | sed -e 's:\([][^$.*\\]\):\\\1:g'
++}
++
++# Quote a string for use in a regular expression for a sed s/// command.
++quote_sed_re()
+ {
+ echo "$1" | sed -e 's:\([][^$/.*\\]\):\\\1:g'
+ }
+@@ -215,7 +221,7 @@ patch_in_series()
+
+ if [ -e "$SERIES" ]
+ then
+- grep -q "^$(quote_bre $patch)\([ \t]\|$\)" "$SERIES"
++ grep -q "^$(quote_grep_re $patch)\([ \t]\|$\)" "$SERIES"
+ else
+ return 1
+ fi
+@@ -365,7 +371,7 @@ is_applied()
+ {
+ local patch=$1
+ [ -e $DB ] || return 1
+- grep -q "^$(quote_bre $patch)\$" $DB
++ grep -q "^$(quote_grep_re $patch)\$" $DB
+ }
+
+ applied_patches()
+@@ -465,7 +471,7 @@ remove_from_db()
+ local tmpfile
+ if tmpfile=$(gen_tempfile)
+ then
+- grep -v "^$(quote_bre $patch)\$" $DB > $tmpfile
++ grep -v "^$(quote_grep_re $patch)\$" $DB > $tmpfile
+ cat $tmpfile > $DB
+ rm -f $tmpfile
+ [ -s $DB ] || rm -f $DB
+@@ -520,7 +526,7 @@ find_patch()
+ fi
+
+ local patch=${1#$SUBDIR_DOWN$QUILT_PATCHES/}
+- local bre=$(quote_bre "$patch")
++ local bre=$(quote_sed_re "$patch")
+ set -- $(sed -e "/^$bre\(\|\.patch\|\.diff\?\)\(\|\.gz\|\.bz2\|\.xz\|\.lzma\|\.lz\)\([ "$'\t'"]\|$\)/!d" \
+ -e 's/[ '$'\t''].*//' "$SERIES")
+ if [ $# -eq 1 ]
+@@ -631,7 +637,7 @@ files_in_patch()
+ then
+ find "$path" -type f \
+ -a ! -path "$(quote_glob "$path")/.timestamp" |
+- sed -e "s/$(quote_bre "$path")\///"
++ sed -e "s/$(quote_sed_re "$path")\///"
+ fi
+ }
+
+diff --git a/quilt/upgrade.in b/quilt/upgrade.in
+index dbf7d05..866aa33 100644
+--- a/quilt/upgrade.in
++++ b/quilt/upgrade.in
+@@ -74,7 +74,7 @@ printf $"Converting meta-data to version %s\n" "$DB_VERSION"
+
+ for patch in $(applied_patches)
+ do
+- proper_name="$(grep "^$(quote_bre $patch)"'\(\|\.patch\|\.diff?\)\(\|\.gz\|\.bz2\)\([ \t]\|$\)' $SERIES)"
++ proper_name="$(grep "^$(quote_grep_re $patch)"'\(\|\.patch\|\.diff?\)\(\|\.gz\|\.bz2\)\([ \t]\|$\)' $SERIES)"
+ proper_name=${proper_name#$QUILT_PATCHES/}
+ proper_name=${proper_name%% *}
+ if [ -z "$proper_name" ]
+@@ -84,7 +84,7 @@ do
+ fi
+
+ if [ "$patch" != "$proper_name" -a -d $QUILT_PC/$patch ] \
+- && grep -q "^$(quote_bre $patch)\$" \
++ && grep -q "^$(quote_grep_re $patch)\$" \
+ $QUILT_PC/applied-patches
+ then
+ mv $QUILT_PC/$patch $QUILT_PC/$proper_name \
+--
+cgit v1.1
+
diff --git a/meta/recipes-devtools/rpm/files/0001-CVE-2021-3521.patch b/meta/recipes-devtools/rpm/files/0001-CVE-2021-3521.patch
deleted file mode 100644
index 044b4dd2a0..0000000000
--- a/meta/recipes-devtools/rpm/files/0001-CVE-2021-3521.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 9a6871126f472feea057d5f803505ec8cc78f083 Mon Sep 17 00:00:00 2001
-From: Panu Matilainen <pmatilai@redhat.com>
-Date: Thu, 30 Sep 2021 09:56:20 +0300
-Subject: [PATCH 1/3] Refactor pgpDigParams construction to helper function
-
-No functional changes, just to reduce code duplication and needed by
-the following commits.
-
-CVE: CVE-2021-3521
-Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/9f03f42e2]
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
----
- rpmio/rpmpgp.c | 13 +++++++++----
- 1 file changed, 9 insertions(+), 4 deletions(-)
-
-diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
-index d0688ebe9a..e472b5320f 100644
---- a/rpmio/rpmpgp.c
-+++ b/rpmio/rpmpgp.c
-@@ -1041,6 +1041,13 @@ unsigned int pgpDigParamsAlgo(pgpDigParams digp, unsigned int algotype)
- return algo;
- }
-
-+static pgpDigParams pgpDigParamsNew(uint8_t tag)
-+{
-+ pgpDigParams digp = xcalloc(1, sizeof(*digp));
-+ digp->tag = tag;
-+ return digp;
-+}
-+
- int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
- pgpDigParams * ret)
- {
-@@ -1058,8 +1065,7 @@ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
- if (pkttype && pkt.tag != pkttype) {
- break;
- } else {
-- digp = xcalloc(1, sizeof(*digp));
-- digp->tag = pkt.tag;
-+ digp = pgpDigParamsNew(pkt.tag);
- }
- }
-
-@@ -1105,8 +1111,7 @@ int pgpPrtParamsSubkeys(const uint8_t *pkts, size_t pktlen,
- digps = xrealloc(digps, alloced * sizeof(*digps));
- }
-
-- digps[count] = xcalloc(1, sizeof(**digps));
-- digps[count]->tag = PGPTAG_PUBLIC_SUBKEY;
-+ digps[count] = pgpDigParamsNew(PGPTAG_PUBLIC_SUBKEY);
- /* Copy UID from main key to subkey */
- digps[count]->userid = xstrdup(mainkey->userid);
-
---
-2.17.1
-
diff --git a/meta/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch b/meta/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch
index 6d236ac400..c6cf9d4c88 100644
--- a/meta/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch
+++ b/meta/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch
@@ -1,4 +1,4 @@
-From 8d013fe154a162305f76141151baf767dd04b598 Mon Sep 17 00:00:00 2001
+From 4ab6a4c5bbad65c3401016bb26b87214cdd0c59b Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex.kanavin@gmail.com>
Date: Mon, 27 Feb 2017 09:43:30 +0200
Subject: [PATCH] Do not hardcode "lib/rpm" as the installation path for
@@ -14,10 +14,10 @@ Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/configure.ac b/configure.ac
-index eb7d6941b..10a889b5d 100644
+index 372875fc4..1b7add9ee 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -871,7 +871,7 @@ else
+@@ -884,7 +884,7 @@ else
usrprefix=$prefix
fi
@@ -27,10 +27,10 @@ index eb7d6941b..10a889b5d 100644
AC_SUBST(OBJDUMP)
diff --git a/macros.in b/macros.in
-index a1f795e5f..689e784ef 100644
+index d53ab5ed5..9d10441c8 100644
--- a/macros.in
+++ b/macros.in
-@@ -933,7 +933,7 @@ package or when debugging this package.\
+@@ -911,7 +911,7 @@ package or when debugging this package.\
%_sharedstatedir %{_prefix}/com
%_localstatedir %{_prefix}/var
%_lib lib
@@ -40,7 +40,7 @@ index a1f795e5f..689e784ef 100644
%_infodir %{_datadir}/info
%_mandir %{_datadir}/man
diff --git a/rpm.am b/rpm.am
-index 7b57f433b..9bbb9ee96 100644
+index ebe4e40d1..e6920e258 100644
--- a/rpm.am
+++ b/rpm.am
@@ -1,10 +1,10 @@
@@ -55,4 +55,4 @@ index 7b57f433b..9bbb9ee96 100644
+rpmconfigdir = $(libdir)/rpm
# Libtool version (current-revision-age) for all our libraries
- rpm_version_info = 11:0:2
+ rpm_version_info = 12:0:3
diff --git a/meta/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch b/meta/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch
index 4020a31092..2a0069cafe 100644
--- a/meta/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch
+++ b/meta/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch
@@ -28,11 +28,18 @@ Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
lib/rpmscript.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
-diff --git a/lib/rpmscript.c b/lib/rpmscript.c
-index cc98c4885..f8bd3df04 100644
--- a/lib/rpmscript.c
+++ b/lib/rpmscript.c
-@@ -394,8 +394,7 @@ exit:
+@@ -17,7 +17,7 @@
+ #include "rpmio/rpmio_internal.h"
+
+ #include "lib/rpmplugins.h" /* rpm plugins hooks */
+-
++#include "lib/rpmchroot.h" /* rpmChrootOut */
+ #include "debug.h"
+
+ struct scriptNextFileFunc_s {
+@@ -391,8 +391,7 @@ exit:
Fclose(out); /* XXX dup'd STDOUT_FILENO */
if (fn) {
@@ -42,7 +49,7 @@ index cc98c4885..f8bd3df04 100644
free(fn);
}
free(mline);
-@@ -428,7 +427,13 @@ rpmRC rpmScriptRun(rpmScript script, int arg1, int arg2, FD_t scriptFd,
+@@ -426,7 +425,13 @@ rpmRC rpmScriptRun(rpmScript script, int
if (rc != RPMRC_FAIL) {
if (script_type & RPMSCRIPTLET_EXEC) {
@@ -57,6 +64,3 @@ index cc98c4885..f8bd3df04 100644
} else {
rc = runLuaScript(plugins, prefixes, script->descr, lvl, scriptFd, &args, script->body, arg1, arg2, &script->nextFileFunc);
}
---
-2.11.0
-
diff --git a/meta/recipes-devtools/rpm/files/0001-configure.ac-add-linux-gnux32-variant-to-triplet-han.patch b/meta/recipes-devtools/rpm/files/0001-configure.ac-add-linux-gnux32-variant-to-triplet-han.patch
new file mode 100644
index 0000000000..2174a79e75
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/0001-configure.ac-add-linux-gnux32-variant-to-triplet-han.patch
@@ -0,0 +1,31 @@
+From 8f51462d41d8fe942d5d0a06f08d47f625141995 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex@linutronix.de>
+Date: Thu, 4 Aug 2022 12:15:08 +0200
+Subject: [PATCH] configure.ac: add linux-gnux32 variant to triplet handling
+
+x32 is a 64 bit x86 ABI with 32 bit pointers.
+
+Upstream-Status: Submitted [https://github.com/rpm-software-management/rpm/pull/2143]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+---
+ configure.ac | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index 372875fc49..7d6a3d274e 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -845,6 +845,10 @@ if echo "$host_os" | grep '.*-gnuabi64$' > /dev/null ; then
+ host_os=`echo "${host_os}" | sed 's/-gnuabi64$//'`
+ host_os_gnu=-gnuabi64
+ fi
++if echo "$host_os" | grep '.*-gnux32$' > /dev/null ; then
++ host_os=`echo "${host_os}" | sed 's/-gnux32$//'`
++ host_os_gnu=-gnux32
++fi
+ if echo "$host_os" | grep '.*-gnu$' > /dev/null ; then
+ host_os=`echo "${host_os}" | sed 's/-gnu$//'`
+ fi
+--
+2.30.2
+
diff --git a/meta/recipes-devtools/rpm/files/0002-CVE-2021-3521.patch b/meta/recipes-devtools/rpm/files/0002-CVE-2021-3521.patch
deleted file mode 100644
index 683b57d455..0000000000
--- a/meta/recipes-devtools/rpm/files/0002-CVE-2021-3521.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-From c4b1bee51bbdd732b94b431a951481af99117703 Mon Sep 17 00:00:00 2001
-From: Panu Matilainen <pmatilai@redhat.com>
-Date: Thu, 30 Sep 2021 09:51:10 +0300
-Subject: [PATCH 2/3] Process MPI's from all kinds of signatures
-
-No immediate effect but needed by the following commits.
-
-CVE: CVE-2021-3521
-Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/b5e8bc74b]
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
-
----
- rpmio/rpmpgp.c | 13 +++++--------
- 1 file changed, 5 insertions(+), 8 deletions(-)
-
-diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
-index 25f67048fd..509e777e6d 100644
---- a/rpmio/rpmpgp.c
-+++ b/rpmio/rpmpgp.c
-@@ -543,7 +543,7 @@ pgpDigAlg pgpDigAlgFree(pgpDigAlg alg)
- return NULL;
- }
-
--static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo, uint8_t sigtype,
-+static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo,
- const uint8_t *p, const uint8_t *h, size_t hlen,
- pgpDigParams sigp)
- {
-@@ -556,10 +556,8 @@ static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo, uint8_t sigtype,
- int mpil = pgpMpiLen(p);
- if (pend - p < mpil)
- break;
-- if (sigtype == PGPSIGTYPE_BINARY || sigtype == PGPSIGTYPE_TEXT) {
-- if (sigalg->setmpi(sigalg, i, p))
-- break;
-- }
-+ if (sigalg->setmpi(sigalg, i, p))
-+ break;
- p += mpil;
- }
-
-@@ -619,7 +617,7 @@ static int pgpPrtSig(pgpTag tag, const uint8_t *h, size_t hlen,
- }
-
- p = ((uint8_t *)v) + sizeof(*v);
-- rc = pgpPrtSigParams(tag, v->pubkey_algo, v->sigtype, p, h, hlen, _digp);
-+ rc = pgpPrtSigParams(tag, v->pubkey_algo, p, h, hlen, _digp);
- } break;
- case 4:
- { pgpPktSigV4 v = (pgpPktSigV4)h;
-@@ -677,8 +675,7 @@ static int pgpPrtSig(pgpTag tag, const uint8_t *h, size_t hlen,
- p += 2;
- if (p > hend)
- return 1;
--
-- rc = pgpPrtSigParams(tag, v->pubkey_algo, v->sigtype, p, h, hlen, _digp);
-+ rc = pgpPrtSigParams(tag, v->pubkey_algo, p, h, hlen, _digp);
- } break;
- default:
- rpmlog(RPMLOG_WARNING, _("Unsupported version of signature: V%d\n"), version);
---
-2.17.1
-
diff --git a/meta/recipes-devtools/rpm/files/0003-CVE-2021-3521.patch b/meta/recipes-devtools/rpm/files/0003-CVE-2021-3521.patch
deleted file mode 100644
index a5ec802501..0000000000
--- a/meta/recipes-devtools/rpm/files/0003-CVE-2021-3521.patch
+++ /dev/null
@@ -1,329 +0,0 @@
-From 07676ca03ad8afcf1ca95a2353c83fbb1d970b9b Mon Sep 17 00:00:00 2001
-From: Panu Matilainen <pmatilai@redhat.com>
-Date: Thu, 30 Sep 2021 09:59:30 +0300
-Subject: [PATCH 3/3] Validate and require subkey binding signatures on PGP
- public keys
-
-All subkeys must be followed by a binding signature by the primary key
-as per the OpenPGP RFC, enforce the presence and validity in the parser.
-
-The implementation is as kludgey as they come to work around our
-simple-minded parser structure without touching API, to maximise
-backportability. Store all the raw packets internally as we decode them
-to be able to access previous elements at will, needed to validate ordering
-and access the actual data. Add testcases for manipulated keys whose
-import previously would succeed.
-
-Depends on the two previous commits:
-7b399fcb8f52566e6f3b4327197a85facd08db91 and
-236b802a4aa48711823a191d1b7f753c82a89ec5
-
-Fixes CVE-2021-3521.
-
-Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/bd36c5dc9]
-CVE:CVE-2021-3521
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
-
----
- rpmio/rpmpgp.c | 99 +++++++++++++++++--
- tests/Makefile.am | 3 +
- tests/data/keys/CVE-2021-3521-badbind.asc | 25 +++++
- .../data/keys/CVE-2021-3521-nosubsig-last.asc | 25 +++++
- tests/data/keys/CVE-2021-3521-nosubsig.asc | 37 +++++++
- tests/rpmsigdig.at | 28 ++++++
- 6 files changed, 209 insertions(+), 8 deletions(-)
- create mode 100644 tests/data/keys/CVE-2021-3521-badbind.asc
- create mode 100644 tests/data/keys/CVE-2021-3521-nosubsig-last.asc
- create mode 100644 tests/data/keys/CVE-2021-3521-nosubsig.asc
-
-diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
-index 509e777e6d..371ad4d9b6 100644
---- a/rpmio/rpmpgp.c
-+++ b/rpmio/rpmpgp.c
-@@ -1061,33 +1061,116 @@ static pgpDigParams pgpDigParamsNew(uint8_t tag)
- return digp;
- }
-
-+static int hashKey(DIGEST_CTX hash, const struct pgpPkt *pkt, int exptag)
-+{
-+ int rc = -1;
-+ if (pkt->tag == exptag) {
-+ uint8_t head[] = {
-+ 0x99,
-+ (pkt->blen >> 8),
-+ (pkt->blen ),
-+ };
-+
-+ rpmDigestUpdate(hash, head, 3);
-+ rpmDigestUpdate(hash, pkt->body, pkt->blen);
-+ rc = 0;
-+ }
-+ return rc;
-+}
-+
-+static int pgpVerifySelf(pgpDigParams key, pgpDigParams selfsig,
-+ const struct pgpPkt *all, int i)
-+{
-+ int rc = -1;
-+ DIGEST_CTX hash = NULL;
-+
-+ switch (selfsig->sigtype) {
-+ case PGPSIGTYPE_SUBKEY_BINDING:
-+ hash = rpmDigestInit(selfsig->hash_algo, 0);
-+ if (hash) {
-+ rc = hashKey(hash, &all[0], PGPTAG_PUBLIC_KEY);
-+ if (!rc)
-+ rc = hashKey(hash, &all[i-1], PGPTAG_PUBLIC_SUBKEY);
-+ }
-+ break;
-+ default:
-+ /* ignore types we can't handle */
-+ rc = 0;
-+ break;
-+ }
-+
-+ if (hash && rc == 0)
-+ rc = pgpVerifySignature(key, selfsig, hash);
-+
-+ rpmDigestFinal(hash, NULL, NULL, 0);
-+
-+ return rc;
-+}
-+
- int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
- pgpDigParams * ret)
- {
- const uint8_t *p = pkts;
- const uint8_t *pend = pkts + pktlen;
- pgpDigParams digp = NULL;
-- struct pgpPkt pkt;
-+ pgpDigParams selfsig = NULL;
-+ int i = 0;
-+ int alloced = 16; /* plenty for normal cases */
-+ struct pgpPkt *all = xmalloc(alloced * sizeof(*all));
- int rc = -1; /* assume failure */
-+ int expect = 0;
-+ int prevtag = 0;
-
- while (p < pend) {
-- if (decodePkt(p, (pend - p), &pkt))
-+ struct pgpPkt *pkt = &all[i];
-+ if (decodePkt(p, (pend - p), pkt))
- break;
-
- if (digp == NULL) {
-- if (pkttype && pkt.tag != pkttype) {
-+ if (pkttype && pkt->tag != pkttype) {
- break;
- } else {
-- digp = pgpDigParamsNew(pkt.tag);
-+ digp = pgpDigParamsNew(pkt->tag);
- }
- }
-
-- if (pgpPrtPkt(&pkt, digp))
-+ if (expect) {
-+ if (pkt->tag != expect)
-+ break;
-+ selfsig = pgpDigParamsNew(pkt->tag);
-+ }
-+ if (pgpPrtPkt(pkt, selfsig ? selfsig : digp))
- break;
-
-- p += (pkt.body - pkt.head) + pkt.blen;
-- if (pkttype == PGPTAG_SIGNATURE)
-- break;
-+ if (selfsig) {
-+ /* subkeys must be followed by binding signature */
-+ if (prevtag == PGPTAG_PUBLIC_SUBKEY) {
-+ if (selfsig->sigtype != PGPSIGTYPE_SUBKEY_BINDING)
-+ break;
-+ }
-+
-+ int xx = pgpVerifySelf(digp, selfsig, all, i);
-+
-+ selfsig = pgpDigParamsFree(selfsig);
-+ if (xx)
-+ break;
-+ expect = 0;
-+ }
-+
-+ if (pkt->tag == PGPTAG_PUBLIC_SUBKEY)
-+ expect = PGPTAG_SIGNATURE;
-+ prevtag = pkt->tag;
-+
-+ i++;
-+ p += (pkt->body - pkt->head) + pkt->blen;
-+ if (pkttype == PGPTAG_SIGNATURE)
-+ break;
-+
-+ if (alloced <= i) {
-+ alloced *= 2;
-+ all = xrealloc(all, alloced * sizeof(*all));
-+ }
-+
- }
-
- rc = (digp && (p == pend)) ? 0 : -1;
-diff --git a/tests/Makefile.am b/tests/Makefile.am
-index a41ce10de8..7bb23247f1 100644
---- a/tests/Makefile.am
-+++ b/tests/Makefile.am
-@@ -107,6 +107,9 @@ EXTRA_DIST += data/SPECS/hello-config-buildid.spec
- EXTRA_DIST += data/SPECS/hello-cd.spec
- EXTRA_DIST += data/keys/rpm.org-rsa-2048-test.pub
- EXTRA_DIST += data/keys/rpm.org-rsa-2048-test.secret
-+EXTRA_DIST += data/keys/CVE-2021-3521-badbind.asc
-+EXTRA_DIST += data/keys/CVE-2022-3521-nosubsig.asc
-+EXTRA_DIST += data/keys/CVE-2022-3521-nosubsig-last.asc
- EXTRA_DIST += data/macros.testfile
- EXTRA_DIST += data/macros.debug
- EXTRA_DIST += data/SOURCES/foo.c
-diff --git a/tests/data/keys/CVE-2021-3521-badbind.asc b/tests/data/keys/CVE-2021-3521-badbind.asc
-new file mode 100644
-index 0000000000..aea00f9d7a
---- /dev/null
-+++ b/tests/data/keys/CVE-2021-3521-badbind.asc
-@@ -0,0 +1,25 @@
-+-----BEGIN PGP PUBLIC KEY BLOCK-----
-+Version: rpm-4.17.90 (NSS-3)
-+
-+mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
-+HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
-+91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
-+eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
-+7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
-+1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
-+c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
-+CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
-+Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
-+BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
-+XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
-+fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
-++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
-+BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
-+zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
-+iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
-+Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
-+KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
-+L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAE=
-+=WCfs
-+-----END PGP PUBLIC KEY BLOCK-----
-+
-diff --git a/tests/data/keys/CVE-2021-3521-nosubsig-last.asc b/tests/data/keys/CVE-2021-3521-nosubsig-last.asc
-new file mode 100644
-index 0000000000..aea00f9d7a
---- /dev/null
-+++ b/tests/data/keys/CVE-2021-3521-nosubsig-last.asc
-@@ -0,0 +1,25 @@
-+-----BEGIN PGP PUBLIC KEY BLOCK-----
-+Version: rpm-4.17.90 (NSS-3)
-+
-+mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
-+HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
-+91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
-+eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
-+7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
-+1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
-+c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
-+CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
-+Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
-+BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
-+XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
-+fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
-++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
-+BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
-+zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
-+iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
-+Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
-+KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
-+L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAE=
-+=WCfs
-+-----END PGP PUBLIC KEY BLOCK-----
-+
-diff --git a/tests/data/keys/CVE-2021-3521-nosubsig.asc b/tests/data/keys/CVE-2021-3521-nosubsig.asc
-new file mode 100644
-index 0000000000..3a2e7417f8
---- /dev/null
-+++ b/tests/data/keys/CVE-2021-3521-nosubsig.asc
-@@ -0,0 +1,37 @@
-+-----BEGIN PGP PUBLIC KEY BLOCK-----
-+Version: rpm-4.17.90 (NSS-3)
-+
-+mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
-+HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
-+91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
-+eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
-+7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
-+1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
-+c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
-+CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
-+Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
-+BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
-+XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
-+fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
-++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
-+BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
-+zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
-+iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
-+Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
-+KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
-+L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAG5AQ0EWOY5GAEIAKT68NmshdC4
-+VcRhOhlXBvZq23NtskkKoPvW+ZlMuxbRDG48pGBtxhjOngriVUGceEWsXww5Q7En
-+uRBYglkxkW34ENym0Ji6tsPYfhbbG+dZWKIL4vMIzPOIwlPrXrm558vgkdMM/ELZ
-+8WIz3KtzvYubKUk2Qz+96lPXbwnlC/SBFRpBseJC5LoOb/5ZGdR/HeLz1JXiacHF
-+v9Nr3cZWqg5yJbDNZKfASdZgC85v3kkvhTtzknl//5wqdAMexbuwiIh2xyxbO+B/
-+qqzZFrVmu3sV2Tj5lLZ/9p1qAuEM7ULbixd/ld8yTmYvQ4bBlKv2bmzXtVfF+ymB
-+Tm6BzyQEl/MAEQEAAYkBHwQYAQgACQUCWOY5GAIbDAAKCRBDRFkeGWTF/PANB/9j
-+mifmj6z/EPe0PJFhrpISt9PjiUQCt0IPtiL5zKAkWjHePIzyi+0kCTBF6DDLFxos
-+3vN4bWnVKT1kBhZAQlPqpJTg+m74JUYeDGCdNx9SK7oRllATqyu+5rncgxjWVPnQ
-+zu/HRPlWJwcVFYEVXYL8xzfantwQTqefjmcRmBRdA2XJITK+hGWwAmrqAWx+q5xX
-+Pa8wkNMxVzNS2rUKO9SoVuJ/wlUvfoShkJ/VJ5HDp3qzUqncADfdGN35TDzscngQ
-+gHvnMwVBfYfSCABV1hNByoZcc/kxkrWMmsd/EnIyLd1Q1baKqc3cEDuC6E6/o4yJ
-+E4XX4jtDmdZPreZALsiB
-+=rRop
-+-----END PGP PUBLIC KEY BLOCK-----
-+
-diff --git a/tests/rpmsigdig.at b/tests/rpmsigdig.at
-index 8e7c759b8f..e2d30a7f1b 100644
---- a/tests/rpmsigdig.at
-+++ b/tests/rpmsigdig.at
-@@ -2,6 +2,34 @@
-
- AT_BANNER([RPM signatures and digests])
-
-+AT_SETUP([rpmkeys --import invalid keys])
-+AT_KEYWORDS([rpmkeys import])
-+RPMDB_INIT
-+
-+AT_CHECK([
-+runroot rpmkeys --import /data/keys/CVE-2021-3521-badbind.asc
-+],
-+[1],
-+[],
-+[error: /data/keys/CVE-2021-3521-badbind.asc: key 1 import failed.]
-+)
-+AT_CHECK([
-+runroot rpmkeys --import /data/keys/CVE-2021-3521-nosubsig.asc
-+],
-+[1],
-+[],
-+[error: /data/keys/CVE-2021-3521-nosubsig.asc: key 1 import failed.]
-+)
-+
-+AT_CHECK([
-+runroot rpmkeys --import /data/keys/CVE-2021-3521-nosubsig-last.asc
-+],
-+[1],
-+[],
-+[error: /data/keys/CVE-2021-3521-nosubsig-last.asc: key 1 import failed.]
-+)
-+AT_CLEANUP
-+
- # ------------------------------
- # Test pre-built package verification
- AT_SETUP([rpmkeys -Kv <unsigned> 1])
---
-2.17.1
-
diff --git a/meta/recipes-devtools/rpm/rpm_4.17.0.bb b/meta/recipes-devtools/rpm/rpm_4.17.1.bb
index c392ac0db4..9b6446f265 100644
--- a/meta/recipes-devtools/rpm/rpm_4.17.0.bb
+++ b/meta/recipes-devtools/rpm/rpm_4.17.1.bb
@@ -39,13 +39,11 @@ SRC_URI = "git://github.com/rpm-software-management/rpm;branch=rpm-4.17.x;protoc
file://0001-tools-Add-error.h-for-non-glibc-case.patch \
file://0001-docs-do-not-build-manpages-requires-pandoc.patch \
file://0001-build-pack.c-do-not-insert-payloadflags-into-.rpm-me.patch \
- file://0001-CVE-2021-3521.patch \
- file://0002-CVE-2021-3521.patch \
- file://0003-CVE-2021-3521.patch \
+ file://0001-configure.ac-add-linux-gnux32-variant-to-triplet-han.patch \
"
PE = "1"
-SRCREV = "3e74e8ba2dd5e76a5353d238dc7fc38651ce27b3"
+SRCREV = "5bef402da334595ed9302b8bca1acdf5e88bfe11"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/rsync/files/0001-Add-missing-prototypes-to-function-declarations.patch b/meta/recipes-devtools/rsync/files/0001-Add-missing-prototypes-to-function-declarations.patch
new file mode 100644
index 0000000000..474d82db22
--- /dev/null
+++ b/meta/recipes-devtools/rsync/files/0001-Add-missing-prototypes-to-function-declarations.patch
@@ -0,0 +1,173 @@
+From 785c0072c80c2f6e0839478453cf65fdeac15da0 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Mon, 29 Aug 2022 19:53:28 -0700
+Subject: [PATCH] Add missing prototypes to function declarations
+
+With Clang 15+ compiler -Wstrict-prototypes is triggering warnings which
+are turned into errors with -Werror, this fixes the problem by adding
+missing prototypes
+
+Fixes errors like
+| log.c:134:24: error: a function declaration without a prototype is deprecated in all versions of C [-Werror,-Wstrict-prototypes]
+| static void syslog_init()
+| ^
+| void
+
+Upstream-Status: Submitted [https://lists.samba.org/archive/rsync/2022-August/032858.html]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ checksum.c | 2 +-
+ exclude.c | 2 +-
+ hlink.c | 3 +--
+ lib/pool_alloc.c | 2 +-
+ log.c | 2 +-
+ main.c | 2 +-
+ syscall.c | 4 ++--
+ zlib/crc32.c | 2 +-
+ zlib/trees.c | 2 +-
+ zlib/zutil.c | 4 ++--
+ 10 files changed, 12 insertions(+), 13 deletions(-)
+
+diff --git a/checksum.c b/checksum.c
+index fb8c0a0..174c28c 100644
+--- a/checksum.c
++++ b/checksum.c
+@@ -629,7 +629,7 @@ int sum_end(char *sum)
+ return csum_len_for_type(cursum_type, 0);
+ }
+
+-void init_checksum_choices()
++void init_checksum_choices(void)
+ {
+ #ifdef SUPPORT_XXH3
+ char buf[32816];
+diff --git a/exclude.c b/exclude.c
+index adc82e2..79f5a82 100644
+--- a/exclude.c
++++ b/exclude.c
+@@ -358,7 +358,7 @@ void implied_include_partial_string(const char *s_start, const char *s_end)
+ memcpy(partial_string_buf, s_start, partial_string_len);
+ }
+
+-void free_implied_include_partial_string()
++void free_implied_include_partial_string(void)
+ {
+ if (partial_string_buf) {
+ free(partial_string_buf);
+diff --git a/hlink.c b/hlink.c
+index 66810a3..6511dfb 100644
+--- a/hlink.c
++++ b/hlink.c
+@@ -117,8 +117,7 @@ static void match_gnums(int32 *ndx_list, int ndx_count)
+ struct ht_int32_node *node = NULL;
+ int32 gnum, gnum_next;
+
+- qsort(ndx_list, ndx_count, sizeof ndx_list[0], (int (*)()) hlink_compare_gnum);
+-
++ qsort(ndx_list, ndx_count, sizeof ndx_list[0], (int (*)(const void *, const void *)) hlink_compare_gnum);
+ for (from = 0; from < ndx_count; from++) {
+ file = hlink_flist->sorted[ndx_list[from]];
+ gnum = F_HL_GNUM(file);
+diff --git a/lib/pool_alloc.c b/lib/pool_alloc.c
+index a1a7245..4eae062 100644
+--- a/lib/pool_alloc.c
++++ b/lib/pool_alloc.c
+@@ -9,7 +9,7 @@ struct alloc_pool
+ size_t size; /* extent size */
+ size_t quantum; /* allocation quantum */
+ struct pool_extent *extents; /* top extent is "live" */
+- void (*bomb)(); /* called if malloc fails */
++ void (*bomb)(const char *, const char *, int); /* called if malloc fails */
+ int flags;
+
+ /* statistical data */
+diff --git a/log.c b/log.c
+index 44344e2..991e359 100644
+--- a/log.c
++++ b/log.c
+@@ -131,7 +131,7 @@ static void logit(int priority, const char *buf)
+ }
+ }
+
+-static void syslog_init()
++static void syslog_init(void)
+ {
+ int options = LOG_PID;
+
+diff --git a/main.c b/main.c
+index 9ebfbea..affa244 100644
+--- a/main.c
++++ b/main.c
+@@ -244,7 +244,7 @@ void read_del_stats(int f)
+ stats.deleted_files += stats.deleted_specials = read_varint(f);
+ }
+
+-static void become_copy_as_user()
++static void become_copy_as_user(void)
+ {
+ char *gname;
+ uid_t uid;
+diff --git a/syscall.c b/syscall.c
+index d92074a..92ca86d 100644
+--- a/syscall.c
++++ b/syscall.c
+@@ -389,9 +389,9 @@ OFF_T do_lseek(int fd, OFF_T offset, int whence)
+ {
+ #ifdef HAVE_LSEEK64
+ #if !SIZEOF_OFF64_T
+- OFF_T lseek64();
++ OFF_T lseek64(int fd, OFF_T offset, int whence);
+ #else
+- off64_t lseek64();
++ off64_t lseek64(int fd, off64_t offset, int whence);
+ #endif
+ return lseek64(fd, offset, whence);
+ #else
+diff --git a/zlib/crc32.c b/zlib/crc32.c
+index 05733f4..50c6c02 100644
+--- a/zlib/crc32.c
++++ b/zlib/crc32.c
+@@ -187,7 +187,7 @@ local void write_table(out, table)
+ /* =========================================================================
+ * This function can be used by asm versions of crc32()
+ */
+-const z_crc_t FAR * ZEXPORT get_crc_table()
++const z_crc_t FAR * ZEXPORT get_crc_table(void)
+ {
+ #ifdef DYNAMIC_CRC_TABLE
+ if (crc_table_empty)
+diff --git a/zlib/trees.c b/zlib/trees.c
+index 9c66770..0d9047e 100644
+--- a/zlib/trees.c
++++ b/zlib/trees.c
+@@ -231,7 +231,7 @@ local void send_bits(s, value, length)
+ /* ===========================================================================
+ * Initialize the various 'constant' tables.
+ */
+-local void tr_static_init()
++local void tr_static_init(void)
+ {
+ #if defined(GEN_TREES_H) || !defined(STDC)
+ static int static_init_done = 0;
+diff --git a/zlib/zutil.c b/zlib/zutil.c
+index bbba7b2..61f8dc9 100644
+--- a/zlib/zutil.c
++++ b/zlib/zutil.c
+@@ -27,12 +27,12 @@ z_const char * const z_errmsg[10] = {
+ ""};
+
+
+-const char * ZEXPORT zlibVersion()
++const char * ZEXPORT zlibVersion(void)
+ {
+ return ZLIB_VERSION;
+ }
+
+-uLong ZEXPORT zlibCompileFlags()
++uLong ZEXPORT zlibCompileFlags(void)
+ {
+ uLong flags;
+
+--
+2.37.2
+
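As an aside, the class of error this patch addresses is easy to reproduce outside rsync. The following standalone sketch is illustrative only (file and function names are invented, not taken from the patch): the commented-out declarations show the K&R-style forms Clang 15 rejects under -Werror -Wstrict-prototypes, while the live code shows the fixed forms, including a qsort comparator with a proper (const void *, const void *) prototype.

/* strict_proto_demo.c - illustrative only; not part of the rsync patch.
 * clang -std=c17 -Werror -Wstrict-prototypes strict_proto_demo.c
 * would fail on the commented-out declarations; the fixed forms compile. */
#include <stdio.h>
#include <stdlib.h>

/* deprecated style: () declares an unspecified parameter list */
/* static void syslog_init_old();   -> -Wstrict-prototypes error */
/* static void (*bomb_old)();       -> -Wstrict-prototypes error */

/* fixed style: spell out the parameters, or (void) for none */
static void syslog_init_new(void) { puts("init"); }
static void (*bomb_new)(const char *, const char *, int) = NULL;

/* qsort needs a comparator taking const void *, not an empty list */
static int cmp_int(const void *a, const void *b)
{
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);
}

int main(void)
{
    int v[] = { 3, 1, 2 };
    qsort(v, 3, sizeof v[0], cmp_int);
    syslog_init_new();
    printf("%d %d %d, bomb set: %d\n", v[0], v[1], v[2], bomb_new != NULL);
    return 0;
}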
diff --git a/meta/recipes-devtools/rsync/files/0001-Turn-on-pedantic-errors-at-the-end-of-configure.patch b/meta/recipes-devtools/rsync/files/0001-Turn-on-pedantic-errors-at-the-end-of-configure.patch
new file mode 100644
index 0000000000..1d9c4bfe48
--- /dev/null
+++ b/meta/recipes-devtools/rsync/files/0001-Turn-on-pedantic-errors-at-the-end-of-configure.patch
@@ -0,0 +1,68 @@
+From e64a58387db46239902b610871a0eb81626e99ff Mon Sep 17 00:00:00 2001
+From: Paul Eggert <eggert@cs.ucla.edu>
+Date: Thu, 18 Aug 2022 07:46:28 -0700
+Subject: [PATCH] Turn on -pedantic-errors at the end of 'configure'
+
+Problem reported by Khem Raj in:
+https://lists.gnu.org/r/autoconf-patches/2022-08/msg00009.html
+Upstream-Status: Submitted [https://lists.samba.org/archive/rsync/2022-August/032862.html]
+---
+ configure.ac | 35 ++++++++++++++++++++---------------
+ 1 file changed, 20 insertions(+), 15 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index d185b2d3..7e9514f7 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1071,21 +1071,6 @@ elif test x"$ac_cv_header_popt_h" != x"yes"; then
+ with_included_popt=yes
+ fi
+
+-if test x"$GCC" = x"yes"; then
+- if test x"$with_included_popt" != x"yes"; then
+- # Turn pedantic warnings into errors to ensure an array-init overflow is an error.
+- CFLAGS="$CFLAGS -pedantic-errors"
+- else
+- # Our internal popt code cannot be compiled with pedantic warnings as errors, so try to
+- # turn off pedantic warnings (which will not lose the error for array-init overflow).
+- # Older gcc versions don't understand -Wno-pedantic, so check if --help=warnings lists
+- # -Wpedantic and use that as a flag.
+- case `$CC --help=warnings 2>/dev/null | grep Wpedantic` in
+- *-Wpedantic*) CFLAGS="$CFLAGS -pedantic-errors -Wno-pedantic" ;;
+- esac
+- fi
+-fi
+-
+ AC_MSG_CHECKING([whether to use included libpopt])
+ if test x"$with_included_popt" = x"yes"; then
+ AC_MSG_RESULT($srcdir/popt)
+@@ -1444,6 +1429,26 @@ case "$CC" in
+ ;;
+ esac
+
++# Enable -pedantic-errors last, so that it doesn't mess up other
++# 'configure' tests. For example, Autoconf uses empty function
++# prototypes like 'int main () {}' which Clang 15's -pedantic-errors
++# would reject. Generally it's not a good idea to try to run
++# 'configure' itself with strict compiler checking.
++if test x"$GCC" = x"yes"; then
++ if test x"$with_included_popt" != x"yes"; then
++ # Turn pedantic warnings into errors to ensure an array-init overflow is an error.
++ CFLAGS="$CFLAGS -pedantic-errors"
++ else
++ # Our internal popt code cannot be compiled with pedantic warnings as errors, so try to
++ # turn off pedantic warnings (which will not lose the error for array-init overflow).
++ # Older gcc versions don't understand -Wno-pedantic, so check if --help=warnings lists
++ # -Wpedantic and use that as a flag.
++ case `$CC --help=warnings 2>/dev/null | grep Wpedantic` in
++ *-Wpedantic*) CFLAGS="$CFLAGS -pedantic-errors -Wno-pedantic" ;;
++ esac
++ fi
++fi
++
+ AC_CONFIG_FILES([Makefile lib/dummy zlib/dummy popt/dummy shconfig])
+ AC_OUTPUT
+
+--
+2.37.1
+
diff --git a/meta/recipes-devtools/rsync/files/0001-rsync-ssl-Verify-the-hostname-in-the-certificate-whe.patch b/meta/recipes-devtools/rsync/files/0001-rsync-ssl-Verify-the-hostname-in-the-certificate-whe.patch
deleted file mode 100644
index 2d51ddf965..0000000000
--- a/meta/recipes-devtools/rsync/files/0001-rsync-ssl-Verify-the-hostname-in-the-certificate-whe.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From fbe85634d88e82fbb439ae2a5d1aca8b8c309bea Mon Sep 17 00:00:00 2001
-From: Matt McCutchen <matt@mattmccutchen.net>
-Date: Wed, 26 Aug 2020 12:16:08 -0400
-Subject: [PATCH] rsync-ssl: Verify the hostname in the certificate when using
- openssl.
-
-CVE: CVE-2020-14387
-
-Upstream-Status: Backport [https://git.samba.org/?p=rsync.git;a=commit;h=c3f7414]
-
-Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
----
- rsync-ssl | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/rsync-ssl b/rsync-ssl
-index 8101975..46701af 100755
---- a/rsync-ssl
-+++ b/rsync-ssl
-@@ -129,7 +129,7 @@ function rsync_ssl_helper {
- fi
-
- if [[ $RSYNC_SSL_TYPE == openssl ]]; then
-- exec $RSYNC_SSL_OPENSSL s_client $caopt $certopt -quiet -verify_quiet -servername $hostname -connect $hostname:$port
-+ exec $RSYNC_SSL_OPENSSL s_client $caopt $certopt -quiet -verify_quiet -servername $hostname -verify_hostname $hostname -connect $hostname:$port
- elif [[ $RSYNC_SSL_TYPE == gnutls ]]; then
- exec $RSYNC_SSL_GNUTLS --logfile=/dev/null $gnutls_cert_opt $gnutls_opts $hostname:$port
- else
---
-2.17.1
-
diff --git a/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch b/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch
index 4ba7665280..42a6372ba7 100644
--- a/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch
+++ b/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch
@@ -1,4 +1,4 @@
-From 1f29584e57f5fda09970c66f3b94f4720e09c1bb Mon Sep 17 00:00:00 2001
+From 81700d1a0e51391028c761cc8ef1cd660084d114 Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Tue, 12 Apr 2016 15:51:54 +0100
Subject: [PATCH] rsync: remove upstream's rebuild logic
@@ -14,12 +14,12 @@ Signed-off-by: Ross Burton <ross.burton@intel.com>
1 file changed, 54 deletions(-)
diff --git a/Makefile.in b/Makefile.in
-index 672fcc4..c12d8d4 100644
+index 3cde955..d963a70 100644
--- a/Makefile.in
+++ b/Makefile.in
-@@ -168,60 +168,6 @@ gen: conf proto.h man
- gensend: gen
- rsync -aic $(GENFILES) $${SAMBA_HOST-samba.org}:/home/ftp/pub/rsync/generated-files/
+@@ -190,60 +190,6 @@ gensend: gen
+ fi
+ rsync -aic $(GENFILES) git-version.h $${SAMBA_HOST-samba.org}:/home/ftp/pub/rsync/generated-files/ || true
-aclocal.m4: $(srcdir)/m4/*.m4
- aclocal -I $(srcdir)/m4
@@ -41,7 +41,7 @@ index 672fcc4..c12d8d4 100644
- else \
- echo "config.h.in has CHANGED."; \
- fi
-- @if test -f configure.sh.old -o -f config.h.in.old; then \
+- @if test -f configure.sh.old || test -f config.h.in.old; then \
- if test "$(MAKECMDGOALS)" = reconfigure; then \
- echo 'Continuing with "make reconfigure".'; \
- else \
diff --git a/meta/recipes-devtools/rsync/rsync_3.2.3.bb b/meta/recipes-devtools/rsync/rsync_3.2.5.bb
index 6168ee85fc..983bdd5ab0 100644
--- a/meta/recipes-devtools/rsync/rsync_3.2.3.bb
+++ b/meta/recipes-devtools/rsync/rsync_3.2.5.bb
@@ -6,7 +6,7 @@ SECTION = "console/network"
# GPL-2.0-or-later (<< 3.0.0), GPL-3.0-or-later (>= 3.0.0)
# Includes opennsh and xxhash dynamic link exception
LICENSE = "GPL-3.0-or-later"
-LIC_FILES_CHKSUM = "file://COPYING;md5=9e5a4f9b3a253d51520617aa54f8eb26"
+LIC_FILES_CHKSUM = "file://COPYING;md5=24423708fe159c9d12be1ea29fcb18c7"
DEPENDS = "popt"
@@ -14,10 +14,11 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \
file://rsyncd.conf \
file://makefile-no-rebuild.patch \
file://determism.patch \
- file://0001-rsync-ssl-Verify-the-hostname-in-the-certificate-whe.patch \
+ file://0001-Add-missing-prototypes-to-function-declarations.patch \
+ file://0001-Turn-on-pedantic-errors-at-the-end-of-configure.patch \
"
-SRC_URI[sha256sum] = "becc3c504ceea499f4167a260040ccf4d9f2ef9499ad5683c179a697146ce50e"
+SRC_URI[sha256sum] = "2ac4d21635cdf791867bc377c35ca6dda7f50d919a58be45057fd51600c69aba"
# -16548 required for v3.1.3pre1. Already in v3.1.3.
CVE_CHECK_IGNORE += " CVE-2017-16548 "
@@ -41,7 +42,17 @@ PACKAGECONFIG[zstd] = "--enable-zstd,--disable-zstd,zstd"
CACHED_CONFIGUREVARS += "rsync_cv_can_hardlink_special=yes rsync_cv_can_hardlink_symlink=yes"
EXTRA_OEMAKE = 'STRIP=""'
-EXTRA_OECONF = "--disable-simd --disable-md2man --disable-asm --with-nobody-group=nogroup"
+EXTRA_OECONF = "--disable-md2man --with-nobody-group=nogroup"
+
+#| ./simd-checksum-x86_64.cpp: In function 'uint32_t get_checksum1_cpp(char*, int32_t)':
+#| ./simd-checksum-x86_64.cpp:89:52: error: multiversioning needs 'ifunc' which is not supported on this target
+#| 89 | __attribute__ ((target("default"))) MVSTATIC int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) { return i; }
+#| | ^~~~~~~~~~~~~~~~~~~~~
+#| ./simd-checksum-x86_64.cpp:480:1: error: use of multiversioned function without a default
+#| 480 | }
+#| | ^
+#| If you can't fix the issue, re-run ./configure with --disable-roll-simd.
+EXTRA_OECONF:append:libc-musl = " --disable-roll-simd"
# rsync 3.0 uses configure.sh instead of configure, and
# makefile checks the existence of configure.sh
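For context on the --disable-roll-simd comment above: rsync's SIMD checksum code relies on compiler function multiversioning, which needs ifunc support from the C library, and musl does not provide it. A rough standalone illustration of the same mechanism follows, using the C-friendly target_clones attribute; the function is invented, and an x86-64 GCC/glibc toolchain is assumed. On a musl toolchain the attribute typically fails with an ifunc-related error, which is the same reason the recipe disables the rolling-checksum SIMD code there.

/* ifunc_demo.c - illustrative sketch, not taken from rsync.
 * gcc -O2 ifunc_demo.c builds on x86-64/glibc, where the compiler emits
 * avx2 and generic clones plus an ifunc resolver that picks one at load
 * time; without ifunc support this cannot be built or linked. */
#include <stdio.h>

__attribute__((target_clones("avx2", "default")))
int sum_bytes(const unsigned char *p, int n)
{
    int s = 0;
    for (int i = 0; i < n; i++)
        s += p[i];               /* trivial stand-in for a checksum loop */
    return s;
}

int main(void)
{
    unsigned char buf[16] = { 1, 2, 3, 4 };
    printf("sum = %d\n", sum_bytes(buf, 16));
    return 0;
}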
diff --git a/meta/recipes-devtools/ruby/ruby.inc b/meta/recipes-devtools/ruby/ruby.inc
deleted file mode 100644
index ebff5efd1f..0000000000
--- a/meta/recipes-devtools/ruby/ruby.inc
+++ /dev/null
@@ -1,39 +0,0 @@
-SUMMARY = "An interpreter of object-oriented scripting language"
-DESCRIPTION = "Ruby is an interpreted scripting language for quick \
-and easy object-oriented programming. It has many features to process \
-text files and to do system management tasks (as in Perl). \
-It is simple, straight-forward, and extensible. \
-"
-HOMEPAGE = "http://www.ruby-lang.org/"
-SECTION = "devel/ruby"
-LICENSE = "Ruby | BSD-2-Clause | BSD-3-Clause | GPL-2.0-only | ISC | MIT"
-LIC_FILES_CHKSUM = "file://COPYING;md5=5b8c87559868796979806100db3f3805 \
- file://BSDL;md5=8b50bc6de8f586dc66790ba11d064d75 \
- file://GPL;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
- file://LEGAL;md5=f260190bc1e92e363f0ee3c0463d4c7c \
- "
-
-DEPENDS = "zlib openssl libyaml gdbm readline libffi"
-DEPENDS:append:class-target = " ruby-native"
-
-SHRT_VER = "${@oe.utils.trim_version("${PV}", 2)}"
-SRC_URI = "http://cache.ruby-lang.org/pub/ruby/${SHRT_VER}/ruby-${PV}.tar.gz \
- file://0001-extmk-fix-cross-compilation-of-external-gems.patch \
- file://0002-Obey-LDFLAGS-for-the-link-of-libruby.patch \
- "
-UPSTREAM_CHECK_URI = "https://www.ruby-lang.org/en/downloads/"
-
-inherit autotools ptest pkgconfig
-
-
-# This snippet lets compiled extensions which rely on external libraries,
-# such as zlib, compile properly. If we don't do this, then when extmk.rb
-# runs, it uses the native libraries instead of the target libraries, and so
-# none of the linking operations succeed -- which makes extconf.rb think
-# that the libraries aren't available and hence that the extension can't be
-# built.
-
-do_configure:prepend() {
- sed -i "s#%%TARGET_CFLAGS%%#$CFLAGS#; s#%%TARGET_LDFLAGS%%#$LDFLAGS#" ${S}/common.mk
- rm -rf ${S}/ruby/
-}
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2023-28755.patch b/meta/recipes-devtools/ruby/ruby/CVE-2023-28755.patch
new file mode 100644
index 0000000000..d611c41dcc
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2023-28755.patch
@@ -0,0 +1,68 @@
+From db4bb57d4af6d097a0c29490536793d95f1d8983 Mon Sep 17 00:00:00 2001
+From: Hiroshi SHIBATA <hsbt@ruby-lang.org>
+Date: Mon, 24 Apr 2023 08:27:24 +0000
+Subject: [PATCH] Merge URI-0.12.1
+
+CVE: CVE-2023-28755
+
+Upstream-Status: Backport [https://github.com/ruby/ruby/commit/8ce4ab146498879b65e22f1be951b25eebb79300]
+
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ lib/uri/rfc3986_parser.rb | 4 ++--
+ lib/uri/version.rb | 2 +-
+ test/uri/test_common.rb | 11 +++++++++++
+ 3 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/lib/uri/rfc3986_parser.rb b/lib/uri/rfc3986_parser.rb
+index 3e07de4..3c89311 100644
+--- a/lib/uri/rfc3986_parser.rb
++++ b/lib/uri/rfc3986_parser.rb
+@@ -3,8 +3,8 @@ module URI
+ class RFC3986_Parser # :nodoc:
+ # URI defined in RFC3986
+ # this regexp is modified not to host is not empty string
+- RFC3986_URI = /\A(?<URI>(?<scheme>[A-Za-z][+\-.0-9A-Za-z]*):(?<hier-part>\/\/(?<authority>(?:(?<userinfo>(?:%\h\h|[!$&-.0-;=A-Z_a-z~])*)@)?(?<host>(?<IP-literal>\[(?:(?<IPv6address>(?:\h{1,4}:){6}(?<ls32>\h{1,4}:\h{1,4}|(?<IPv4address>(?<dec-octet>[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]|\d)\.\g<dec-octet>\.\g<dec-octet>\.\g<dec-octet>))|::(?:\h{1,4}:){5}\g<ls32>|\h{1,4}?::(?:\h{1,4}:){4}\g<ls32>|(?:(?:\h{1,4}:)?\h{1,4})?::(?:\h{1,4}:){3}\g<ls32>|(?:(?:\h{1,4}:){,2}\h{1,4})?::(?:\h{1,4}:){2}\g<ls32>|(?:(?:\h{1,4}:){,3}\h{1,4})?::\h{1,4}:\g<ls32>|(?:(?:\h{1,4}:){,4}\h{1,4})?::\g<ls32>|(?:(?:\h{1,4}:){,5}\h{1,4})?::\h{1,4}|(?:(?:\h{1,4}:){,6}\h{1,4})?::)|(?<IPvFuture>v\h+\.[!$&-.0-;=A-Z_a-z~]+))\])|\g<IPv4address>|(?<reg-name>(?:%\h\h|[!$&-.0-9;=A-Z_a-z~])+))?(?::(?<port>\d*))?)(?<path-abempty>(?:\/(?<segment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])*))*)|(?<path-absolute>\/(?:(?<segment-nz>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])+)(?:\/\g<segment>)*)?)|(?<path-rootless>\g<segment-nz>(?:\/\g<segment>)*)|(?<path-empty>))(?:\?(?<query>[^#]*))?(?:\#(?<fragment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~\/?])*))?)\z/
+- RFC3986_relative_ref = /\A(?<relative-ref>(?<relative-part>\/\/(?<authority>(?:(?<userinfo>(?:%\h\h|[!$&-.0-;=A-Z_a-z~])*)@)?(?<host>(?<IP-literal>\[(?<IPv6address>(?:\h{1,4}:){6}(?<ls32>\h{1,4}:\h{1,4}|(?<IPv4address>(?<dec-octet>[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]|\d)\.\g<dec-octet>\.\g<dec-octet>\.\g<dec-octet>))|::(?:\h{1,4}:){5}\g<ls32>|\h{1,4}?::(?:\h{1,4}:){4}\g<ls32>|(?:(?:\h{1,4}:){,1}\h{1,4})?::(?:\h{1,4}:){3}\g<ls32>|(?:(?:\h{1,4}:){,2}\h{1,4})?::(?:\h{1,4}:){2}\g<ls32>|(?:(?:\h{1,4}:){,3}\h{1,4})?::\h{1,4}:\g<ls32>|(?:(?:\h{1,4}:){,4}\h{1,4})?::\g<ls32>|(?:(?:\h{1,4}:){,5}\h{1,4})?::\h{1,4}|(?:(?:\h{1,4}:){,6}\h{1,4})?::)|(?<IPvFuture>v\h+\.[!$&-.0-;=A-Z_a-z~]+)\])|\g<IPv4address>|(?<reg-name>(?:%\h\h|[!$&-.0-9;=A-Z_a-z~])+))?(?::(?<port>\d*))?)(?<path-abempty>(?:\/(?<segment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])*))*)|(?<path-absolute>\/(?:(?<segment-nz>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])+)(?:\/\g<segment>)*)?)|(?<path-noscheme>(?<segment-nz-nc>(?:%\h\h|[!$&-.0-9;=@-Z_a-z~])+)(?:\/\g<segment>)*)|(?<path-empty>))(?:\?(?<query>[^#]*))?(?:\#(?<fragment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~\/?])*))?)\z/
++ RFC3986_URI = /\A(?<URI>(?<scheme>[A-Za-z][+\-.0-9A-Za-z]*+):(?<hier-part>\/\/(?<authority>(?:(?<userinfo>(?:%\h\h|[!$&-.0-;=A-Z_a-z~])*+)@)?(?<host>(?<IP-literal>\[(?:(?<IPv6address>(?:\h{1,4}:){6}(?<ls32>\h{1,4}:\h{1,4}|(?<IPv4address>(?<dec-octet>[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]|\d)\.\g<dec-octet>\.\g<dec-octet>\.\g<dec-octet>))|::(?:\h{1,4}:){5}\g<ls32>|\h{1,4}?::(?:\h{1,4}:){4}\g<ls32>|(?:(?:\h{1,4}:)?\h{1,4})?::(?:\h{1,4}:){3}\g<ls32>|(?:(?:\h{1,4}:){,2}\h{1,4})?::(?:\h{1,4}:){2}\g<ls32>|(?:(?:\h{1,4}:){,3}\h{1,4})?::\h{1,4}:\g<ls32>|(?:(?:\h{1,4}:){,4}\h{1,4})?::\g<ls32>|(?:(?:\h{1,4}:){,5}\h{1,4})?::\h{1,4}|(?:(?:\h{1,4}:){,6}\h{1,4})?::)|(?<IPvFuture>v\h++\.[!$&-.0-;=A-Z_a-z~]++))\])|\g<IPv4address>|(?<reg-name>(?:%\h\h|[!$&-.0-9;=A-Z_a-z~])*+))(?::(?<port>\d*+))?)(?<path-abempty>(?:\/(?<segment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])*+))*+)|(?<path-absolute>\/(?:(?<segment-nz>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])++)(?:\/\g<segment>)*+)?)|(?<path-rootless>\g<segment-nz>(?:\/\g<segment>)*+)|(?<path-empty>))(?:\?(?<query>[^#]*+))?(?:\#(?<fragment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~\/?])*+))?)\z/
++ RFC3986_relative_ref = /\A(?<relative-ref>(?<relative-part>\/\/(?<authority>(?:(?<userinfo>(?:%\h\h|[!$&-.0-;=A-Z_a-z~])*+)@)?(?<host>(?<IP-literal>\[(?:(?<IPv6address>(?:\h{1,4}:){6}(?<ls32>\h{1,4}:\h{1,4}|(?<IPv4address>(?<dec-octet>[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]|\d)\.\g<dec-octet>\.\g<dec-octet>\.\g<dec-octet>))|::(?:\h{1,4}:){5}\g<ls32>|\h{1,4}?::(?:\h{1,4}:){4}\g<ls32>|(?:(?:\h{1,4}:){,1}\h{1,4})?::(?:\h{1,4}:){3}\g<ls32>|(?:(?:\h{1,4}:){,2}\h{1,4})?::(?:\h{1,4}:){2}\g<ls32>|(?:(?:\h{1,4}:){,3}\h{1,4})?::\h{1,4}:\g<ls32>|(?:(?:\h{1,4}:){,4}\h{1,4})?::\g<ls32>|(?:(?:\h{1,4}:){,5}\h{1,4})?::\h{1,4}|(?:(?:\h{1,4}:){,6}\h{1,4})?::)|(?<IPvFuture>v\h++\.[!$&-.0-;=A-Z_a-z~]++))\])|\g<IPv4address>|(?<reg-name>(?:%\h\h|[!$&-.0-9;=A-Z_a-z~])++))?(?::(?<port>\d*+))?)(?<path-abempty>(?:\/(?<segment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])*+))*+)|(?<path-absolute>\/(?:(?<segment-nz>(?:%\h\h|[!$&-.0-;=@-Z_a-z~])++)(?:\/\g<segment>)*+)?)|(?<path-noscheme>(?<segment-nz-nc>(?:%\h\h|[!$&-.0-9;=@-Z_a-z~])++)(?:\/\g<segment>)*+)|(?<path-empty>))(?:\?(?<query>[^#]*+))?(?:\#(?<fragment>(?:%\h\h|[!$&-.0-;=@-Z_a-z~\/?])*+))?)\z/
+ attr_reader :regexp
+
+ def initialize
+diff --git a/lib/uri/version.rb b/lib/uri/version.rb
+index 82188e2..7497a7d 100644
+--- a/lib/uri/version.rb
++++ b/lib/uri/version.rb
+@@ -1,6 +1,6 @@
+ module URI
+ # :stopdoc:
+- VERSION_CODE = '001100'.freeze
++ VERSION_CODE = '001201'.freeze
+ VERSION = VERSION_CODE.scan(/../).collect{|n| n.to_i}.join('.').freeze
+ # :startdoc:
+ end
+diff --git a/test/uri/test_common.rb b/test/uri/test_common.rb
+index 5e30cda..1d34783 100644
+--- a/test/uri/test_common.rb
++++ b/test/uri/test_common.rb
+@@ -78,6 +78,17 @@ class TestCommon < Test::Unit::TestCase
+ assert_raise(NoMethodError) { Object.new.URI("http://www.ruby-lang.org/") }
+ end
+
++ def test_parse_timeout
++ pre = ->(n) {
++ 'https://example.com/dir/' + 'a' * (n * 100) + '/##.jpg'
++ }
++ assert_linear_performance((1..10).map {|i| i * 100}, rehearsal: 1000, pre: pre) do |uri|
++ assert_raise(URI::InvalidURIError) do
++ URI.parse(uri)
++ end
++ end
++ end
++
+ def test_encode_www_form_component
+ assert_equal("%00+%21%22%23%24%25%26%27%28%29*%2B%2C-.%2F09%3A%3B%3C%3D%3E%3F%40" \
+ "AZ%5B%5C%5D%5E_%60az%7B%7C%7D%7E",
+--
+2.35.5
+
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch b/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch
new file mode 100644
index 0000000000..cf24b13f53
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch
@@ -0,0 +1,73 @@
+From 957bb7cb81995f26c671afce0ee50a5c660e540e Mon Sep 17 00:00:00 2001
+From: Hiroshi SHIBATA <hsbt@ruby-lang.org>
+Date: Wed, 29 Mar 2023 13:28:25 +0900
+Subject: [PATCH] CVE-2023-28756
+
+CVE: CVE-2023-28756
+Upstream-Status: Backport [https://github.com/ruby/ruby/commit/957bb7cb81995f26c671afce0ee50a5c660e540e]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/time.gemspec | 2 +-
+ lib/time.rb | 6 +++---
+ test/test_time.rb | 9 +++++++++
+ 3 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/lib/time.gemspec b/lib/time.gemspec
+index 72fba34..bada91a 100644
+--- a/lib/time.gemspec
++++ b/lib/time.gemspec
+@@ -1,6 +1,6 @@
+ Gem::Specification.new do |spec|
+ spec.name = "time"
+- spec.version = "0.2.0"
++ spec.version = "0.2.2"
+ spec.authors = ["Tanaka Akira"]
+ spec.email = ["akr@fsij.org"]
+
+diff --git a/lib/time.rb b/lib/time.rb
+index bd20a1a..6a13212 100644
+--- a/lib/time.rb
++++ b/lib/time.rb
+@@ -509,8 +509,8 @@ class Time
+ (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+
+ (\d{2,})\s+
+ (\d{2})\s*
+- :\s*(\d{2})\s*
+- (?::\s*(\d{2}))?\s+
++ :\s*(\d{2})
++ (?:\s*:\s*(\d\d))?\s+
+ ([+-]\d{4}|
+ UT|GMT|EST|EDT|CST|CDT|MST|MDT|PST|PDT|[A-IK-Z])/ix =~ date
+ # Since RFC 2822 permit comments, the regexp has no right anchor.
+@@ -701,7 +701,7 @@ class Time
+ #
+ # If self is a UTC time, Z is used as TZD. [+-]hh:mm is used otherwise.
+ #
+- # +fractional_digits+ specifies a number of digits to use for fractional
++ # +fraction_digits+ specifies a number of digits to use for fractional
+ # seconds. Its default value is 0.
+ #
+ # require 'time'
+diff --git a/test/test_time.rb b/test/test_time.rb
+index b50d841..23e8e10 100644
+--- a/test/test_time.rb
++++ b/test/test_time.rb
+@@ -62,6 +62,15 @@ class TestTimeExtension < Test::Unit::TestCase # :nodoc:
+ assert_equal(true, t.utc?)
+ end
+
++ def test_rfc2822_nonlinear
++ pre = ->(n) {"0 Feb 00 00 :00" + " " * n}
++ assert_linear_performance([100, 500, 5000, 50_000], pre: pre) do |s|
++ assert_raise(ArgumentError) do
++ Time.rfc2822(s)
++ end
++ end
++ end
++
+ if defined?(Ractor)
+ def test_rfc2822_ractor
+ assert_ractor(<<~RUBY, require: 'time')
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2023-36617_1.patch b/meta/recipes-devtools/ruby/ruby/CVE-2023-36617_1.patch
new file mode 100644
index 0000000000..57a15d302e
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2023-36617_1.patch
@@ -0,0 +1,52 @@
+From 9c2eb12776c1b5df2517a7e618e5fe818cc3395e Mon Sep 17 00:00:00 2001
+From: Nobuyoshi Nakada <nobu@ruby-lang.org>
+Date: Thu, 27 Jul 2023 15:53:01 +0800
+Subject: [PATCH] ruby: Fix quadratic backtracking on invalid relative URI
+
+Upstream-Status: Backport [https://github.com/ruby/uri/commit/9010ee2536adda10a0555ae1ed6fe2f5808e6bf1]
+CVE: CVE-2023-36617
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ lib/uri/rfc2396_parser.rb | 4 ++--
+ test/uri/test_parser.rb | 12 ++++++++++++
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/lib/uri/rfc2396_parser.rb b/lib/uri/rfc2396_parser.rb
+index 76a8f99..00c66cf 100644
+--- a/lib/uri/rfc2396_parser.rb
++++ b/lib/uri/rfc2396_parser.rb
+@@ -497,8 +497,8 @@ module URI
+ ret = {}
+
+ # for URI::split
+- ret[:ABS_URI] = Regexp.new('\A\s*' + pattern[:X_ABS_URI] + '\s*\z', Regexp::EXTENDED)
+- ret[:REL_URI] = Regexp.new('\A\s*' + pattern[:X_REL_URI] + '\s*\z', Regexp::EXTENDED)
++ ret[:ABS_URI] = Regexp.new('\A\s*+' + pattern[:X_ABS_URI] + '\s*\z', Regexp::EXTENDED)
++ ret[:REL_URI] = Regexp.new('\A\s*+' + pattern[:X_REL_URI] + '\s*\z', Regexp::EXTENDED)
+
+ # for URI::extract
+ ret[:URI_REF] = Regexp.new(pattern[:URI_REF])
+diff --git a/test/uri/test_parser.rb b/test/uri/test_parser.rb
+index 03de137..01ed32a 100644
+--- a/test/uri/test_parser.rb
++++ b/test/uri/test_parser.rb
+@@ -63,4 +63,16 @@ class URI::TestParser < Test::Unit::TestCase
+ assert_equal("\u3042", p1.unescape('%e3%81%82'.force_encoding(Encoding::US_ASCII)))
+ assert_equal("\xe3\x83\x90\xe3\x83\x90", p1.unescape("\xe3\x83\x90%e3%83%90"))
+ end
++
++ def test_rfc2822_parse_relative_uri
++ pre = ->(length) {
++ " " * length + "\0"
++ }
++ parser = URI::RFC2396_Parser.new
++ assert_linear_performance((1..5).map {|i| 10**i}, pre: pre) do |uri|
++ assert_raise(URI::InvalidURIError) do
++ parser.split(uri)
++ end
++ end
++ end
+ end
+--
+2.40.0
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2023-36617_2.patch b/meta/recipes-devtools/ruby/ruby/CVE-2023-36617_2.patch
new file mode 100644
index 0000000000..ff558183b6
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2023-36617_2.patch
@@ -0,0 +1,47 @@
+From eea5868120509c245216c4b5c2d4b5db1c593d0e Mon Sep 17 00:00:00 2001
+From: Nobuyoshi Nakada <nobu@ruby-lang.org>
+Date: Thu, 27 Jul 2023 16:16:30 +0800
+Subject: [PATCH] ruby: Fix quadratic backtracking on invalid port number
+
+Upstream-Status: Backport [https://github.com/ruby/uri/commit/9d7bcef1e6ad23c9c6e4932f297fb737888144c8]
+CVE: CVE-2023-36617
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ lib/uri/rfc3986_parser.rb | 2 +-
+ test/uri/test_parser.rb | 10 ++++++++++
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/lib/uri/rfc3986_parser.rb b/lib/uri/rfc3986_parser.rb
+index 3c89311..cde3ea7 100644
+--- a/lib/uri/rfc3986_parser.rb
++++ b/lib/uri/rfc3986_parser.rb
+@@ -101,7 +101,7 @@ module URI
+ QUERY: /\A(?:%\h\h|[!$&-.0-;=@-Z_a-z~\/?])*\z/,
+ FRAGMENT: /\A(?:%\h\h|[!$&-.0-;=@-Z_a-z~\/?])*\z/,
+ OPAQUE: /\A(?:[^\/].*)?\z/,
+- PORT: /\A[\x09\x0a\x0c\x0d ]*\d*[\x09\x0a\x0c\x0d ]*\z/,
++ PORT: /\A[\x09\x0a\x0c\x0d ]*+\d*[\x09\x0a\x0c\x0d ]*\z/,
+ }
+ end
+
+diff --git a/test/uri/test_parser.rb b/test/uri/test_parser.rb
+index 01ed32a..81c2210 100644
+--- a/test/uri/test_parser.rb
++++ b/test/uri/test_parser.rb
+@@ -75,4 +75,14 @@ class URI::TestParser < Test::Unit::TestCase
+ end
+ end
+ end
++
++ def test_rfc3986_port_check
++ pre = ->(length) {"\t" * length + "a"}
++ uri = URI.parse("http://my.example.com")
++ assert_linear_performance((1..5).map {|i| 10**i}, pre: pre) do |port|
++ assert_raise(URI::InvalidComponentError) do
++ uri.port = port
++ end
++ end
++ end
+ end
+--
+2.40.0
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2024-27281.patch b/meta/recipes-devtools/ruby/ruby/CVE-2024-27281.patch
new file mode 100644
index 0000000000..6f4b35a786
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2024-27281.patch
@@ -0,0 +1,97 @@
+From da7a0c7553ef7250ca665a3fecdc01dbaacbb43d Mon Sep 17 00:00:00 2001
+From: Nobuyoshi Nakada <nobu@ruby-lang.org>
+Date: Mon, 15 Apr 2024 11:40:00 +0000
+Subject: [PATCH] Filter marshaled objets
+
+CVE: CVE-2024-27281
+Upstream-Status: Backport [https://github.com/ruby/rdoc/commit/da7a0c7553ef7250ca665a3fecdc01dbaacbb43d]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ lib/rdoc/store.rb | 45 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 26 insertions(+), 19 deletions(-)
+
+diff --git a/lib/rdoc/store.rb b/lib/rdoc/store.rb
+index 5ba671c..c793e49 100644
+--- a/lib/rdoc/store.rb
++++ b/lib/rdoc/store.rb
+@@ -556,9 +556,7 @@ class RDoc::Store
+ def load_cache
+ #orig_enc = @encoding
+
+- File.open cache_path, 'rb' do |io|
+- @cache = Marshal.load io.read
+- end
++ @cache = marshal_load(cache_path)
+
+ load_enc = @cache[:encoding]
+
+@@ -615,9 +613,7 @@ class RDoc::Store
+ def load_class_data klass_name
+ file = class_file klass_name
+
+- File.open file, 'rb' do |io|
+- Marshal.load io.read
+- end
++ marshal_load(file)
+ rescue Errno::ENOENT => e
+ error = MissingFileError.new(self, file, klass_name)
+ error.set_backtrace e.backtrace
+@@ -630,14 +626,10 @@ class RDoc::Store
+ def load_method klass_name, method_name
+ file = method_file klass_name, method_name
+
+- File.open file, 'rb' do |io|
+- obj = Marshal.load io.read
+- obj.store = self
+- obj.parent =
+- find_class_or_module(klass_name) || load_class(klass_name) unless
+- obj.parent
+- obj
+- end
++ obj = marshal_load(file)
++ obj.store = self
++ obj.parent ||= find_class_or_module(klass_name) || load_class(klass_name)
++ obj
+ rescue Errno::ENOENT => e
+ error = MissingFileError.new(self, file, klass_name + method_name)
+ error.set_backtrace e.backtrace
+@@ -650,11 +642,9 @@ class RDoc::Store
+ def load_page page_name
+ file = page_file page_name
+
+- File.open file, 'rb' do |io|
+- obj = Marshal.load io.read
+- obj.store = self
+- obj
+- end
++ obj = marshal_load(file)
++ obj.store = self
++ obj
+ rescue Errno::ENOENT => e
+ error = MissingFileError.new(self, file, page_name)
+ error.set_backtrace e.backtrace
+@@ -976,4 +966,21 @@ class RDoc::Store
+ @unique_modules
+ end
+
++ private
++ def marshal_load(file)
++ File.open(file, 'rb') {|io| Marshal.load(io, MarshalFilter)}
++ end
++
++ MarshalFilter = proc do |obj|
++ case obj
++ when true, false, nil, Array, Class, Encoding, Hash, Integer, String, Symbol, RDoc::Text
++ else
++ unless obj.class.name.start_with?("RDoc::")
++ raise TypeError, "not permitted class: #{obj.class.name}"
++ end
++ end
++ obj
++ end
++ private_constant :MarshalFilter
++
+ end
+--
+2.35.5
diff --git a/meta/recipes-devtools/ruby/ruby_3.1.2.bb b/meta/recipes-devtools/ruby/ruby_3.1.3.bb
index 38ba46731b..2ad3c9e207 100644
--- a/meta/recipes-devtools/ruby/ruby_3.1.2.bb
+++ b/meta/recipes-devtools/ruby/ruby_3.1.3.bb
@@ -1,8 +1,25 @@
-require ruby.inc
-
-DEPENDS:append:libc-musl = " libucontext"
-
-SRC_URI += " \
+SUMMARY = "An interpreter of object-oriented scripting language"
+DESCRIPTION = "Ruby is an interpreted scripting language for quick \
+and easy object-oriented programming. It has many features to process \
+text files and to do system management tasks (as in Perl). \
+It is simple, straight-forward, and extensible. \
+"
+HOMEPAGE = "http://www.ruby-lang.org/"
+SECTION = "devel/ruby"
+LICENSE = "Ruby | BSD-2-Clause | BSD-3-Clause | GPL-2.0-only | ISC | MIT"
+LIC_FILES_CHKSUM = "file://COPYING;md5=5b8c87559868796979806100db3f3805 \
+ file://BSDL;md5=8b50bc6de8f586dc66790ba11d064d75 \
+ file://GPL;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
+ file://LEGAL;md5=f260190bc1e92e363f0ee3c0463d4c7c \
+ "
+
+DEPENDS = "zlib openssl libyaml gdbm readline libffi"
+DEPENDS:append:class-target = " ruby-native"
+
+SHRT_VER = "${@oe.utils.trim_version("${PV}", 2)}"
+SRC_URI = "http://cache.ruby-lang.org/pub/ruby/${SHRT_VER}/ruby-${PV}.tar.gz \
+ file://0001-extmk-fix-cross-compilation-of-external-gems.patch \
+ file://0002-Obey-LDFLAGS-for-the-link-of-libruby.patch \
file://remove_has_include_macros.patch \
file://run-ptest \
file://0001-template-Makefile.in-do-not-write-host-cross-cc-item.patch \
@@ -12,9 +29,32 @@ SRC_URI += " \
file://0005-Mark-Gemspec-reproducible-change-fixing-784225-too.patch \
file://0006-Make-gemspecs-reproducible.patch \
file://0001-vm_dump.c-Define-REG_S1-and-REG_S2-for-musl-riscv.patch \
+ file://CVE-2023-28756.patch \
+ file://CVE-2023-28755.patch \
+ file://CVE-2023-36617_1.patch \
+ file://CVE-2023-36617_2.patch \
+ file://CVE-2024-27281.patch \
"
+UPSTREAM_CHECK_URI = "https://www.ruby-lang.org/en/downloads/"
+
+inherit autotools ptest pkgconfig
+
+
+# This snippet lets compiled extensions which rely on external libraries,
+# such as zlib, compile properly. If we don't do this, then when extmk.rb
+# runs, it uses the native libraries instead of the target libraries, and so
+# none of the linking operations succeed -- which makes extconf.rb think
+# that the libraries aren't available and hence that the extension can't be
+# built.
+
+do_configure:prepend() {
+ sed -i "s#%%TARGET_CFLAGS%%#$CFLAGS#; s#%%TARGET_LDFLAGS%%#$LDFLAGS#" ${S}/common.mk
+ rm -rf ${S}/ruby/
+}
+
+DEPENDS:append:libc-musl = " libucontext"
-SRC_URI[sha256sum] = "61843112389f02b735428b53bb64cf988ad9fb81858b8248e22e57336f24a83e"
+SRC_URI[sha256sum] = "5ea498a35f4cd15875200a52dde42b6eb179e1264e17d78732c3a57cd1c6ab9e"
PACKAGECONFIG ??= ""
PACKAGECONFIG += "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}"
diff --git a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
index 7f72f3388a..b6b81d5c1a 100644
--- a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
+++ b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
@@ -1,7 +1,7 @@
[Unit]
Description=Run pending postinsts
DefaultDependencies=no
-After=systemd-remount-fs.service systemd-tmpfiles-setup.service tmp.mount
+After=systemd-remount-fs.service systemd-tmpfiles-setup.service tmp.mount ldconfig.service
Before=sysinit.target
[Service]
diff --git a/meta/recipes-devtools/rust/rust-common.inc b/meta/recipes-devtools/rust/rust-common.inc
index 621cd4ad57..a73367bbd5 100644
--- a/meta/recipes-devtools/rust/rust-common.inc
+++ b/meta/recipes-devtools/rust/rust-common.inc
@@ -109,7 +109,7 @@ def llvm_features_from_target_fpu(d):
# TARGET_FPU can be hard or soft. +soft-float tell llvm to use soft float
# ABI. There is no option for hard.
- fpu = d.getVar('TARGET_FPU', True)
+ fpu = d.getVar('TARGET_FPU')
return ["+soft-float"] if fpu == "soft" else []
def llvm_features(d):
@@ -119,12 +119,12 @@ def llvm_features(d):
## arm-unknown-linux-gnueabihf
-DATA_LAYOUT[arm] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
-TARGET_ENDIAN[arm] = "little"
-TARGET_POINTER_WIDTH[arm] = "32"
-TARGET_C_INT_WIDTH[arm] = "32"
-MAX_ATOMIC_WIDTH[arm] = "64"
-FEATURES[arm] = "+v6,+vfp2"
+DATA_LAYOUT[arm-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+TARGET_ENDIAN[arm-eabi] = "little"
+TARGET_POINTER_WIDTH[arm-eabi] = "32"
+TARGET_C_INT_WIDTH[arm-eabi] = "32"
+MAX_ATOMIC_WIDTH[arm-eabi] = "64"
+FEATURES[arm-eabi] = "+v6,+vfp2"
## armv7-unknown-linux-gnueabihf
DATA_LAYOUT[armv7-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
@@ -297,6 +297,12 @@ def rust_gen_target(d, thing, wd, features, cpu, arch, abi=""):
sys = sys_for(d, thing)
prefix = prefix_for(d, thing)
+ if thing == "TARGET":
+ abi = d.getVar('ABIEXTENSION')
+ # arm and armv7 have different targets in llvm
+ if arch == "arm" and target_is_armv7(d):
+ arch = 'armv7'
+
rust_arch = oe.rust.arch_to_rust_arch(arch)
if abi:
@@ -307,12 +313,13 @@ def rust_gen_target(d, thing, wd, features, cpu, arch, abi=""):
features = features or d.getVarFlag('FEATURES', arch_abi) or ""
features = features.strip()
+ llvm_target = d.getVar('RUST_TARGET_SYS')
+ if thing == "BUILD":
+ llvm_target = d.getVar('RUST_HOST_SYS')
+
# build tspec
tspec = {}
- if bb.data.inherits_class('cross-canadian', d):
- tspec['llvm-target'] = d.getVar('RUST_HOST_SYS', arch_abi)
- else:
- tspec['llvm-target'] = d.getVar('RUST_TARGET_SYS', arch_abi)
+ tspec['llvm-target'] = llvm_target
tspec['data-layout'] = d.getVarFlag('DATA_LAYOUT', arch_abi)
tspec['max-atomic-width'] = int(d.getVarFlag('MAX_ATOMIC_WIDTH', arch_abi))
tspec['target-pointer-width'] = d.getVarFlag('TARGET_POINTER_WIDTH', arch_abi)
diff --git a/meta/recipes-devtools/rust/rust-cross-canadian-common.inc b/meta/recipes-devtools/rust/rust-cross-canadian-common.inc
index 1f21c8af26..df4901f1fa 100644
--- a/meta/recipes-devtools/rust/rust-cross-canadian-common.inc
+++ b/meta/recipes-devtools/rust/rust-cross-canadian-common.inc
@@ -27,9 +27,10 @@ DEBUG_PREFIX_MAP = "-fdebug-prefix-map=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDP
python do_rust_gen_targets () {
wd = d.getVar('WORKDIR') + '/targets/'
- rust_gen_target(d, 'TARGET', wd, d.getVar('TARGET_LLVM_FEATURES') or "", d.getVar('TARGET_LLVM_CPU'), d.getVar('TARGET_ARCH'))
- rust_gen_target(d, 'HOST', wd, "", "generic", d.getVar('HOST_ARCH'))
+ # Order of BUILD, HOST, TARGET is important in case the files overwrite, most specific last
rust_gen_target(d, 'BUILD', wd, "", "generic", d.getVar('BUILD_ARCH'))
+ rust_gen_target(d, 'HOST', wd, "", "generic", d.getVar('HOST_ARCH'))
+ rust_gen_target(d, 'TARGET', wd, d.getVar('TARGET_LLVM_FEATURES') or "", d.getVar('TARGET_LLVM_CPU'), d.getVar('TARGET_ARCH'))
}
INHIBIT_DEFAULT_RUST_DEPS = "1"
diff --git a/meta/recipes-devtools/rust/rust-cross.inc b/meta/recipes-devtools/rust/rust-cross.inc
index f6babfeeda..2e47a3aa5f 100644
--- a/meta/recipes-devtools/rust/rust-cross.inc
+++ b/meta/recipes-devtools/rust/rust-cross.inc
@@ -1,22 +1,9 @@
python do_rust_gen_targets () {
wd = d.getVar('WORKDIR') + '/targets/'
- # It is important 'TARGET' is last here so that it overrides our less
- # informed choices for BUILD & HOST if TARGET happens to be the same as
- # either of them.
- for thing in ['BUILD', 'HOST', 'TARGET']:
- bb.debug(1, "rust_gen_target for " + thing)
- features = ""
- cpu = "generic"
- arch = d.getVar('{}_ARCH'.format(thing))
- abi = ""
- if thing is "TARGET":
- abi = d.getVar('ABIEXTENSION')
- # arm and armv7 have different targets in llvm
- if arch == "arm" and target_is_armv7(d):
- arch = 'armv7'
- features = d.getVar('TARGET_LLVM_FEATURES') or ""
- cpu = d.getVar('TARGET_LLVM_CPU')
- rust_gen_target(d, thing, wd, features, cpu, arch, abi)
+ # Order of BUILD, HOST, TARGET is important in case the files overwrite, most specific last
+ rust_gen_target(d, 'BUILD', wd, "", "generic", d.getVar('BUILD_ARCH'))
+ rust_gen_target(d, 'HOST', wd, "", "generic", d.getVar('HOST_ARCH'))
+ rust_gen_target(d, 'TARGET', wd, d.getVar('TARGET_LLVM_FEATURES') or "", d.getVar('TARGET_LLVM_CPU'), d.getVar('TARGET_ARCH'))
}
# Otherwise we'll depend on what we provide
diff --git a/meta/recipes-devtools/rust/rust-llvm.inc b/meta/recipes-devtools/rust/rust-llvm.inc
index 5c2ccdac9a..416a07cd40 100644
--- a/meta/recipes-devtools/rust/rust-llvm.inc
+++ b/meta/recipes-devtools/rust/rust-llvm.inc
@@ -3,7 +3,9 @@ LICENSE ?= "Apache-2.0-with-LLVM-exception"
HOMEPAGE = "http://www.rust-lang.org"
SRC_URI += "file://0002-llvm-allow-env-override-of-exe-path.patch;striplevel=2 \
- file://0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch;striplevel=2"
+ file://0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch;striplevel=2 \
+ file://0003-Support-Add-missing-cstdint-header-to-Signals.h.patch;striplevel=2 \
+"
S = "${RUSTSRC}/src/llvm-project/llvm"
@@ -23,9 +25,11 @@ CXXFLAGS:remove = "-g"
LLVM_DIR = "llvm${LLVM_RELEASE}"
+RUST_LLVM_TARGETS ?= "ARM;AArch64;Mips;PowerPC;RISCV;X86"
+
EXTRA_OECMAKE = " \
-DCMAKE_BUILD_TYPE=Release \
- -DLLVM_TARGETS_TO_BUILD='ARM;AArch64;Mips;PowerPC;RISCV;X86' \
+ -DLLVM_TARGETS_TO_BUILD='${RUST_LLVM_TARGETS}' \
-DLLVM_BUILD_DOCS=OFF \
-DLLVM_ENABLE_TERMINFO=OFF \
-DLLVM_ENABLE_ZLIB=OFF \
diff --git a/meta/recipes-devtools/rust/rust-llvm/0003-Support-Add-missing-cstdint-header-to-Signals.h.patch b/meta/recipes-devtools/rust/rust-llvm/0003-Support-Add-missing-cstdint-header-to-Signals.h.patch
new file mode 100644
index 0000000000..6ed23aa9c5
--- /dev/null
+++ b/meta/recipes-devtools/rust/rust-llvm/0003-Support-Add-missing-cstdint-header-to-Signals.h.patch
@@ -0,0 +1,32 @@
+From a94bf34221fc4519bd8ec72560c2d363ffe2de4c Mon Sep 17 00:00:00 2001
+From: Sergei Trofimovich <slyich@gmail.com>
+Date: Mon, 23 May 2022 08:03:23 +0100
+Subject: [PATCH] [Support] Add missing <cstdint> header to Signals.h
+
+Without the change llvm build fails on this week's gcc-13 snapshot as:
+
+ [ 0%] Building CXX object lib/Support/CMakeFiles/LLVMSupport.dir/Signals.cpp.o
+ In file included from llvm/lib/Support/Signals.cpp:14:
+ llvm/include/llvm/Support/Signals.h:119:8: error: variable or field 'CleanupOnSignal' declared void
+ 119 | void CleanupOnSignal(uintptr_t Context);
+ | ^~~~~~~~~~~~~~~
+
+Upstream-Status: Backport [llvmorg-15.0.0 ff1681ddb303223973653f7f5f3f3435b48a1983]
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+---
+ llvm/include/llvm/Support/Signals.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/llvm/include/llvm/Support/Signals.h b/llvm/include/llvm/Support/Signals.h
+index 44f5a750ff5c..937e0572d4a7 100644
+--- a/llvm/include/llvm/Support/Signals.h
++++ b/llvm/include/llvm/Support/Signals.h
+@@ -14,6 +14,7 @@
+ #ifndef LLVM_SUPPORT_SIGNALS_H
+ #define LLVM_SUPPORT_SIGNALS_H
+
++#include <cstdint>
+ #include <string>
+
+ namespace llvm {
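The failure mode described in this backport is generic to newer toolchains, which dropped some transitive header includes: any declaration that names a fixed-width type must include the header defining it. A minimal C rendering of the same situation, with invented file and function names, is shown below; only the declaration pattern mirrors Signals.h.

/* missing_stdint_demo.c - illustrative only. With the stdint.h include
 * removed, gcc 13 reports "unknown type name 'uintptr_t'", mirroring the
 * Signals.h failure that the patch fixes by adding <cstdint>. */
#include <stdint.h>
#include <stdio.h>

void CleanupOnSignal(uintptr_t Context);   /* declaration needs uintptr_t */

void CleanupOnSignal(uintptr_t Context)
{
    printf("cleanup for context %lu\n", (unsigned long)Context);
}

int main(void)
{
    CleanupOnSignal(42);
    return 0;
}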
diff --git a/meta/recipes-devtools/rust/rust-source.inc b/meta/recipes-devtools/rust/rust-source.inc
index ea70ad786f..c377a680a7 100644
--- a/meta/recipes-devtools/rust/rust-source.inc
+++ b/meta/recipes-devtools/rust/rust-source.inc
@@ -5,3 +5,6 @@ RUSTSRC = "${WORKDIR}/rustc-${PV}-src"
UPSTREAM_CHECK_URI = "https://forge.rust-lang.org/infra/other-installation-methods.html"
UPSTREAM_CHECK_REGEX = "rustc-(?P<pver>\d+(\.\d+)+)-src"
+
+#CVE-2024-24576 is specific to Microsoft Windows
+CVE_CHECK_IGNORE += "CVE-2024-24576"
diff --git a/meta/recipes-devtools/rust/rust.inc b/meta/recipes-devtools/rust/rust.inc
index f39228e3c0..008b2ce4a4 100644
--- a/meta/recipes-devtools/rust/rust.inc
+++ b/meta/recipes-devtools/rust/rust.inc
@@ -79,7 +79,7 @@ python do_configure() {
config = configparser.RawConfigParser()
# [target.ARCH-poky-linux]
- target_section = "target.{}".format(d.getVar('TARGET_SYS', True))
+ target_section = "target.{}".format(d.getVar('TARGET_SYS'))
config.add_section(target_section)
llvm_config = d.expand("${YOCTO_ALTERNATE_EXE_PATH}")
@@ -90,7 +90,7 @@ python do_configure() {
# If we don't do this rust-native will compile it's own llvm for BUILD.
# [target.${BUILD_ARCH}-unknown-linux-gnu]
- target_section = "target.{}".format(d.getVar('SNAPSHOT_BUILD_SYS', True))
+ target_section = "target.{}".format(d.getVar('SNAPSHOT_BUILD_SYS'))
config.add_section(target_section)
config.set(target_section, "llvm-config", e(llvm_config))
@@ -124,26 +124,26 @@ python do_configure() {
config.set("build", "vendor", e(True))
if not "targets" in locals():
- targets = [d.getVar("TARGET_SYS", True)]
+ targets = [d.getVar("TARGET_SYS")]
config.set("build", "target", e(targets))
if not "hosts" in locals():
- hosts = [d.getVar("HOST_SYS", True)]
+ hosts = [d.getVar("HOST_SYS")]
config.set("build", "host", e(hosts))
# We can't use BUILD_SYS since that is something the rust snapshot knows
# nothing about when trying to build some stage0 tools (like fabricate)
- config.set("build", "build", e(d.getVar("SNAPSHOT_BUILD_SYS", True)))
+ config.set("build", "build", e(d.getVar("SNAPSHOT_BUILD_SYS")))
# [install]
config.add_section("install")
# ./x.py install doesn't have any notion of "destdir"
# but we can prepend ${D} to all the directories instead
- config.set("install", "prefix", e(d.getVar("D", True) + d.getVar("prefix", True)))
- config.set("install", "bindir", e(d.getVar("D", True) + d.getVar("bindir", True)))
- config.set("install", "libdir", e(d.getVar("D", True) + d.getVar("libdir", True)))
- config.set("install", "datadir", e(d.getVar("D", True) + d.getVar("datadir", True)))
- config.set("install", "mandir", e(d.getVar("D", True) + d.getVar("mandir", True)))
+ config.set("install", "prefix", e(d.getVar("D") + d.getVar("prefix")))
+ config.set("install", "bindir", e(d.getVar("D") + d.getVar("bindir")))
+ config.set("install", "libdir", e(d.getVar("D") + d.getVar("libdir")))
+ config.set("install", "datadir", e(d.getVar("D") + d.getVar("datadir")))
+ config.set("install", "mandir", e(d.getVar("D") + d.getVar("mandir")))
with open("config.toml", "w") as f:
f.write('changelog-seen = 2\n\n')
diff --git a/meta/recipes-devtools/strace/strace/0001-caps-abbrev.awk-fix-gawk-s-path.patch b/meta/recipes-devtools/strace/strace/0001-caps-abbrev.awk-fix-gawk-s-path.patch
deleted file mode 100644
index 235e803641..0000000000
--- a/meta/recipes-devtools/strace/strace/0001-caps-abbrev.awk-fix-gawk-s-path.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From 597cc206d982e7237eb93fdc33e8c4bb6bb2d796 Mon Sep 17 00:00:00 2001
-From: Robert Yang <liezhi.yang@windriver.com>
-Date: Thu, 9 Feb 2017 01:27:49 -0800
-Subject: [PATCH] caps-abbrev.awk: fix gawk's path
-
-It should be /usr/bin/gawk as other scripts use in this package.
-
-Upstream-Status: Pending
-
-Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
-
----
- tests-m32/caps-abbrev.awk | 2 +-
- tests-mx32/caps-abbrev.awk | 2 +-
- tests/caps-abbrev.awk | 2 +-
- 3 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/tests-m32/caps-abbrev.awk b/tests-m32/caps-abbrev.awk
-index c00023b..a56cd56 100644
---- a/tests-m32/caps-abbrev.awk
-+++ b/tests-m32/caps-abbrev.awk
-@@ -1,4 +1,4 @@
--#!/bin/gawk
-+#!/usr/bin/gawk
- #
- # This file is part of caps strace test.
- #
-diff --git a/tests-mx32/caps-abbrev.awk b/tests-mx32/caps-abbrev.awk
-index c00023b..a56cd56 100644
---- a/tests-mx32/caps-abbrev.awk
-+++ b/tests-mx32/caps-abbrev.awk
-@@ -1,4 +1,4 @@
--#!/bin/gawk
-+#!/usr/bin/gawk
- #
- # This file is part of caps strace test.
- #
-diff --git a/tests/caps-abbrev.awk b/tests/caps-abbrev.awk
-index c00023b..a56cd56 100644
---- a/tests/caps-abbrev.awk
-+++ b/tests/caps-abbrev.awk
-@@ -1,4 +1,4 @@
--#!/bin/gawk
-+#!/usr/bin/gawk
- #
- # This file is part of caps strace test.
- #
diff --git a/meta/recipes-devtools/strace/strace/3bbfb541b258baec9eba674b5d8dc30007a61542.patch b/meta/recipes-devtools/strace/strace/3bbfb541b258baec9eba674b5d8dc30007a61542.patch
new file mode 100644
index 0000000000..b4c6ff99de
--- /dev/null
+++ b/meta/recipes-devtools/strace/strace/3bbfb541b258baec9eba674b5d8dc30007a61542.patch
@@ -0,0 +1,50 @@
+From 3bbfb541b258baec9eba674b5d8dc30007a61542 Mon Sep 17 00:00:00 2001
+From: "Dmitry V. Levin" <ldv@strace.io>
+Date: Wed, 21 Jun 2023 08:00:00 +0000
+Subject: [PATCH] net: enhance getsockopt decoding
+
+When getsockopt syscall fails the kernel sometimes updates the optlen
+argument, for example, NETLINK_LIST_MEMBERSHIPS updates it even if
+optval is not writable.
+
+* src/net.c (SYS_FUNC(getsockopt)): Try to fetch and print optlen
+argument on exiting syscall regardless of getsockopt exit status.
+
+Upstream-Status: Backport
+---
+ src/net.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/src/net.c b/src/net.c
+index f68ccb947..7244b5e57 100644
+--- a/src/net.c
++++ b/src/net.c
+@@ -1038,7 +1038,7 @@ SYS_FUNC(getsockopt)
+ } else {
+ ulen = get_tcb_priv_ulong(tcp);
+
+- if (syserror(tcp) || umove(tcp, tcp->u_arg[4], &rlen) < 0) {
++ if (umove(tcp, tcp->u_arg[4], &rlen) < 0) {
+ /* optval */
+ printaddr(tcp->u_arg[3]);
+ tprint_arg_next();
+@@ -1047,6 +1047,19 @@ SYS_FUNC(getsockopt)
+ tprint_indirect_begin();
+ PRINT_VAL_D(ulen);
+ tprint_indirect_end();
++ } else if (syserror(tcp)) {
++ /* optval */
++ printaddr(tcp->u_arg[3]);
++ tprint_arg_next();
++
++ /* optlen */
++ tprint_indirect_begin();
++ if (ulen != rlen) {
++ PRINT_VAL_D(ulen);
++ tprint_value_changed();
++ }
++ PRINT_VAL_D(rlen);
++ tprint_indirect_end();
+ } else {
+ /* optval */
+ print_getsockopt(tcp, tcp->u_arg[1], tcp->u_arg[2],
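To make the decoding change above concrete: optlen is a value-result argument, so a tracer has to be prepared to print both the length passed in and the length written back by the kernel, even on error paths. The small sketch below is not strace code; it just demonstrates the in/out behaviour with a plain SO_RCVBUF query, which strace would render with the "=>" change notation seen in the test update further down.

/* optlen_demo.c - illustrative only; shows the value-result optlen
 * that the strace patch now reports even when getsockopt fails. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    unsigned char buf[64];
    socklen_t len = sizeof(buf);          /* deliberately larger than needed */

    if (fd < 0)
        return 1;

    /* SO_RCVBUF only needs 4 bytes, so the kernel shrinks len on return */
    if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, buf, &len) == 0)
        printf("optlen went from %zu to %u\n", sizeof(buf), (unsigned)len);

    close(fd);
    return 0;
}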
diff --git a/meta/recipes-devtools/strace/strace/f31c2f4494779e5c5f170ad10539bfc2dfafe967.patch b/meta/recipes-devtools/strace/strace/f31c2f4494779e5c5f170ad10539bfc2dfafe967.patch
new file mode 100644
index 0000000000..a0843836c2
--- /dev/null
+++ b/meta/recipes-devtools/strace/strace/f31c2f4494779e5c5f170ad10539bfc2dfafe967.patch
@@ -0,0 +1,50 @@
+From f31c2f4494779e5c5f170ad10539bfc2dfafe967 Mon Sep 17 00:00:00 2001
+From: "Dmitry V. Levin" <ldv@strace.io>
+Date: Sat, 24 Jun 2023 08:00:00 +0000
+Subject: [PATCH] tests: update sockopt-sol_netlink test
+
+Update sockopt-sol_netlink test that started to fail, likely
+due to recent linux kernel commit f4e4534850a9 ("net/netlink: fix
+NETLINK_LIST_MEMBERSHIPS length report").
+
+* tests/sockopt-sol_netlink.c (main): Always print changing optlen value
+on exiting syscall.
+
+Reported-by: Alexander Gordeev <agordeev@linux.ibm.com>
+---
+ tests/sockopt-sol_netlink.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+Upstream-Status: Backport
+
+diff --git a/tests/sockopt-sol_netlink.c b/tests/sockopt-sol_netlink.c
+index 82b98adc23..1c33219ac5 100644
+--- a/tests/sockopt-sol_netlink.c
++++ b/tests/sockopt-sol_netlink.c
+@@ -94,7 +94,10 @@ main(void)
+ printf("%p", val);
+ else
+ printf("[%d]", *val);
+- printf(", [%d]) = %s\n", *len, errstr);
++ printf(", [%d", (int) sizeof(*val));
++ if ((int) sizeof(*val) != *len)
++ printf(" => %d", *len);
++ printf("]) = %s\n", errstr);
+
+ /* optlen larger than necessary - shortened */
+ *len = sizeof(*val) + 1;
+@@ -150,8 +153,12 @@ main(void)
+ /* optval EFAULT - print address */
+ *len = sizeof(*val);
+ get_sockopt(fd, names[i].val, efault, len);
+- printf("getsockopt(%d, SOL_NETLINK, %s, %p, [%d]) = %s\n",
+- fd, names[i].str, efault, *len, errstr);
++ printf("getsockopt(%d, SOL_NETLINK, %s, %p",
++ fd, names[i].str, efault);
++ printf(", [%d", (int) sizeof(*val));
++ if ((int) sizeof(*val) != *len)
++ printf(" => %d", *len);
++ printf("]) = %s\n", errstr);
+
+ /* optlen EFAULT - print address */
+ get_sockopt(fd, names[i].val, val, len + 1);
diff --git a/meta/recipes-devtools/strace/strace/update-gawk-paths.patch b/meta/recipes-devtools/strace/strace/update-gawk-paths.patch
index 0c683496ae..a16ede95c2 100644
--- a/meta/recipes-devtools/strace/strace/update-gawk-paths.patch
+++ b/meta/recipes-devtools/strace/strace/update-gawk-paths.patch
@@ -125,3 +125,33 @@ index dce78f5..573d9ea 100644
#
# Copyright (c) 2014-2015 Dmitry V. Levin <ldv@strace.io>
# Copyright (c) 2016 Elvira Khabirova <lineprinter0@gmail.com>
+diff --git a/tests-m32/caps-abbrev.awk b/tests-m32/caps-abbrev.awk
+index c00023b..a56cd56 100644
+--- a/tests-m32/caps-abbrev.awk
++++ b/tests-m32/caps-abbrev.awk
+@@ -1,4 +1,4 @@
+-#!/bin/gawk
++#!/usr/bin/gawk
+ #
+ # This file is part of caps strace test.
+ #
+diff --git a/tests-mx32/caps-abbrev.awk b/tests-mx32/caps-abbrev.awk
+index c00023b..a56cd56 100644
+--- a/tests-mx32/caps-abbrev.awk
++++ b/tests-mx32/caps-abbrev.awk
+@@ -1,4 +1,4 @@
+-#!/bin/gawk
++#!/usr/bin/gawk
+ #
+ # This file is part of caps strace test.
+ #
+diff --git a/tests/caps-abbrev.awk b/tests/caps-abbrev.awk
+index c00023b..a56cd56 100644
+--- a/tests/caps-abbrev.awk
++++ b/tests/caps-abbrev.awk
+@@ -1,4 +1,4 @@
+-#!/bin/gawk
++#!/usr/bin/gawk
+ #
+ # This file is part of caps strace test.
+ #
diff --git a/meta/recipes-devtools/strace/strace_5.16.bb b/meta/recipes-devtools/strace/strace_5.16.bb
index ae19318c20..39082a5bc7 100644
--- a/meta/recipes-devtools/strace/strace_5.16.bb
+++ b/meta/recipes-devtools/strace/strace_5.16.bb
@@ -9,16 +9,20 @@ SRC_URI = "https://strace.io/files/${PV}/strace-${PV}.tar.xz \
file://update-gawk-paths.patch \
file://Makefile-ptest.patch \
file://run-ptest \
- file://0001-caps-abbrev.awk-fix-gawk-s-path.patch \
file://ptest-spacesave.patch \
file://0001-strace-fix-reproducibilty-issues.patch \
file://skip-load.patch \
file://0001-landlock-update-expected-string.patch \
+ file://f31c2f4494779e5c5f170ad10539bfc2dfafe967.patch \
+ file://3bbfb541b258baec9eba674b5d8dc30007a61542.patch \
"
SRC_URI[sha256sum] = "dc7db230ff3e57c249830ba94acab2b862da1fcaac55417e9b85041a833ca285"
inherit autotools ptest
+# Not yet ported to rv32
+COMPATIBLE_HOST:riscv32 = "null"
+
PACKAGECONFIG:class-target ??= "\
${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', 'bluez', '', d)} \
"
diff --git a/meta/recipes-devtools/tcltk/tcl/fix_non_native_build_issue.patch b/meta/recipes-devtools/tcltk/tcl/fix_non_native_build_issue.patch
index 44b2ce0a30..5a10c93a31 100644
--- a/meta/recipes-devtools/tcltk/tcl/fix_non_native_build_issue.patch
+++ b/meta/recipes-devtools/tcltk/tcl/fix_non_native_build_issue.patch
@@ -1,4 +1,4 @@
-Upstream-Status: Pending
+Upstream-Status: Inappropriate [upstream does not support installed tests]
Index: unix/Makefile.in
===================================================================
diff --git a/meta/recipes-devtools/tcltk/tcl/run-ptest b/meta/recipes-devtools/tcltk/tcl/run-ptest
index a62b703082..87e025fce1 100644
--- a/meta/recipes-devtools/tcltk/tcl/run-ptest
+++ b/meta/recipes-devtools/tcltk/tcl/run-ptest
@@ -4,8 +4,12 @@
export TZ="Europe/London"
export TCL_LIBRARY=library
+# Some tests are overly strict with timings and fail on loaded systems.
+# See bugs #14825 #14882 #15081 #15321.
+SKIPPED_TESTS='async-* cmdMZ-6.6 event-* exit-1.* socket-* socket_inet-*'
+
for i in `ls tests/*.test | awk -F/ '{print $2}'`; do
- ./tcltest tests/all.tcl -file $i >$i.log 2>&1
+ ./tcltest tests/all.tcl -file $i -skip "$SKIPPED_TESTS" >$i.log 2>&1
grep -q -F -e "Files with failing tests:" -e "Test files exiting with errors:" $i.log
if [ $? -eq 0 ]; then
echo "FAIL: $i"
diff --git a/meta/recipes-devtools/tcltk/tcl_8.6.11.bb b/meta/recipes-devtools/tcltk/tcl_8.6.11.bb
index 9f6b003ffb..f8f3d7dd3f 100644
--- a/meta/recipes-devtools/tcltk/tcl_8.6.11.bb
+++ b/meta/recipes-devtools/tcltk/tcl_8.6.11.bb
@@ -44,6 +44,12 @@ inherit autotools ptest binconfig
AUTOTOOLS_SCRIPT_PATH = "${S}/unix"
EXTRA_OECONF = "--enable-threads --disable-rpath --enable-man-suffix"
+# Prevent installing a copy of tzdata based on the tzdata installation on the build host.
+# It doesn't install tzdata if one of the following files exists on the host:
+# /usr/share/zoneinfo/UTC /usr/share/zoneinfo/GMT /usr/share/lib/zoneinfo/UTC /usr/share/lib/zoneinfo/GMT /usr/lib/zoneinfo/UTC /usr/lib/zoneinfo/GMT
+# otherwise "/usr/lib/tcl8.6/tzdata" would be included in the tcl package
+EXTRA_OECONF += "--with-tzdata=no"
+
do_install() {
autotools_do_install
oe_runmake 'DESTDIR=${D}' install-private-headers
@@ -83,6 +89,11 @@ do_install_ptest() {
cp -r ${S}/tests ${D}${PTEST_PATH}
}
+do_install_ptest:append:libc-musl () {
+ # Assumes locales other than provided by musl-locales
+ sed -i -e 's|SKIPPED_TESTS=|SKIPPED_TESTS="unixInit-3*"|' ${D}${PTEST_PATH}/run-ptest
+}
+
# Fix some paths that might be used by Tcl extensions
BINCONFIG_GLOB = "*Config.sh"
diff --git a/meta/recipes-devtools/vala/vala.inc b/meta/recipes-devtools/vala/vala.inc
index 90e0b77de0..162e99bb03 100644
--- a/meta/recipes-devtools/vala/vala.inc
+++ b/meta/recipes-devtools/vala/vala.inc
@@ -42,21 +42,30 @@ EXTRA_OECONF += " --disable-valadoc"
# Vapigen wrapper needs to be available system-wide, because it will be used
# to build vapi files from all other packages with vala support
do_install:append:class-target() {
- install -d ${D}${bindir}/
- install ${B}/vapigen-wrapper ${D}${bindir}/
+ install -d ${D}${bindir_crossscripts}/
+ install ${B}/vapigen-wrapper ${D}${bindir_crossscripts}/
}
# Put vapigen wrapper into target sysroot so that it can be used when building
# vapi files.
-SYSROOT_DIRS:append:class-target = " ${bindir}"
+SYSROOT_DIRS += "${bindir_crossscripts}"
+
+inherit multilib_script
+MULTILIB_SCRIPTS = "${PN}:${bindir}/vala-gen-introspect-0.56"
SYSROOT_PREPROCESS_FUNCS:append:class-target = " vapigen_sysroot_preprocess"
vapigen_sysroot_preprocess() {
# Tweak the vapigen name in the vapigen pkgconfig file, so that it picks
# up our wrapper.
sed -i \
- -e "s|vapigen=.*|vapigen=${bindir}/vapigen-wrapper|" \
+ -e "s|vapigen=.*|vapigen=${bindir_crossscripts}/vapigen-wrapper|" \
${SYSROOT_DESTDIR}${libdir}/pkgconfig/vapigen-${SHRT_VER}.pc
}
SSTATE_SCAN_FILES += "vapigen-wrapper"
+
+PACKAGE_PREPROCESS_FUNCS += "vala_package_preprocess"
+
+vala_package_preprocess () {
+ rm -rf ${PKGD}${bindir_crossscripts}
+}
diff --git a/meta/recipes-devtools/vala/vala_0.56.0.bb b/meta/recipes-devtools/vala/vala_0.56.0.bb
deleted file mode 100644
index a4d6760f10..0000000000
--- a/meta/recipes-devtools/vala/vala_0.56.0.bb
+++ /dev/null
@@ -1,3 +0,0 @@
-require ${BPN}.inc
-
-SRC_URI[sha256sum] = "d92bd13c5630905eeb6a983dcb702204da9731460c2a6e4e39f867996f371040"
diff --git a/meta/recipes-devtools/vala/vala_0.56.3.bb b/meta/recipes-devtools/vala/vala_0.56.3.bb
new file mode 100644
index 0000000000..83f61e5b2f
--- /dev/null
+++ b/meta/recipes-devtools/vala/vala_0.56.3.bb
@@ -0,0 +1,3 @@
+require ${BPN}.inc
+
+SRC_URI[sha256sum] = "e1066221bf7b89cb1fa7327a3888645cb33b604de3bf45aa81132fd040b699bf"
diff --git a/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64 b/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64
index 887bfd2766..4477f39132 100644
--- a/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64
+++ b/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64
@@ -1,211 +1,7 @@
-gdbserver_tests/hgtls
-cachegrind/tests/ann1
-callgrind/tests/simwork1
-callgrind/tests/simwork2
-callgrind/tests/simwork3
-callgrind/tests/simwork-both
-callgrind/tests/simwork-cache
-callgrind/tests/threads
-callgrind/tests/threads-use
-drd/tests/annotate_barrier
-drd/tests/annotate_barrier_xml
-drd/tests/annotate_hbefore
-drd/tests/annotate_hb_err
-drd/tests/annotate_hb_race
-drd/tests/annotate_ignore_read
-drd/tests/annotate_ignore_rw
-drd/tests/annotate_ignore_rw2
-drd/tests/annotate_ignore_write
-drd/tests/annotate_ignore_write2
-drd/tests/annotate_order_1
-drd/tests/annotate_order_2
-drd/tests/annotate_order_3
-drd/tests/annotate_publish_hg
-drd/tests/annotate_rwlock
-drd/tests/annotate_rwlock_hg
-drd/tests/annotate_sem
-drd/tests/annotate_smart_pointer
-drd/tests/annotate_smart_pointer2
-drd/tests/annotate_spinlock
-drd/tests/annotate_static
-drd/tests/annotate_trace_memory
-drd/tests/annotate_trace_memory_xml
-drd/tests/atomic_var
-drd/tests/bar_bad
-drd/tests/bar_trivial
drd/tests/boost_thread
-drd/tests/bug-235681
-drd/tests/bug322621
-drd/tests/circular_buffer
-drd/tests/concurrent_close
-drd/tests/custom_alloc
-drd/tests/custom_alloc_fiw
-drd/tests/dlopen
-drd/tests/fork-parallel
-drd/tests/fork-serial
-drd/tests/fp_race
-drd/tests/fp_race2
-drd/tests/fp_race_xml
-drd/tests/free_is_write
-drd/tests/free_is_write2
-drd/tests/hg01_all_ok
-drd/tests/hg02_deadlock
-drd/tests/hg03_inherit
-drd/tests/hg04_race
-drd/tests/hg05_race2
-drd/tests/hg06_readshared
-drd/tests/hold_lock_1
-drd/tests/hold_lock_2
-drd/tests/linuxthreads_det
-drd/tests/matinv
-drd/tests/memory_allocation
-drd/tests/monitor_example
-drd/tests/new_delete
-drd/tests/pth_barrier
-drd/tests/pth_barrier2
-drd/tests/pth_barrier3
-drd/tests/pth_barrier_race
-drd/tests/pth_barrier_reinit
-drd/tests/pth_broadcast
-drd/tests/pth_cancel_locked
-drd/tests/pth_cleanup_handler
-drd/tests/pth_cond_race
-drd/tests/pth_cond_race2
-drd/tests/pth_detached2
-drd/tests/pth_detached3
-drd/tests/pth_detached_sem
-drd/tests/pth_inconsistent_cond_wait
-drd/tests/pth_mutex_reinit
-drd/tests/pth_once
-drd/tests/pth_process_shared_mutex
-drd/tests/pth_spinlock
-drd/tests/pth_uninitialized_cond
-drd/tests/read_and_free_race
-drd/tests/recursive_mutex
-drd/tests/rwlock_race
-drd/tests/rwlock_test
-drd/tests/rwlock_type_checking
-drd/tests/sem_as_mutex
-drd/tests/sem_as_mutex2
-drd/tests/sem_as_mutex3
-drd/tests/sem_open
-drd/tests/sem_open2
-drd/tests/sem_open3
-drd/tests/sem_open_traced
-drd/tests/sem_wait
-drd/tests/sigalrm
-drd/tests/sigaltstack
-drd/tests/std_atomic
-drd/tests/std_string
-drd/tests/std_thread
-drd/tests/std_thread2
-drd/tests/str_tester
-drd/tests/tc01_simple_race
-drd/tests/tc02_simple_tls
-drd/tests/tc03_re_excl
-drd/tests/tc04_free_lock
-drd/tests/tc05_simple_race
-drd/tests/tc06_two_races
-drd/tests/tc07_hbl1
-drd/tests/tc08_hbl2
-drd/tests/tc10_rec_lock
-drd/tests/tc11_XCHG
-drd/tests/tc12_rwl_trivial
-drd/tests/tc13_laog1
-drd/tests/tc15_laog_lockdel
-drd/tests/tc16_byterace
-drd/tests/tc17_sembar
-drd/tests/tc18_semabuse
-drd/tests/tc19_shadowmem
-drd/tests/tc21_pthonce
-drd/tests/tc22_exit_w_lock
-drd/tests/tc23_bogus_condwait
-helgrind/tests/annotate_rwlock
-helgrind/tests/annotate_smart_pointer
-helgrind/tests/bar_bad
-helgrind/tests/bar_trivial
-helgrind/tests/bug322621
-helgrind/tests/cond_init_destroy
-helgrind/tests/cond_timedwait_invalid
-helgrind/tests/cond_timedwait_test
-helgrind/tests/free_is_write
-helgrind/tests/hg01_all_ok
-helgrind/tests/hg03_inherit
-helgrind/tests/hg04_race
-helgrind/tests/hg05_race2
-helgrind/tests/hg06_readshared
-helgrind/tests/locked_vs_unlocked1_fwd
-helgrind/tests/locked_vs_unlocked1_rev
-helgrind/tests/locked_vs_unlocked2
-helgrind/tests/locked_vs_unlocked3
-helgrind/tests/pth_barrier1
-helgrind/tests/pth_barrier2
-helgrind/tests/pth_barrier3
-helgrind/tests/pth_destroy_cond
-helgrind/tests/rwlock_race
-helgrind/tests/rwlock_test
-helgrind/tests/shmem_abits
-helgrind/tests/stackteardown
-helgrind/tests/t2t_laog
-helgrind/tests/tc01_simple_race
-helgrind/tests/tc02_simple_tls
-helgrind/tests/tc03_re_excl
-helgrind/tests/tc04_free_lock
-helgrind/tests/tc05_simple_race
-helgrind/tests/tc06_two_races
-helgrind/tests/tc06_two_races_xml
-helgrind/tests/tc07_hbl1
-helgrind/tests/tc08_hbl2
-helgrind/tests/tc09_bad_unlock
-helgrind/tests/tc10_rec_lock
-helgrind/tests/tc11_XCHG
-helgrind/tests/tc12_rwl_trivial
-helgrind/tests/tc13_laog1
-helgrind/tests/tc14_laog_dinphils
-helgrind/tests/tc15_laog_lockdel
-helgrind/tests/tc16_byterace
-helgrind/tests/tc17_sembar
-helgrind/tests/tc18_semabuse
-helgrind/tests/tc19_shadowmem
-helgrind/tests/tc20_verifywrap
-helgrind/tests/tc21_pthonce
-helgrind/tests/tc22_exit_w_lock
-helgrind/tests/tc23_bogus_condwait
-helgrind/tests/tc24_nonzero_sem
-memcheck/tests/accounting
-memcheck/tests/addressable
-memcheck/tests/arm64-linux/scalar
-memcheck/tests/atomic_incs
-memcheck/tests/badaddrvalue
-memcheck/tests/badfree
-memcheck/tests/badfree-2trace
-memcheck/tests/badfree3
-memcheck/tests/badjump
-memcheck/tests/badjump2
-memcheck/tests/badloop
-memcheck/tests/badpoll
-memcheck/tests/badrw
-memcheck/tests/big_blocks_freed_list
-memcheck/tests/brk2
+gdbserver_tests/hgtls
memcheck/tests/dw4
-memcheck/tests/err_disable4
-memcheck/tests/err_disable_arange1
-memcheck/tests/leak-autofreepool-5
-memcheck/tests/linux/lsframe1
-memcheck/tests/linux/lsframe2
-memcheck/tests/linux/with-space
-memcheck/tests/origin5-bz2
-memcheck/tests/origin6-fp
-memcheck/tests/partial_load_dflt
-memcheck/tests/pdb-realloc2
-memcheck/tests/sh-mem
-memcheck/tests/sh-mem-random
-memcheck/tests/sigaltstack
-memcheck/tests/sigkill
-memcheck/tests/signal2
-memcheck/tests/threadname
-memcheck/tests/threadname_xml
-memcheck/tests/unit_oset
+memcheck/tests/leak_cpp_interior
memcheck/tests/varinfo1
memcheck/tests/varinfo2
memcheck/tests/varinfo3
@@ -213,21 +9,5 @@ memcheck/tests/varinfo4
memcheck/tests/varinfo5
memcheck/tests/varinfo6
memcheck/tests/varinforestrict
-memcheck/tests/vcpu_bz2
-memcheck/tests/vcpu_fbench
-memcheck/tests/vcpu_fnfns
-memcheck/tests/wcs
-memcheck/tests/wrap1
-memcheck/tests/wrap2
-memcheck/tests/wrap3
-memcheck/tests/wrap4
-memcheck/tests/wrap5
-memcheck/tests/wrap6
-memcheck/tests/wrap7
-memcheck/tests/wrap8
-memcheck/tests/wrapmalloc
-memcheck/tests/wrapmallocstatic
-memcheck/tests/writev1
-memcheck/tests/xml1
-memcheck/tests/linux/stack_changes
-memcheck/tests/linux/timerfd-syscall
+helgrind/tests/hg05_race2
+helgrind/tests/tc20_verifywrap
diff --git a/meta/recipes-devtools/valgrind/valgrind/remove-for-all b/meta/recipes-devtools/valgrind/valgrind/remove-for-all
index cb8d10b18f..226f97b50e 100644
--- a/meta/recipes-devtools/valgrind/valgrind/remove-for-all
+++ b/meta/recipes-devtools/valgrind/valgrind/remove-for-all
@@ -1,8 +1,10 @@
none/tests/amd64/fb_test_amd64
gdbserver_tests/hginfo
+memcheck/tests/linux/timerfd-syscall
memcheck/tests/supp_unknown
helgrind/tests/tls_threads
drd/tests/bar_bad_xml
drd/tests/pth_barrier_thr_cr
drd/tests/thread_name_xml
massif/tests/deep-D
+
diff --git a/meta/recipes-extended/at/at_3.2.5.bb b/meta/recipes-extended/at/at_3.2.5.bb
index 6769eb364b..c0c876a644 100644
--- a/meta/recipes-extended/at/at_3.2.5.bb
+++ b/meta/recipes-extended/at/at_3.2.5.bb
@@ -22,7 +22,7 @@ PAM_DEPS = "libpam libpam-runtime pam-plugin-env pam-plugin-limits"
RCONFLICTS:${PN} = "atd"
RREPLACES:${PN} = "atd"
-SRC_URI = "http://software.calhariz.com/at/${BPN}_${PV}.orig.tar.gz \
+SRC_URI = "${DEBIAN_MIRROR}/main/a/at/${BPN}_${PV}.orig.tar.gz \
file://posixtm.c \
file://posixtm.h \
file://file_replacement_with_gplv2.patch \
@@ -52,8 +52,10 @@ INITSCRIPT_PARAMS = "defaults"
SYSTEMD_SERVICE:${PN} = "atd.service"
-do_configure:prepend() {
- cp -f ${WORKDIR}/posixtm.[ch] ${S}
+do_patch[postfuncs] += "copy_posix_files"
+
+copy_posix_files() {
+ cp -f ${WORKDIR}/posixtm.[ch] ${S}
}
do_install () {
diff --git a/meta/recipes-extended/bash/bash/0001-changes-to-SIGINT-handler-while-waiting-for-a-child-.patch b/meta/recipes-extended/bash/bash/0001-changes-to-SIGINT-handler-while-waiting-for-a-child-.patch
new file mode 100644
index 0000000000..c12b4b7766
--- /dev/null
+++ b/meta/recipes-extended/bash/bash/0001-changes-to-SIGINT-handler-while-waiting-for-a-child-.patch
@@ -0,0 +1,229 @@
+From 7e84276e07c0835a8729d6fe1265e70eedb2a7f7 Mon Sep 17 00:00:00 2001
+From: Chet Ramey <chet.ramey@case.edu>
+Date: Mon, 30 Oct 2023 12:16:07 -0400
+Subject: [PATCH] changes to SIGINT handler while waiting for a child; skip
+ vertical whitespace after translating an integer
+
+Upstream-Status: Backport
+https://git.savannah.gnu.org/cgit/bash.git/commit/?h=devel&id=fe24a6a55e8850298b496c5b9d82f1866eba190e
+
+[Adjust and drop some code to make it applicable to this tree]
+Signed-off-by: Wenlin Kang <wenlin.kang@windriver.com>
+---
+ general.c | 5 +++--
+ jobs.c | 26 ++++++++++++++++----------
+ tests/redir.right | 4 ++--
+ tests/redir11.sub | 2 ++
+ tests/type.right | 16 ++++++++--------
+ tests/type.tests | 24 ++++++++++++------------
+ 6 files changed, 43 insertions(+), 34 deletions(-)
+
+diff --git a/general.c b/general.c
+index 50d5216..68987e2 100644
+--- a/general.c
++++ b/general.c
+@@ -262,8 +262,9 @@ legal_number (string, result)
+ if (errno || ep == string)
+ return 0; /* errno is set on overflow or underflow */
+
+- /* Skip any trailing whitespace, since strtoimax does not. */
+- while (whitespace (*ep))
++ /* Skip any trailing whitespace, since strtoimax does not, using the same
++ test that strtoimax uses for leading whitespace. */
++ while (isspace ((unsigned char) *ep))
+ ep++;
+
+ /* If *string is not '\0' but *ep is '\0' on return, the entire string
+diff --git a/jobs.c b/jobs.c
+index 7c3b6e8..84dab4d 100644
+--- a/jobs.c
++++ b/jobs.c
+@@ -2727,6 +2727,10 @@ wait_for_background_pids (ps)
+ #define INVALID_SIGNAL_HANDLER (SigHandler *)wait_for_background_pids
+ static SigHandler *old_sigint_handler = INVALID_SIGNAL_HANDLER;
+
++/* The current SIGINT handler as set by restore_sigint_handler. Only valid
++ immediately after restore_sigint_handler, used for continuations. */
++static SigHandler *cur_sigint_handler = INVALID_SIGNAL_HANDLER;
++
+ static int wait_sigint_received;
+ static int child_caught_sigint;
+
+@@ -2743,6 +2747,7 @@ wait_sigint_cleanup ()
+ static void
+ restore_sigint_handler ()
+ {
++ cur_sigint_handler = old_sigint_handler;
+ if (old_sigint_handler != INVALID_SIGNAL_HANDLER)
+ {
+ set_signal_handler (SIGINT, old_sigint_handler);
+@@ -2766,8 +2771,7 @@ wait_sigint_handler (sig)
+ restore_sigint_handler ();
+ /* If we got a SIGINT while in `wait', and SIGINT is trapped, do
+ what POSIX.2 says (see builtins/wait.def for more info). */
+- if (this_shell_builtin && this_shell_builtin == wait_builtin &&
+- signal_is_trapped (SIGINT) &&
++ if (signal_is_trapped (SIGINT) &&
+ ((sigint_handler = trap_to_sighandler (SIGINT)) == trap_handler))
+ {
+ trap_handler (SIGINT); /* set pending_traps[SIGINT] */
+@@ -2792,6 +2796,8 @@ wait_sigint_handler (sig)
+ {
+ set_exit_status (128+SIGINT);
+ restore_sigint_handler ();
++ if (cur_sigint_handler == INVALID_SIGNAL_HANDLER)
++ set_sigint_handler (); /* XXX - only do this in one place */
+ kill (getpid (), SIGINT);
+ }
+
+@@ -2934,15 +2940,15 @@ wait_for (pid, flags)
+ {
+ SigHandler *temp_sigint_handler;
+
+- temp_sigint_handler = set_signal_handler (SIGINT, wait_sigint_handler);
+- if (temp_sigint_handler == wait_sigint_handler)
+- {
++ temp_sigint_handler = old_sigint_handler;
++ old_sigint_handler = set_signal_handler (SIGINT, wait_sigint_handler);
++ if (old_sigint_handler == wait_sigint_handler)
++ {
+ #if defined (DEBUG)
+- internal_warning ("wait_for: recursively setting old_sigint_handler to wait_sigint_handler: running_trap = %d", running_trap);
++ internal_debug ("wait_for: recursively setting old_sigint_handler to wait_sigint_handler: running_trap = %d", running_trap);
+ #endif
+- }
+- else
+- old_sigint_handler = temp_sigint_handler;
++ old_sigint_handler = temp_sigint_handler;
++ }
+ waiting_for_child = 0;
+ if (old_sigint_handler == SIG_IGN)
+ set_signal_handler (SIGINT, old_sigint_handler);
+@@ -4148,7 +4154,7 @@ set_job_status_and_cleanup (job)
+ SIGINT (if we reset the sighandler to the default).
+ In this case, we have to fix things up. What a crock. */
+ if (temp_handler == trap_handler && signal_is_trapped (SIGINT) == 0)
+- temp_handler = trap_to_sighandler (SIGINT);
++ temp_handler = trap_to_sighandler (SIGINT);
+ restore_sigint_handler ();
+ if (temp_handler == SIG_DFL)
+ termsig_handler (SIGINT); /* XXX */
+diff --git a/tests/redir.right b/tests/redir.right
+index 8db1041..9e1403c 100644
+--- a/tests/redir.right
++++ b/tests/redir.right
+@@ -154,10 +154,10 @@ foo
+ 1
+ 7
+ after: 42
+-./redir11.sub: line 53: $(ss= declare -i ss): ambiguous redirect
++./redir11.sub: line 55: $(ss= declare -i ss): ambiguous redirect
+ after: 42
+ a+=3
+ foo
+ foo
+-./redir11.sub: line 75: 42: No such file or directory
++./redir11.sub: line 77: 42: No such file or directory
+ 42
+diff --git a/tests/redir11.sub b/tests/redir11.sub
+index d417cdb..2a9f2b8 100644
+--- a/tests/redir11.sub
++++ b/tests/redir11.sub
+@@ -56,6 +56,8 @@ foo()
+ a=4 b=7 foo
+ echo after: $a
+
++exec 7>&- 4>&-
++
+ unset a
+ typeset -i a
+ a=4 eval echo $(echo a+=3)
+diff --git a/tests/type.right b/tests/type.right
+index f876715..c09ab73 100644
+--- a/tests/type.right
++++ b/tests/type.right
+@@ -24,15 +24,15 @@ func ()
+ }
+ while
+ while is a shell keyword
+-./type.tests: line 56: type: m: not found
+-alias m='more'
+-alias m='more'
+-m is aliased to `more'
++./type.tests: line 56: type: morealias: not found
++alias morealias='more'
++alias morealias='more'
++morealias is aliased to `more'
+ alias
+-alias m='more'
+-alias m='more'
+-alias m='more'
+-m is aliased to `more'
++alias morealias='more'
++alias morealias='more'
++alias morealias='more'
++morealias is aliased to `more'
+ builtin
+ builtin is a shell builtin
+ /bin/sh
+diff --git a/tests/type.tests b/tests/type.tests
+index fd39c18..ddc1540 100644
+--- a/tests/type.tests
++++ b/tests/type.tests
+@@ -25,8 +25,6 @@ type -r ${THIS_SH}
+ type notthere
+ command -v notthere
+
+-alias m=more
+-
+ unset -f func 2>/dev/null
+ func() { echo this is func; }
+
+@@ -49,24 +47,26 @@ command -V func
+ command -v while
+ command -V while
+
++alias morealias=more
++
+ # the following two lines should produce the same output
+ # post-3.0 patch makes command -v silent, as posix specifies
+ # first test with alias expansion off (should all fail or produce no output)
+-type -t m
+-type m
+-command -v m
++type -t morealias
++type morealias
++command -v morealias
+ alias -p
+-alias m
++alias morealias
+
+ # then test with alias expansion on
+ shopt -s expand_aliases
+-type m
+-type -t m
+-command -v m
++type morealias
++type -t morealias
++command -v morealias
+ alias -p
+-alias m
++alias morealias
+
+-command -V m
++command -V morealias
+ shopt -u expand_aliases
+
+ command -v builtin
+@@ -76,7 +76,7 @@ command -V /bin/sh
+
+ unset -f func
+ type func
+-unalias m
++unalias morealias
+ type m
+
+ hash -r
+--
+2.25.1
+
diff --git a/meta/recipes-extended/bash/bash/CVE-2022-3715.patch b/meta/recipes-extended/bash/bash/CVE-2022-3715.patch
new file mode 100644
index 0000000000..44f4d91949
--- /dev/null
+++ b/meta/recipes-extended/bash/bash/CVE-2022-3715.patch
@@ -0,0 +1,33 @@
+From 15d2428d5d3df8dd826008baf51579ab7750d8b2 Mon Sep 17 00:00:00 2001
+From: Xiangyu Chen <xiangyu.chen@windriver.com>
+Date: Wed, 23 Nov 2022 11:17:01 +0800
+Subject: [OE-Core][kirkstone][PATCH] bash: heap-buffer-overflow in
+ valid_parameter_transform CVE-2022-3715
+
+Reference: https://bugzilla.redhat.com/show_bug.cgi?id=2126720
+
+CVE: CVE-2022-3715
+Upstream-Status: Backport from
+[https://git.savannah.gnu.org/cgit/bash.git/diff/subst.c?h=bash-5.2-testing&id=9cef6d01181525de119832d2b6a925899cdec08e]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ subst.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/subst.c b/subst.c
+index 2b76256..38ee9ac 100644
+--- a/subst.c
++++ b/subst.c
+@@ -7962,7 +7962,7 @@ parameter_brace_transform (varname, value, ind, xform, rtype, quoted, pflags, fl
+ return ((char *)NULL);
+ }
+
+- if (valid_parameter_transform (xform) == 0)
++ if (xform[0] == 0 || valid_parameter_transform (xform) == 0)
+ {
+ this_command_name = oname;
+ #if 0 /* TAG: bash-5.2 Martin Schulte <gnu@schrader-schulte.de> 10/2020 */
+--
+2.34.1
+
diff --git a/meta/recipes-extended/bash/bash_5.1.16.bb b/meta/recipes-extended/bash/bash_5.1.16.bb
index d046faa4e5..ab1ecffb3d 100644
--- a/meta/recipes-extended/bash/bash_5.1.16.bb
+++ b/meta/recipes-extended/bash/bash_5.1.16.bb
@@ -15,6 +15,8 @@ SRC_URI = "${GNU_MIRROR}/bash/${BP}.tar.gz;name=tarball \
file://use_aclocal.patch \
file://makerace.patch \
file://makerace2.patch \
+ file://CVE-2022-3715.patch \
+ file://0001-changes-to-SIGINT-handler-while-waiting-for-a-child-.patch \
"
SRC_URI[tarball.sha256sum] = "5bac17218d3911834520dad13cd1f85ab944e1c09ae1aba55906be1f8192f558"
diff --git a/meta/recipes-extended/bc/bc_1.07.1.bb b/meta/recipes-extended/bc/bc_1.07.1.bb
index 1bec76bb2a..5a03751304 100644
--- a/meta/recipes-extended/bc/bc_1.07.1.bb
+++ b/meta/recipes-extended/bc/bc_1.07.1.bb
@@ -32,4 +32,4 @@ do_compile:prepend() {
ALTERNATIVE:${PN} = "bc dc"
ALTERNATIVE_PRIORITY = "100"
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-extended/cpio/cpio-2.13/0001-Unset-need_charset_alias-when-building-for-musl.patch b/meta/recipes-extended/cpio/cpio-2.13/0001-Unset-need_charset_alias-when-building-for-musl.patch
deleted file mode 100644
index 6ae213942c..0000000000
--- a/meta/recipes-extended/cpio/cpio-2.13/0001-Unset-need_charset_alias-when-building-for-musl.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From b9565dc2fe0c4f7daaec91b7e83bc7313dee2f4a Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Mon, 13 Apr 2015 17:02:13 -0700
-Subject: [PATCH] Unset need_charset_alias when building for musl
-
-localcharset uses ac_cv_gnu_library_2_1 from glibc21.m4
-which actually shoudl be fixed in gnulib and then all downstream
-projects will get it eventually. For now we apply the fix to
-coreutils
-
-Upstream-Status: Pending
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- lib/gnulib.mk | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Index: cpio-2.11/gnu/Makefile.am
-===================================================================
---- cpio-2.11.orig/gnu/Makefile.am
-+++ cpio-2.11/gnu/Makefile.am
-@@ -734,7 +734,7 @@ install-exec-localcharset: all-local
- case '$(host_os)' in \
- darwin[56]*) \
- need_charset_alias=true ;; \
-- darwin* | cygwin* | mingw* | pw32* | cegcc*) \
-+ darwin* | cygwin* | mingw* | pw32* | cegcc* | linux-musl*) \
- need_charset_alias=false ;; \
- *) \
- need_charset_alias=true ;; \
diff --git a/meta/recipes-extended/cpio/cpio-2.13/0002-src-global.c-Remove-superfluous-declaration-of-progr.patch b/meta/recipes-extended/cpio/cpio-2.13/0002-src-global.c-Remove-superfluous-declaration-of-progr.patch
deleted file mode 100644
index 478324c1c4..0000000000
--- a/meta/recipes-extended/cpio/cpio-2.13/0002-src-global.c-Remove-superfluous-declaration-of-progr.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 33e6cb5a28fab3d99bd6818f8c01e6f33805390f Mon Sep 17 00:00:00 2001
-From: Sergey Poznyakoff <gray@gnu.org>
-Date: Mon, 20 Jan 2020 07:45:39 +0200
-Subject: [PATCH] src/global.c: Remove superfluous declaration of program_name
-
-Upstream-Status: Backport (commit 641d3f4)
-Signed-off-by: Richard Leitner <richard.leitner@skidata.com>
----
- src/global.c | 3 ---
- 1 file changed, 3 deletions(-)
-
-diff --git a/src/global.c b/src/global.c
-index fb3abe9..acf92bc 100644
---- a/src/global.c
-+++ b/src/global.c
-@@ -184,9 +184,6 @@ unsigned int warn_option = 0;
- /* Extract to standard output? */
- bool to_stdout_option = false;
-
--/* The name this program was run with. */
--char *program_name;
--
- /* A pointer to either lstat or stat, depending on whether
- dereferencing of symlinks is done for input files. */
- int (*xstat) ();
---
-2.26.2
-
diff --git a/meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch b/meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch
deleted file mode 100644
index 6ceafeee49..0000000000
--- a/meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch
+++ /dev/null
@@ -1,581 +0,0 @@
-GNU cpio through 2.13 allows attackers to execute arbitrary code via a crafted
-pattern file, because of a dstring.c ds_fgetstr integer overflow that triggers
-an out-of-bounds heap write.
-
-CVE: CVE-2021-38185
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@arm.com>
-
-From e494c68a3a0951b1eaba77e2db93f71a890e15d8 Mon Sep 17 00:00:00 2001
-From: Sergey Poznyakoff <gray@gnu.org>
-Date: Sat, 7 Aug 2021 12:52:21 +0300
-Subject: [PATCH 1/3] Rewrite dynamic string support.
-
-* src/dstring.c (ds_init): Take a single argument.
-(ds_free): New function.
-(ds_resize): Take a single argument. Use x2nrealloc to expand
-the storage.
-(ds_reset,ds_append,ds_concat,ds_endswith): New function.
-(ds_fgetstr): Rewrite. In particular, this fixes integer overflow.
-* src/dstring.h (dynamic_string): Keep both the allocated length
-(ds_size) and index of the next free byte in the string (ds_idx).
-(ds_init,ds_resize): Change signature.
-(ds_len): New macro.
-(ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos.
-* src/copyin.c: Use new ds_ functions.
-* src/copyout.c: Likewise.
-* src/copypass.c: Likewise.
-* src/util.c: Likewise.
----
- src/copyin.c | 40 +++++++++++------------
- src/copyout.c | 16 ++++-----
- src/copypass.c | 34 +++++++++----------
- src/dstring.c | 88 ++++++++++++++++++++++++++++++++++++--------------
- src/dstring.h | 31 +++++++++---------
- src/util.c | 6 ++--
- 6 files changed, 123 insertions(+), 92 deletions(-)
-
-diff --git a/src/copyin.c b/src/copyin.c
-index b29f348..37e503a 100644
---- a/src/copyin.c
-+++ b/src/copyin.c
-@@ -55,11 +55,12 @@ query_rename(struct cpio_file_stat* file_hdr, FILE *tty_in, FILE *tty_out,
- char *str_res; /* Result for string function. */
- static dynamic_string new_name; /* New file name for rename option. */
- static int initialized_new_name = false;
-+
- if (!initialized_new_name)
-- {
-- ds_init (&new_name, 128);
-- initialized_new_name = true;
-- }
-+ {
-+ ds_init (&new_name);
-+ initialized_new_name = true;
-+ }
-
- if (rename_flag)
- {
-@@ -779,37 +780,36 @@ long_format (struct cpio_file_stat *file_hdr, char const *link_name)
- already in `save_patterns' (from the command line) are preserved. */
-
- static void
--read_pattern_file ()
-+read_pattern_file (void)
- {
-- int max_new_patterns;
-- char **new_save_patterns;
-- int new_num_patterns;
-+ char **new_save_patterns = NULL;
-+ size_t max_new_patterns;
-+ size_t new_num_patterns;
- int i;
-- dynamic_string pattern_name;
-+ dynamic_string pattern_name = DYNAMIC_STRING_INITIALIZER;
- FILE *pattern_fp;
-
- if (num_patterns < 0)
- num_patterns = 0;
-- max_new_patterns = 1 + num_patterns;
-- new_save_patterns = (char **) xmalloc (max_new_patterns * sizeof (char *));
- new_num_patterns = num_patterns;
-- ds_init (&pattern_name, 128);
-+ max_new_patterns = num_patterns;
-+ new_save_patterns = xcalloc (max_new_patterns, sizeof (new_save_patterns[0]));
-
- pattern_fp = fopen (pattern_file_name, "r");
- if (pattern_fp == NULL)
- open_fatal (pattern_file_name);
- while (ds_fgetstr (pattern_fp, &pattern_name, '\n') != NULL)
- {
-- if (new_num_patterns >= max_new_patterns)
-- {
-- max_new_patterns += 1;
-- new_save_patterns = (char **)
-- xrealloc ((char *) new_save_patterns,
-- max_new_patterns * sizeof (char *));
-- }
-+ if (new_num_patterns == max_new_patterns)
-+ new_save_patterns = x2nrealloc (new_save_patterns,
-+ &max_new_patterns,
-+ sizeof (new_save_patterns[0]));
- new_save_patterns[new_num_patterns] = xstrdup (pattern_name.ds_string);
- ++new_num_patterns;
- }
-+
-+ ds_free (&pattern_name);
-+
- if (ferror (pattern_fp) || fclose (pattern_fp) == EOF)
- close_error (pattern_file_name);
-
-@@ -1196,7 +1196,7 @@ swab_array (char *ptr, int count)
- in the file system. */
-
- void
--process_copy_in ()
-+process_copy_in (void)
- {
- char done = false; /* True if trailer reached. */
- FILE *tty_in = NULL; /* Interactive file for rename option. */
-diff --git a/src/copyout.c b/src/copyout.c
-index 8b0beb6..26e3dda 100644
---- a/src/copyout.c
-+++ b/src/copyout.c
-@@ -594,9 +594,10 @@ assign_string (char **pvar, char *value)
- The format of the header depends on the compatibility (-c) flag. */
-
- void
--process_copy_out ()
-+process_copy_out (void)
- {
-- dynamic_string input_name; /* Name of file read from stdin. */
-+ dynamic_string input_name = DYNAMIC_STRING_INITIALIZER;
-+ /* Name of file read from stdin. */
- struct stat file_stat; /* Stat record for file. */
- struct cpio_file_stat file_hdr = CPIO_FILE_STAT_INITIALIZER;
- /* Output header information. */
-@@ -605,7 +606,6 @@ process_copy_out ()
- char *orig_file_name = NULL;
-
- /* Initialize the copy out. */
-- ds_init (&input_name, 128);
- file_hdr.c_magic = 070707;
-
- /* Check whether the output file might be a tape. */
-@@ -657,14 +657,9 @@ process_copy_out ()
- {
- if (file_hdr.c_mode & CP_IFDIR)
- {
-- int len = strlen (input_name.ds_string);
- /* Make sure the name ends with a slash */
-- if (input_name.ds_string[len-1] != '/')
-- {
-- ds_resize (&input_name, len + 2);
-- input_name.ds_string[len] = '/';
-- input_name.ds_string[len+1] = 0;
-- }
-+ if (!ds_endswith (&input_name, '/'))
-+ ds_append (&input_name, '/');
- }
- }
-
-@@ -875,6 +870,7 @@ process_copy_out ()
- (unsigned long) blocks), (unsigned long) blocks);
- }
- cpio_file_stat_free (&file_hdr);
-+ ds_free (&input_name);
- }
-
-
-diff --git a/src/copypass.c b/src/copypass.c
-index dc13b5b..62f31c6 100644
---- a/src/copypass.c
-+++ b/src/copypass.c
-@@ -48,10 +48,12 @@ set_copypass_perms (int fd, const char *name, struct stat *st)
- If `link_flag', link instead of copying. */
-
- void
--process_copy_pass ()
-+process_copy_pass (void)
- {
-- dynamic_string input_name; /* Name of file from stdin. */
-- dynamic_string output_name; /* Name of new file. */
-+ dynamic_string input_name = DYNAMIC_STRING_INITIALIZER;
-+ /* Name of file from stdin. */
-+ dynamic_string output_name = DYNAMIC_STRING_INITIALIZER;
-+ /* Name of new file. */
- size_t dirname_len; /* Length of `directory_name'. */
- int res; /* Result of functions. */
- char *slash; /* For moving past slashes in input name. */
-@@ -65,25 +67,18 @@ process_copy_pass ()
- created files */
-
- /* Initialize the copy pass. */
-- ds_init (&input_name, 128);
-
- dirname_len = strlen (directory_name);
- if (change_directory_option && !ISSLASH (directory_name[0]))
- {
- char *pwd = xgetcwd ();
--
-- dirname_len += strlen (pwd) + 1;
-- ds_init (&output_name, dirname_len + 2);
-- strcpy (output_name.ds_string, pwd);
-- strcat (output_name.ds_string, "/");
-- strcat (output_name.ds_string, directory_name);
-+
-+ ds_concat (&output_name, pwd);
-+ ds_append (&output_name, '/');
- }
-- else
-- {
-- ds_init (&output_name, dirname_len + 2);
-- strcpy (output_name.ds_string, directory_name);
-- }
-- output_name.ds_string[dirname_len] = '/';
-+ ds_concat (&output_name, directory_name);
-+ ds_append (&output_name, '/');
-+ dirname_len = ds_len (&output_name);
- output_is_seekable = true;
-
- change_dir ();
-@@ -116,8 +111,8 @@ process_copy_pass ()
- /* Make the name of the new file. */
- for (slash = input_name.ds_string; *slash == '/'; ++slash)
- ;
-- ds_resize (&output_name, dirname_len + strlen (slash) + 2);
-- strcpy (output_name.ds_string + dirname_len + 1, slash);
-+ ds_reset (&output_name, dirname_len);
-+ ds_concat (&output_name, slash);
-
- existing_dir = false;
- if (lstat (output_name.ds_string, &out_file_stat) == 0)
-@@ -333,6 +328,9 @@ process_copy_pass ()
- (unsigned long) blocks),
- (unsigned long) blocks);
- }
-+
-+ ds_free (&input_name);
-+ ds_free (&output_name);
- }
-
- /* Try and create a hard link from FILE_NAME to another file
-diff --git a/src/dstring.c b/src/dstring.c
-index e9c063f..358f356 100644
---- a/src/dstring.c
-+++ b/src/dstring.c
-@@ -20,8 +20,8 @@
- #if defined(HAVE_CONFIG_H)
- # include <config.h>
- #endif
--
- #include <stdio.h>
-+#include <stdlib.h>
- #if defined(HAVE_STRING_H) || defined(STDC_HEADERS)
- #include <string.h>
- #else
-@@ -33,24 +33,41 @@
- /* Initialiaze dynamic string STRING with space for SIZE characters. */
-
- void
--ds_init (dynamic_string *string, int size)
-+ds_init (dynamic_string *string)
-+{
-+ memset (string, 0, sizeof *string);
-+}
-+
-+/* Free the dynamic string storage. */
-+
-+void
-+ds_free (dynamic_string *string)
- {
-- string->ds_length = size;
-- string->ds_string = (char *) xmalloc (size);
-+ free (string->ds_string);
- }
-
--/* Expand dynamic string STRING, if necessary, to hold SIZE characters. */
-+/* Expand dynamic string STRING, if necessary. */
-
- void
--ds_resize (dynamic_string *string, int size)
-+ds_resize (dynamic_string *string)
- {
-- if (size > string->ds_length)
-+ if (string->ds_idx == string->ds_size)
- {
-- string->ds_length = size;
-- string->ds_string = (char *) xrealloc ((char *) string->ds_string, size);
-+ string->ds_string = x2nrealloc (string->ds_string, &string->ds_size,
-+ 1);
- }
- }
-
-+/* Reset the index of the dynamic string S to LEN. */
-+
-+void
-+ds_reset (dynamic_string *s, size_t len)
-+{
-+ while (len > s->ds_size)
-+ ds_resize (s);
-+ s->ds_idx = len;
-+}
-+
- /* Dynamic string S gets a string terminated by the EOS character
- (which is removed) from file F. S will increase
- in size during the function if the string from F is longer than
-@@ -61,34 +78,50 @@ ds_resize (dynamic_string *string, int size)
- char *
- ds_fgetstr (FILE *f, dynamic_string *s, char eos)
- {
-- int insize; /* Amount needed for line. */
-- int strsize; /* Amount allocated for S. */
- int next_ch;
-
- /* Initialize. */
-- insize = 0;
-- strsize = s->ds_length;
-+ s->ds_idx = 0;
-
- /* Read the input string. */
-- next_ch = getc (f);
-- while (next_ch != eos && next_ch != EOF)
-+ while ((next_ch = getc (f)) != eos && next_ch != EOF)
- {
-- if (insize >= strsize - 1)
-- {
-- ds_resize (s, strsize * 2 + 2);
-- strsize = s->ds_length;
-- }
-- s->ds_string[insize++] = next_ch;
-- next_ch = getc (f);
-+ ds_resize (s);
-+ s->ds_string[s->ds_idx++] = next_ch;
- }
-- s->ds_string[insize++] = '\0';
-+ ds_resize (s);
-+ s->ds_string[s->ds_idx] = '\0';
-
-- if (insize == 1 && next_ch == EOF)
-+ if (s->ds_idx == 0 && next_ch == EOF)
- return NULL;
- else
- return s->ds_string;
- }
-
-+void
-+ds_append (dynamic_string *s, int c)
-+{
-+ ds_resize (s);
-+ s->ds_string[s->ds_idx] = c;
-+ if (c)
-+ {
-+ s->ds_idx++;
-+ ds_resize (s);
-+ s->ds_string[s->ds_idx] = 0;
-+ }
-+}
-+
-+void
-+ds_concat (dynamic_string *s, char const *str)
-+{
-+ size_t len = strlen (str);
-+ while (len + 1 > s->ds_size)
-+ ds_resize (s);
-+ memcpy (s->ds_string + s->ds_idx, str, len);
-+ s->ds_idx += len;
-+ s->ds_string[s->ds_idx] = 0;
-+}
-+
- char *
- ds_fgets (FILE *f, dynamic_string *s)
- {
-@@ -100,3 +133,10 @@ ds_fgetname (FILE *f, dynamic_string *s)
- {
- return ds_fgetstr (f, s, '\0');
- }
-+
-+/* Return true if the dynamic string S ends with character C. */
-+int
-+ds_endswith (dynamic_string *s, int c)
-+{
-+ return (s->ds_idx > 0 && s->ds_string[s->ds_idx - 1] == c);
-+}
-diff --git a/src/dstring.h b/src/dstring.h
-index b5135fe..f5b04ef 100644
---- a/src/dstring.h
-+++ b/src/dstring.h
-@@ -17,10 +17,6 @@
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301 USA. */
-
--#ifndef NULL
--#define NULL 0
--#endif
--
- /* A dynamic string consists of record that records the size of an
- allocated string and the pointer to that string. The actual string
- is a normal zero byte terminated string that can be used with the
-@@ -30,22 +26,25 @@
-
- typedef struct
- {
-- int ds_length; /* Actual amount of storage allocated. */
-- char *ds_string; /* String. */
-+ size_t ds_size; /* Actual amount of storage allocated. */
-+ size_t ds_idx; /* Index of the next free byte in the string. */
-+ char *ds_string; /* String storage. */
- } dynamic_string;
-
-+#define DYNAMIC_STRING_INITIALIZER { 0, 0, NULL }
-
--/* Macros that look similar to the original string functions.
-- WARNING: These macros work only on pointers to dynamic string records.
-- If used with a real record, an "&" must be used to get the pointer. */
--#define ds_strlen(s) strlen ((s)->ds_string)
--#define ds_strcmp(s1, s2) strcmp ((s1)->ds_string, (s2)->ds_string)
--#define ds_strncmp(s1, s2, n) strncmp ((s1)->ds_string, (s2)->ds_string, n)
--#define ds_index(s, c) index ((s)->ds_string, c)
--#define ds_rindex(s, c) rindex ((s)->ds_string, c)
-+void ds_init (dynamic_string *string);
-+void ds_free (dynamic_string *string);
-+void ds_reset (dynamic_string *s, size_t len);
-
--void ds_init (dynamic_string *string, int size);
--void ds_resize (dynamic_string *string, int size);
-+/* All functions below guarantee that s->ds_string[s->ds_idx] == '\0' */
- char *ds_fgetname (FILE *f, dynamic_string *s);
- char *ds_fgets (FILE *f, dynamic_string *s);
- char *ds_fgetstr (FILE *f, dynamic_string *s, char eos);
-+void ds_append (dynamic_string *s, int c);
-+void ds_concat (dynamic_string *s, char const *str);
-+
-+#define ds_len(s) ((s)->ds_idx)
-+
-+int ds_endswith (dynamic_string *s, int c);
-+
-diff --git a/src/util.c b/src/util.c
-index 4421b20..6d6bbaa 100644
---- a/src/util.c
-+++ b/src/util.c
-@@ -846,11 +846,9 @@ get_next_reel (int tape_des)
- FILE *tty_out; /* File for interacting with user. */
- int old_tape_des;
- char *next_archive_name;
-- dynamic_string new_name;
-+ dynamic_string new_name = DYNAMIC_STRING_INITIALIZER;
- char *str_res;
-
-- ds_init (&new_name, 128);
--
- /* Open files for interactive communication. */
- tty_in = fopen (TTY_NAME, "r");
- if (tty_in == NULL)
-@@ -925,7 +923,7 @@ get_next_reel (int tape_des)
- error (PAXEXIT_FAILURE, 0, _("internal error: tape descriptor changed from %d to %d"),
- old_tape_des, tape_des);
-
-- free (new_name.ds_string);
-+ ds_free (&new_name);
- fclose (tty_in);
- fclose (tty_out);
- }
---
-2.25.1
-
-
-From fb7a51bf85b8e6f045cacb4fb783db4a414741bf Mon Sep 17 00:00:00 2001
-From: Sergey Poznyakoff <gray@gnu.org>
-Date: Wed, 11 Aug 2021 18:10:38 +0300
-Subject: [PATCH 2/3] Fix previous commit
-
-* src/dstring.c (ds_reset,ds_concat): Don't call ds_resize in a
-loop.
----
- src/dstring.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/src/dstring.c b/src/dstring.c
-index 358f356..90c691c 100644
---- a/src/dstring.c
-+++ b/src/dstring.c
-@@ -64,7 +64,7 @@ void
- ds_reset (dynamic_string *s, size_t len)
- {
- while (len > s->ds_size)
-- ds_resize (s);
-+ s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
- s->ds_idx = len;
- }
-
-@@ -116,7 +116,7 @@ ds_concat (dynamic_string *s, char const *str)
- {
- size_t len = strlen (str);
- while (len + 1 > s->ds_size)
-- ds_resize (s);
-+ s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
- memcpy (s->ds_string + s->ds_idx, str, len);
- s->ds_idx += len;
- s->ds_string[s->ds_idx] = 0;
---
-2.25.1
-
-
-From 86b37d74b15f9bb5fe62fd1642cc126d3ace0189 Mon Sep 17 00:00:00 2001
-From: Sergey Poznyakoff <gray@gnu.org>
-Date: Wed, 18 Aug 2021 09:41:39 +0300
-Subject: [PATCH 3/3] Fix dynamic string reallocations
-
-* src/dstring.c (ds_resize): Take additional argument: number of
-bytes to leave available after ds_idx. All uses changed.
----
- src/dstring.c | 18 ++++++++----------
- 1 file changed, 8 insertions(+), 10 deletions(-)
-
-diff --git a/src/dstring.c b/src/dstring.c
-index 90c691c..0f597cc 100644
---- a/src/dstring.c
-+++ b/src/dstring.c
-@@ -49,9 +49,9 @@ ds_free (dynamic_string *string)
- /* Expand dynamic string STRING, if necessary. */
-
- void
--ds_resize (dynamic_string *string)
-+ds_resize (dynamic_string *string, size_t len)
- {
-- if (string->ds_idx == string->ds_size)
-+ while (len + string->ds_idx >= string->ds_size)
- {
- string->ds_string = x2nrealloc (string->ds_string, &string->ds_size,
- 1);
-@@ -63,8 +63,7 @@ ds_resize (dynamic_string *string)
- void
- ds_reset (dynamic_string *s, size_t len)
- {
-- while (len > s->ds_size)
-- s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
-+ ds_resize (s, len);
- s->ds_idx = len;
- }
-
-@@ -86,10 +85,10 @@ ds_fgetstr (FILE *f, dynamic_string *s, char eos)
- /* Read the input string. */
- while ((next_ch = getc (f)) != eos && next_ch != EOF)
- {
-- ds_resize (s);
-+ ds_resize (s, 0);
- s->ds_string[s->ds_idx++] = next_ch;
- }
-- ds_resize (s);
-+ ds_resize (s, 0);
- s->ds_string[s->ds_idx] = '\0';
-
- if (s->ds_idx == 0 && next_ch == EOF)
-@@ -101,12 +100,12 @@ ds_fgetstr (FILE *f, dynamic_string *s, char eos)
- void
- ds_append (dynamic_string *s, int c)
- {
-- ds_resize (s);
-+ ds_resize (s, 0);
- s->ds_string[s->ds_idx] = c;
- if (c)
- {
- s->ds_idx++;
-- ds_resize (s);
-+ ds_resize (s, 0);
- s->ds_string[s->ds_idx] = 0;
- }
- }
-@@ -115,8 +114,7 @@ void
- ds_concat (dynamic_string *s, char const *str)
- {
- size_t len = strlen (str);
-- while (len + 1 > s->ds_size)
-- s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
-+ ds_resize (s, len);
- memcpy (s->ds_string + s->ds_idx, str, len);
- s->ds_idx += len;
- s->ds_string[s->ds_idx] = 0;
---
-2.25.1
-
diff --git a/meta/recipes-extended/cpio/cpio_2.13.bb b/meta/recipes-extended/cpio/cpio_2.14.bb
index e72a114de9..c0b97ee166 100644
--- a/meta/recipes-extended/cpio/cpio_2.13.bb
+++ b/meta/recipes-extended/cpio/cpio_2.14.bb
@@ -7,13 +7,10 @@ LICENSE = "GPL-3.0-only"
LIC_FILES_CHKSUM = "file://COPYING;md5=f27defe1e96c2e1ecd4e0c9be8967949"
SRC_URI = "${GNU_MIRROR}/cpio/cpio-${PV}.tar.gz \
- file://0001-Unset-need_charset_alias-when-building-for-musl.patch \
- file://0002-src-global.c-Remove-superfluous-declaration-of-progr.patch \
- file://CVE-2021-38185.patch \
+ file://0001-configure-Include-needed-header-for-major-minor-macr.patch \
"
-SRC_URI[md5sum] = "389c5452d667c23b5eceb206f5000810"
-SRC_URI[sha256sum] = "e87470d9c984317f658567c03bfefb6b0c829ff17dbf6b0de48d71a4c8f3db88"
+SRC_URI[sha256sum] = "145a340fd9d55f0b84779a44a12d5f79d77c99663967f8cfa168d7905ca52454"
inherit autotools gettext texinfo
diff --git a/meta/recipes-extended/cpio/files/0001-configure-Include-needed-header-for-major-minor-macr.patch b/meta/recipes-extended/cpio/files/0001-configure-Include-needed-header-for-major-minor-macr.patch
new file mode 100644
index 0000000000..360dd1ebd8
--- /dev/null
+++ b/meta/recipes-extended/cpio/files/0001-configure-Include-needed-header-for-major-minor-macr.patch
@@ -0,0 +1,47 @@
+From 8179be21e664cedb2e9d238cc2f6d04965e97275 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Thu, 11 May 2023 10:18:44 +0300
+Subject: [PATCH] configure: Include needed header for major/minor macros
+
+This helps in avoiding the warning about implicit function declaration
+which is elevated as error with newer compilers e.g. clang 16
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/cpio.git/commit/?id=8179be21e664cedb2e9d238cc2f6d04965e97275]
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ configure.ac | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index de479e7..c601029 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -43,8 +43,22 @@ AC_TYPE_UID_T
+ AC_CHECK_TYPE(gid_t, int)
+
+ AC_HEADER_DIRENT
+-AX_COMPILE_CHECK_RETTYPE([major], [0])
+-AX_COMPILE_CHECK_RETTYPE([minor], [0])
++AX_COMPILE_CHECK_RETTYPE([major], [0], [
++#include <sys/types.h>
++#ifdef MAJOR_IN_MKDEV
++# include <sys/mkdev.h>
++#endif
++#ifdef MAJOR_IN_SYSMACROS
++# include <sys/sysmacros.h>
++#endif])
++AX_COMPILE_CHECK_RETTYPE([minor], [0], [
++#include <sys/types.h>
++#ifdef MAJOR_IN_MKDEV
++# include <sys/mkdev.h>
++#endif
++#ifdef MAJOR_IN_SYSMACROS
++# include <sys/sysmacros.h>
++#endif])
+
+ AC_CHECK_FUNCS([fchmod fchown])
+ # This is needed for mingw build
+--
+2.34.1
diff --git a/meta/recipes-extended/cracklib/cracklib_2.9.7.bb b/meta/recipes-extended/cracklib/cracklib_2.9.8.bb
index 629069e844..a3db6eb394 100644
--- a/meta/recipes-extended/cracklib/cracklib_2.9.7.bb
+++ b/meta/recipes-extended/cracklib/cracklib_2.9.8.bb
@@ -9,11 +9,12 @@ DEPENDS = "cracklib-native zlib"
EXTRA_OECONF = "--without-python --libdir=${base_libdir}"
-SRC_URI = "git://github.com/cracklib/cracklib;protocol=https;branch=master \
+SRC_URI = "git://github.com/cracklib/cracklib;protocol=https;branch=main \
file://0001-packlib.c-support-dictionary-byte-order-dependent.patch \
- file://0002-craklib-fix-testnum-and-teststr-failed.patch"
+ file://0002-craklib-fix-testnum-and-teststr-failed.patch \
+ "
-SRCREV = "f83934cf3cced0c9600c7d81332f4169f122a2cf"
+SRCREV = "d9e8f9f47718539aeba80f90f4e072549926dc9c"
S = "${WORKDIR}/git/src"
inherit autotools gettext
diff --git a/meta/recipes-extended/cups/cups.inc b/meta/recipes-extended/cups/cups.inc
index 8f2ad8a009..047ab33898 100644
--- a/meta/recipes-extended/cups/cups.inc
+++ b/meta/recipes-extended/cups/cups.inc
@@ -15,6 +15,10 @@ SRC_URI = "https://github.com/OpenPrinting/cups/releases/download/v${PV}/cups-${
file://0004-cups-fix-multilib-install-file-conflicts.patch \
file://volatiles.99_cups \
file://cups-volatiles.conf \
+ file://CVE-2023-32324.patch \
+ file://CVE-2023-34241.patch \
+ file://CVE-2023-32360.patch \
+ file://CVE-2023-4504.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/OpenPrinting/cups/releases"
@@ -26,6 +30,8 @@ CVE_CHECK_IGNORE += "CVE-2008-1033"
CVE_CHECK_IGNORE += "CVE-2009-0032"
# This is an Ubuntu only issue.
CVE_CHECK_IGNORE += "CVE-2018-6553"
+# This is fixed in 2.4.2 but the cve-check class still reports it
+CVE_CHECK_IGNORE += "CVE-2022-26691"
LEAD_SONAME = "libcupsdriver.so"
@@ -46,6 +52,7 @@ PACKAGECONFIG[gnutls] = "--with-tls=gnutls,--with-tls=no,gnutls"
PACKAGECONFIG[pam] = "--enable-pam --with-pam-module=unix, --disable-pam, libpam"
PACKAGECONFIG[systemd] = "--with-systemd=${systemd_system_unitdir},--without-systemd,systemd"
PACKAGECONFIG[xinetd] = "--with-xinetd=${sysconfdir}/xinetd.d,--without-xinetd,xinetd"
+PACKAGECONFIG[webif] = "--enable-webif,--disable-webif"
EXTRA_OECONF = " \
--enable-dbus \
@@ -65,7 +72,7 @@ EXTRA_OECONF = " \
EXTRA_AUTORECONF += "--exclude=autoheader"
do_install () {
- oe_runmake "DESTDIR=${D}" install
+ oe_runmake "BUILDROOT=${D}" install
# Remove /var/run from package as cupsd will populate it on startup
rm -fr ${D}/${localstatedir}/run
@@ -73,7 +80,7 @@ do_install () {
rmdir ${D}/${libexecdir}/${BPN}/driver
# Fix the pam configuration file permissions
- if ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'true', 'false', d)}; then
+ if ${@bb.utils.contains('PACKAGECONFIG', 'pam', 'true', 'false', d)}; then
chmod 0644 ${D}${sysconfdir}/pam.d/cups
fi
@@ -91,7 +98,7 @@ do_install () {
fi
}
-PACKAGES =+ "${PN}-lib ${PN}-libimage"
+PACKAGES =+ "${PN}-lib ${PN}-libimage ${PN}-webif"
RDEPENDS:${PN} += "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'procps', '', d)}"
FILES:${PN} += "${libexecdir}/cups/"
@@ -100,13 +107,10 @@ FILES:${PN}-lib = "${libdir}/libcups.so.*"
FILES:${PN}-libimage = "${libdir}/libcupsimage.so.*"
-#package the html for the webgui inside the main packages (~1MB uncompressed)
+# put the html for the web interface into its own PACKAGE
+FILES:${PN}-webif += "${datadir}/doc/cups/ ${datadir}/icons/"
+RRECOMMENDS:${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'webif', '${PN}-webif', '', d)}"
-FILES:${PN} += "${datadir}/doc/cups/images \
- ${datadir}/doc/cups/*html \
- ${datadir}/doc/cups/*.css \
- ${datadir}/icons/ \
- "
CONFFILES:${PN} += "${sysconfdir}/cups/cupsd.conf"
MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/cups-config"
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-32324.patch b/meta/recipes-extended/cups/cups/CVE-2023-32324.patch
new file mode 100644
index 0000000000..40b89c9899
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-32324.patch
@@ -0,0 +1,36 @@
+From 07cbffd11107eed3aaf1c64e35552aec20f792da Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Thu, 1 Jun 2023 12:04:00 +0200
+Subject: [PATCH] cups/string.c: Return if `size` is 0 (fixes CVE-2023-32324)
+
+CVE: CVE-2023-32324
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/fd8bc2d32589]
+
+(cherry picked from commit fd8bc2d32589d1fd91fe1c0521be2a7c0462109e)
+Signed-off-by: Sanjay Chitroda <schitrod@cisco.com>
+---
+ cups/string.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/cups/string.c b/cups/string.c
+index 93cdad19..6ef58515 100644
+--- a/cups/string.c
++++ b/cups/string.c
+@@ -1,6 +1,7 @@
+ /*
+ * String functions for CUPS.
+ *
++ * Copyright © 2023 by OpenPrinting.
+ * Copyright © 2007-2019 by Apple Inc.
+ * Copyright © 1997-2007 by Easy Software Products.
+ *
+@@ -730,6 +731,9 @@ _cups_strlcpy(char *dst, /* O - Destination string */
+ size_t srclen; /* Length of source string */
+
+
++ if (size == 0)
++ return (0);
++
+ /*
+ * Figure out how much room is needed...
+ */
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-32360.patch b/meta/recipes-extended/cups/cups/CVE-2023-32360.patch
new file mode 100644
index 0000000000..c3db722f1f
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-32360.patch
@@ -0,0 +1,35 @@
+From a0c8b9c9556882f00c68b9727a95a1b6d1452913 Mon Sep 17 00:00:00 2001
+From: Michael R Sweet <michael.r.sweet@gmail.com>
+Date: Thu, 14 Sep 2023 09:16:45 +0000
+Subject: [PATCH] Require authentication for CUPS-Get-Document.
+
+CVE: CVE-2023-32360
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/a0c8b9c9556882f00c68b9727a95a1b6d1452913]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ conf/cupsd.conf.in | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/conf/cupsd.conf.in b/conf/cupsd.conf.in
+index b258849..08f5070 100644
+--- a/conf/cupsd.conf.in
++++ b/conf/cupsd.conf.in
+@@ -68,7 +68,13 @@ IdleExitTimeout @EXIT_TIMEOUT@
+ Order deny,allow
+ </Limit>
+
+- <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
++ <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job>
++ Require user @OWNER @SYSTEM
++ Order deny,allow
++ </Limit>
++
++ <Limit CUPS-Get-Document>
++ AuthType Default
+ Require user @OWNER @SYSTEM
+ Order deny,allow
+ </Limit>
+--
+2.35.5
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-34241.patch b/meta/recipes-extended/cups/cups/CVE-2023-34241.patch
new file mode 100644
index 0000000000..95b3925b36
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-34241.patch
@@ -0,0 +1,68 @@
+From ffd290b4ab247f82722927ba9b21358daa16dbf1 Mon Sep 17 00:00:00 2001
+From: Rose <83477269+AtariDreams@users.noreply.github.com>
+Date: Thu, 1 Jun 2023 11:33:39 -0400
+Subject: [PATCH] Log result of httpGetHostname BEFORE closing the connection
+
+httpClose frees the memory of con->http. This is problematic because httpGetHostname then tries to access the memory it points to.
+
+We have to log the hostname first.
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/9809947a959e18409dcf562a3466ef246cb90cb2]
+CVE: CVE-2023-34241
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ scheduler/client.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/scheduler/client.c b/scheduler/client.c
+index e7e419f..441c1d7 100644
+--- a/scheduler/client.c
++++ b/scheduler/client.c
+@@ -193,13 +193,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+ /*
+ * Can't have an unresolved IP address with double-lookups enabled...
+ */
+-
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+- "Name lookup failed - connection from %s closed!",
++ "Name lookup failed - closing connection from %s!",
+ httpGetHostname(con->http, NULL, 0));
+
++ httpClose(con->http);
+ free(con);
+ return;
+ }
+@@ -235,11 +233,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+ * with double-lookups enabled...
+ */
+
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+- "IP lookup failed - connection from %s closed!",
++ "IP lookup failed - closing connection from %s!",
+ httpGetHostname(con->http, NULL, 0));
++
++ httpClose(con->http);
+ free(con);
+ return;
+ }
+@@ -256,11 +254,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+
+ if (!hosts_access(&wrap_req))
+ {
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+ "Connection from %s refused by /etc/hosts.allow and "
+ "/etc/hosts.deny rules.", httpGetHostname(con->http, NULL, 0));
++
++ httpClose(con->http);
+ free(con);
+ return;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-4504.patch b/meta/recipes-extended/cups/cups/CVE-2023-4504.patch
new file mode 100644
index 0000000000..e52e43a209
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-4504.patch
@@ -0,0 +1,42 @@
+CVE: CVE-2023-4504
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/2431caddb7e6a87f04ac90b5c6366ad268b6ff31 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 2431caddb7e6a87f04ac90b5c6366ad268b6ff31 Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Wed, 20 Sep 2023 14:45:17 +0200
+Subject: [PATCH] raster-interpret.c: Fix CVE-2023-4504
+
+We didn't check for the end of the buffer when it looked like there was an
+escaped character - check for the NULL terminator there and, if found, return
+NULL both as the return value and in `ptr`, because a lone backslash is not
+a valid PostScript character.
+---
+ cups/raster-interpret.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/cups/raster-interpret.c b/cups/raster-interpret.c
+index 6fcf731b5..b8655c8c6 100644
+--- a/cups/raster-interpret.c
++++ b/cups/raster-interpret.c
+@@ -1116,7 +1116,19 @@ scan_ps(_cups_ps_stack_t *st, /* I - Stack */
+
+ cur ++;
+
+- if (*cur == 'b')
++ /*
++ * Return NULL if we reached NULL terminator, a lone backslash
++ * is not a valid character in PostScript.
++ */
++
++ if (!*cur)
++ {
++ *ptr = NULL;
++
++ return (NULL);
++ }
++
++ if (*cur == 'b')
+ *valptr++ = '\b';
+ else if (*cur == 'f')
+ *valptr++ = '\f';
diff --git a/meta/recipes-extended/diffutils/diffutils/0001-Skip-strip-trailing-cr-test-case.patch b/meta/recipes-extended/diffutils/diffutils/0001-Skip-strip-trailing-cr-test-case.patch
index aac1c43465..32793233f9 100644
--- a/meta/recipes-extended/diffutils/diffutils/0001-Skip-strip-trailing-cr-test-case.patch
+++ b/meta/recipes-extended/diffutils/diffutils/0001-Skip-strip-trailing-cr-test-case.patch
@@ -1,4 +1,4 @@
-From bd7fb8be2ae2d75347cf7733302d5093046ffa85 Mon Sep 17 00:00:00 2001
+From f31395c931bc633206eccfcfaaaa5d15021a3e86 Mon Sep 17 00:00:00 2001
From: Peiran Hong <peiran.hong@windriver.com>
Date: Thu, 5 Sep 2019 15:42:22 -0400
Subject: [PATCH] Skip strip-trailing-cr test case
@@ -10,26 +10,20 @@ package.
Upstream-Status: Inappropriate [embedded specific]
Signed-off-by: Peiran Hong <peiran.hong@windriver.com>
+
---
- tests/Makefile.am | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
+ tests/Makefile.am | 1 -
+ 1 file changed, 1 deletion(-)
diff --git a/tests/Makefile.am b/tests/Makefile.am
-index 83a7c9d..04d51b5 100644
+index 79bacfb..4adb4d7 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
-@@ -21,8 +21,10 @@ TESTS = \
+@@ -22,7 +22,6 @@ TESTS = \
stdin \
strcoll-0-names \
filename-quoting \
- strip-trailing-cr \
- colors
-+# Skipping this test since it requires valgrind
-+# and thus is too heavy for diffutils package
-+# strip-trailing-cr
-
- XFAIL_TESTS = large-subopt
-
---
-2.21.0
-
+ timezone \
+ colors \
+ y2038-vs-32bit
diff --git a/meta/recipes-extended/diffutils/diffutils/0001-mcontext-is-not-a-standard-layout-so-glibc-and-musl-.patch b/meta/recipes-extended/diffutils/diffutils/0001-mcontext-is-not-a-standard-layout-so-glibc-and-musl-.patch
deleted file mode 100644
index 4928e1eaff..0000000000
--- a/meta/recipes-extended/diffutils/diffutils/0001-mcontext-is-not-a-standard-layout-so-glibc-and-musl-.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From f385ad6639380eb6dfa8b8eb4a5ba65dd12db744 Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Fri, 25 Mar 2022 13:43:19 -0700
-Subject: [PATCH] mcontext is not a standard layout so glibc and musl differ
-
-This is already applied to libsigsegv upstream, hopefully next version
-of grep will update its internal copy and we can drop this patch
-
-Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=libsigsegv.git;a=commitdiff;h=a6ff69873110c0a8ba6f7fd90532dbc11224828c]
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- lib/sigsegv.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/lib/sigsegv.c b/lib/sigsegv.c
-index 998c827..b6f4841 100644
---- a/lib/sigsegv.c
-+++ b/lib/sigsegv.c
-@@ -219,8 +219,8 @@ int libsigsegv_version = LIBSIGSEGV_VERSION;
- # define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.gp_regs[1]
- # else /* 32-bit */
- /* both should be equivalent */
--# if 0
--# define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.regs->gpr[1]
-+# if ! defined __GLIBC__
-+# define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_regs->gregs[1]
- # else
- # define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.uc_regs->gregs[1]
- # endif
---
-2.35.1
-
diff --git a/meta/recipes-extended/diffutils/diffutils_3.8.bb b/meta/recipes-extended/diffutils/diffutils_3.10.bb
index 8889c83ee2..08e8305612 100644
--- a/meta/recipes-extended/diffutils/diffutils_3.8.bb
+++ b/meta/recipes-extended/diffutils/diffutils_3.10.bb
@@ -6,10 +6,9 @@ require diffutils.inc
SRC_URI = "${GNU_MIRROR}/diffutils/diffutils-${PV}.tar.xz \
file://run-ptest \
file://0001-Skip-strip-trailing-cr-test-case.patch \
- file://0001-mcontext-is-not-a-standard-layout-so-glibc-and-musl-.patch \
"
-SRC_URI[sha256sum] = "a6bdd7d1b31266d11c4f4de6c1b748d4607ab0231af5188fc2533d0ae2438fec"
+SRC_URI[sha256sum] = "90e5e93cc724e4ebe12ede80df1634063c7a855692685919bfe60b556c9bd09e"
EXTRA_OECONF += "ac_cv_path_PR_PROGRAM=${bindir}/pr --without-libsigsegv-prefix"
diff --git a/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch b/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch
new file mode 100644
index 0000000000..bc157d6afb
--- /dev/null
+++ b/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch
@@ -0,0 +1,28 @@
+From e709eb829448ce040087a3fc5481db6bfcaae212 Mon Sep 17 00:00:00 2001
+From: "Arnold D. Robbins" <arnold@skeeve.com>
+Date: Wed, 3 Aug 2022 13:00:54 +0300
+Subject: [PATCH] Small bug fix in builtin.c.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/gawk/tree/debian/patches/CVE-2023-4156.patch?h=ubuntu/jammy-security
+Upstream commit https://git.savannah.gnu.org/gitweb/?p=gawk.git;a=commitdiff;h=e709eb829448ce040087a3fc5481db6bfcaae212]
+CVE: CVE-2023-4156
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ChangeLog | 6 ++++++
+ builtin.c | 5 ++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- gawk-5.1.0.orig/builtin.c
++++ gawk-5.1.0/builtin.c
+@@ -957,7 +957,10 @@ check_pos:
+ s1++;
+ n0--;
+ }
+- if (val >= num_args) {
++ // val could be less than zero if someone provides a field width
++ // so large that it causes integer overflow. Mainly fuzzers do this,
++ // but let's try to be good anyway.
++ if (val < 0 || val >= num_args) {
+ toofew = true;
+ break;
+ }
diff --git a/meta/recipes-extended/gawk/gawk_5.1.1.bb b/meta/recipes-extended/gawk/gawk_5.1.1.bb
index fe339805d0..0b0d0897bc 100644
--- a/meta/recipes-extended/gawk/gawk_5.1.1.bb
+++ b/meta/recipes-extended/gawk/gawk_5.1.1.bb
@@ -18,6 +18,7 @@ PACKAGECONFIG[mpfr] = "--with-mpfr,--without-mpfr, mpfr"
SRC_URI = "${GNU_MIRROR}/gawk/gawk-${PV}.tar.gz \
file://remove-sensitive-tests.patch \
file://run-ptest \
+ file://CVE-2023-4156.patch \
"
SRC_URI[sha256sum] = "6168d8d1dc8f74bd17d9dc22fa9634c49070f232343b744901da15fb4f06bffd"
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2022-2085.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2022-2085.patch
new file mode 100644
index 0000000000..58cb93727a
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2022-2085.patch
@@ -0,0 +1,44 @@
+From ae1061d948d88667bdf51d47d918c4684d0f67df Mon Sep 17 00:00:00 2001
+From: Robin Watts <Robin.Watts@artifex.com>
+Date: Wed, 16 Feb 2022 15:22:50 +0000
+Subject: [PATCH] Bug 704945: Add init_device_procs entry for mem_x_device.
+
+When allocating a buffer device, we rely on an init_device_procs
+being defined for the device we are using as a prototype. Which
+device we use as a prototype depends upon the number of bits per
+pixel we are using. For bpp > 64, we use mem_x_device, which does
+not currently have an init_device_procs defined.
+
+This is a fairly hard case to tickle, as very few devices use
+more than 64 bits per pixel. The DeviceN device is one of the
+few that does, and then the problem only kicks in if the
+MaxBitmap figure is high enough (or conversely the resolution is
+low enough).
+
+
+http://git.ghostscript.com/?p=ghostpdl.git;a=patch;h=ae1061d948d88667bdf51d47d918c4684d0f67df
+Upstream-Status: Backport
+CVE: CVE-2022-2085
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ base/gdevmx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/base/gdevmx.c b/base/gdevmx.c
+index 08b0cbcfe..89e9ff774 100644
+--- a/base/gdevmx.c
++++ b/base/gdevmx.c
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2001-2021 Artifex Software, Inc.
++/* Copyright (C) 2001-2022 Artifex Software, Inc.
+ All Rights Reserved.
+
+ This software is provided AS-IS with no warranty, either express or
+@@ -25,4 +25,4 @@
+
+ /* The device descriptor. */
+ const gx_device_memory mem_x_device =
+- mem_device("imagex", 256, 0, NULL);
++ mem_device("imagex", 256, 0, mem_initialize_device_procs);
+--
+2.25.1
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0001.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0001.patch
new file mode 100644
index 0000000000..99fcc61b9b
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0001.patch
@@ -0,0 +1,146 @@
+From ed607fedbcd41f4a0e71df6af4ba5b07dd630209 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Wed, 7 Jun 2023 10:23:06 +0100
+Subject: [PATCH 1/2] Bug 706761: Don't "reduce" %pipe% file names for
+ permission validation
+
+For regular file names, we try to simplify relative paths before we use them.
+
+Because the %pipe% device can, effectively, accept command line calls, we
+shouldn't be simplifying that string, because the command line syntax can end
+up confusing the path simplifying code. That can result in permitting a pipe
+command which does not match what was originally permitted.
+
+Special case "%pipe" in the validation code so we always deal with the entire
+string.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=5e65eeae225c7d02d447de5abaf4a8e6d234fcea]
+CVE: CVE-2023-36664
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ base/gpmisc.c | 31 +++++++++++++++++++--------
+ base/gslibctx.c | 56 ++++++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 64 insertions(+), 23 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index 8b6458a..c61ab3f 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -1076,16 +1076,29 @@ gp_validate_path_len(const gs_memory_t *mem,
+ && !memcmp(path + cdirstrl, dirsepstr, dirsepstrl)) {
+ prefix_len = 0;
+ }
+- rlen = len+1;
+- bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
+- if (bufferfull == NULL)
+- return gs_error_VMerror;
+-
+- buffer = bufferfull + prefix_len;
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
+
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ bufferfull = buffer = (char *)gs_alloc_bytes(mem->thread_safe_memory, len + 1, "gp_validate_path");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len+1;
++ bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
++ if (bufferfull == NULL)
++ return gs_error_VMerror;
++
++ buffer = bufferfull + prefix_len;
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+ while (1) {
+ switch (mode[0])
+ {
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index 5bf497b..5fdfe25 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -734,14 +734,28 @@ gs_add_control_path_len_flags(const gs_memory_t *mem, gs_path_control_t type, co
+ return gs_error_rangecheck;
+ }
+
+- rlen = len+1;
+- buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gp_validate_path");
+- if (buffer == NULL)
+- return gs_error_VMerror;
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_add_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len + 1;
+
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
++ buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gs_add_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+
+ n = control->num;
+ for (i = 0; i < n; i++)
+@@ -827,14 +841,28 @@ gs_remove_control_path_len_flags(const gs_memory_t *mem, gs_path_control_t type,
+ return gs_error_rangecheck;
+ }
+
+- rlen = len+1;
+- buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gp_validate_path");
+- if (buffer == NULL)
+- return gs_error_VMerror;
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_remove_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len+1;
+
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
++ buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gs_remove_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+
+ n = control->num;
+ for (i = 0; i < n; i++) {
+--
+2.40.1
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0002.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0002.patch
new file mode 100644
index 0000000000..7d78e6b1b1
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-0002.patch
@@ -0,0 +1,60 @@
+From f96350aeb7f8c2e3f7129866c694a24f241db18c Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Wed, 14 Jun 2023 09:08:12 +0100
+Subject: [PATCH 2/2] Bug 706778: 706761 revisit
+
+Two problems with the original commit. The first was a silly typo inverting
+the logic of a test.
+
+The second was forgetting that we actually validate two candidate
+strings for pipe devices. One with the expected "%pipe%" prefix, the other
+using the pipe character prefix: "|".
+
+This addresses both those.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=fb342fdb60391073a69147cb71af1ac416a81099]
+CVE: CVE-2023-36664
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ base/gpmisc.c | 2 +-
+ base/gslibctx.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index c61ab3f..e459f6a 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -1080,7 +1080,7 @@ gp_validate_path_len(const gs_memory_t *mem,
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ bufferfull = buffer = (char *)gs_alloc_bytes(mem->thread_safe_memory, len + 1, "gp_validate_path");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index 5fdfe25..2a1addf 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -737,7 +737,7 @@ gs_add_control_path_len_flags(const gs_memory_t *mem, gs_path_control_t type, co
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_add_control_path_len");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+@@ -844,7 +844,7 @@ gs_remove_control_path_len_flags(const gs_memory_t *mem, gs_path_control_t type,
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_remove_control_path_len");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+--
+2.40.1
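A small self-contained sketch (not Ghostscript source; the helper name is invented) of the test the two backports above converge on: a path is treated as a pipe, and therefore skips path reduction, only when it starts with '|' or with the literal "%pipe" prefix.

#include <stdbool.h>
#include <string.h>

static bool is_pipe_path(const char *path, size_t len)
{
    if (len > 0 && path[0] == '|')
        return true;
    return len > 5 && memcmp(path, "%pipe", 5) == 0;
}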
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-38559.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-38559.patch
new file mode 100644
index 0000000000..2b2b85fa27
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-38559.patch
@@ -0,0 +1,32 @@
+From 34b0eec257c3a597e0515946f17fb973a33a7b5b Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Mon, 17 Jul 2023 14:06:37 +0100
+Subject: [PATCH] Bug 706897: Copy pcx buffer overrun fix from
+ devices/gdevpcx.c
+
+Bounds check the buffer, before dereferencing the pointer.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=d81b82c70bc1fb9991bb95f1201abb5dea55f57f]
+
+CVE: CVE-2023-38559
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ base/gdevdevn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/base/gdevdevn.c b/base/gdevdevn.c
+index f679127..66c771b 100644
+--- a/base/gdevdevn.c
++++ b/base/gdevdevn.c
+@@ -1950,7 +1950,7 @@ devn_pcx_write_rle(const byte * from, const byte * end, int step, gp_file * file
+ byte data = *from;
+
+ from += step;
+- if (data != *from || from == end) {
++ if (from >= end || data != *from) {
+ if (data >= 0xc0)
+ gp_fputc(0xc1, file);
+ } else {
+--
+2.40.0
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch
new file mode 100644
index 0000000000..979f354ed5
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch
@@ -0,0 +1,62 @@
+From 8b0f20002536867bd73ff4552408a72597190cbe Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Thu, 24 Aug 2023 15:24:35 +0100
+Subject: [PATCH] IJS device - try and secure the IJS server startup
+
+Bug #707051 ""ijs" device can execute arbitrary commands"
+
+The problem is that the 'IJS' device needs to start the IJS server, and
+that is indeed an arbitrary command line. There is (apparently) no way
+to validate it. Indeed, this is covered quite clearly in the comments
+at the start of the source:
+
+ * WARNING: The ijs server can be selected on the gs command line
+ * which is a security risk, since any program can be run.
+
+Previously this used the awful LockSafetyParams hackery, which we
+abandoned some time ago because it simply couldn't be made secure (it
+was implemented in PostScript and was therefore vulnerable to PostScript
+programs).
+
+This commit prevents PostScript programs switching to the IJS device
+after SAFER has been activated, and prevents changes to the IjsServer
+parameter after SAFER has been activated.
+
+SAFER is activated, unless explicitly disabled, before any user
+PostScript is executed which means that the device and the server
+invocation can only be configured on the command line. This does at
+least provide minimal security against malicious PostScript programs.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=8b0f20002536867bd73ff4552408a72597190cbe]
+
+CVE: CVE-2023-43115
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ devices/gdevijs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/devices/gdevijs.c b/devices/gdevijs.c
+index 8cbd84b97..16f5a1752 100644
+--- a/devices/gdevijs.c
++++ b/devices/gdevijs.c
+@@ -888,6 +888,8 @@ gsijs_initialize_device(gx_device *dev)
+ static const char rgb[] = "DeviceRGB";
+ gx_device_ijs *ijsdev = (gx_device_ijs *)dev;
+
++ if (ijsdev->memory->gs_lib_ctx->core->path_control_active)
++ return_error(gs_error_invalidaccess);
+ if (!ijsdev->ColorSpace) {
+ ijsdev->ColorSpace = gs_malloc(ijsdev->memory, sizeof(rgb), 1,
+ "gsijs_initialize");
+@@ -1326,7 +1328,7 @@ gsijs_put_params(gx_device *dev, gs_param_list *plist)
+ if (code >= 0)
+ code = gsijs_read_string(plist, "IjsServer",
+ ijsdev->IjsServer, sizeof(ijsdev->IjsServer),
+- dev->LockSafetyParams, is_open);
++ ijsdev->memory->gs_lib_ctx->core->path_control_active, is_open);
+
+ if (code >= 0)
+ code = gsijs_read_string_malloc(plist, "DeviceManufacturer",
+--
+2.40.0
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-46751.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-46751.patch
new file mode 100644
index 0000000000..6fe5590892
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-46751.patch
@@ -0,0 +1,41 @@
+From 5d2da96e81c7455338302c71a291088a8396245a Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Mon, 16 Oct 2023 16:49:40 +0100
+Subject: [PATCH] Bug 707264: Fix tiffsep(1) requirement for seekable output
+ files
+
+In the device initialization redesign, tiffsep and tiffsep1 lost the requirement
+for the output files to be seekable.
+
+Fixing that highlighted a problem with the error handling in
+gdev_prn_open_printer_seekable() where closing the erroring file would leave a
+dangling pointer, and lead to a crash.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=5d2da96e81c7455338302c71a291088a8396245a]
+CVE: CVE-2023-46751
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gdevprn.c | 1 +
+ devices/gdevtsep.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/base/gdevprn.c
++++ b/base/gdevprn.c
+@@ -1251,6 +1251,7 @@ gdev_prn_open_printer_seekable(gx_device
+ && !IS_LIBCTX_STDERR(pdev->memory, gp_get_file(ppdev->file))) {
+
+ code = gx_device_close_output_file(pdev, ppdev->fname, ppdev->file);
++ ppdev->file = NULL;
+ if (code < 0)
+ return code;
+ }
+--- a/devices/gdevtsep.c
++++ b/devices/gdevtsep.c
+@@ -738,6 +738,7 @@ tiffsep_initialize_device_procs(gx_devic
+ {
+ gdev_prn_initialize_device_procs(dev);
+
++ set_dev_proc(dev, output_page, gdev_prn_output_page_seekable);
+ set_dev_proc(dev, open_device, tiffsep_prn_open);
+ set_dev_proc(dev, close_device, tiffsep_prn_close);
+ set_dev_proc(dev, map_color_rgb, tiffsep_decode_color);
diff --git a/meta/recipes-extended/ghostscript/ghostscript/cve-2023-28879.patch b/meta/recipes-extended/ghostscript/ghostscript/cve-2023-28879.patch
new file mode 100644
index 0000000000..9b057d609a
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/cve-2023-28879.patch
@@ -0,0 +1,60 @@
+From 37ed5022cecd584de868933b5b60da2e995b3179 Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Fri, 24 Mar 2023 13:19:57 +0000
+Subject: [PATCH] Graphics library - prevent buffer overrun in (T)BCP encoding
+
+Bug #706494 "Buffer Overflow in s_xBCPE_process"
+
+As described in detail in the bug report, if the write buffer is filled
+to one byte less than full, and we then try to write an escaped
+character, we overrun the buffer because we don't check before
+writing two bytes to it.
+
+This just checks if we have two bytes before starting to write an
+escaped character and exits if we don't (replacing the consumed byte
+of the input).
+
+Up for further discussion; why do we even permit a BCP encoding filter
+anyway ? I think we should remove this, at least when SAFER is true.
+---
+CVE: CVE-2023-28879
+
+Upstream-Status: Backport [see text]
+
+git://git.ghostscript.com/ghostpdl
+cherry-pick
+
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+
+---
+ base/sbcp.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/base/sbcp.c b/base/sbcp.c
+index 979ae0992..47fc233ec 100644
+--- a/base/sbcp.c
++++ b/base/sbcp.c
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2001-2021 Artifex Software, Inc.
++/* Copyright (C) 2001-2023 Artifex Software, Inc.
+ All Rights Reserved.
+
+ This software is provided AS-IS with no warranty, either express or
+@@ -50,6 +50,14 @@ s_xBCPE_process(stream_state * st, stream_cursor_read * pr,
+ byte ch = *++p;
+
+ if (ch <= 31 && escaped[ch]) {
++ /* Make sure we have space to store two characters in the write buffer,
++ * if we don't then exit without consuming the input character, we'll process
++ * that on the next time round.
++ */
++ if (pw->limit - q < 2) {
++ p--;
++ break;
++ }
+ if (p == rlimit) {
+ p--;
+ break;
+--
+2.25.1
+
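A rough sketch of the guard's shape, using invented types rather than Ghostscript's stream cursors: before the filter emits a two-byte escape sequence it checks that two output bytes remain, and otherwise backs off so the input byte is reprocessed on the next call.

#include <stddef.h>

/* Hypothetical write cursor; ptr points at the last byte written,
 * limit at the last writable byte, as in the hunk above. */
struct wcursor {
    unsigned char *ptr;
    unsigned char *limit;
};

/* Returns 0 when the caller should un-consume the input byte and stop. */
static int emit_escape_pair(struct wcursor *w, unsigned char a, unsigned char b)
{
    if (w->limit - w->ptr < 2)
        return 0;
    *++w->ptr = a;
    *++w->ptr = b;
    return 1;
}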
diff --git a/meta/recipes-extended/ghostscript/ghostscript_9.55.0.bb b/meta/recipes-extended/ghostscript/ghostscript_9.55.0.bb
index c28e62f089..e99c740685 100644
--- a/meta/recipes-extended/ghostscript/ghostscript_9.55.0.bb
+++ b/meta/recipes-extended/ghostscript/ghostscript_9.55.0.bb
@@ -10,7 +10,7 @@ dot-matrix, inkjet and laser models. \
HOMEPAGE = "http://www.ghostscript.com"
SECTION = "console/utils"
-LICENSE = "GPL-3.0-only"
+LICENSE = "AGPL-3.0-or-later"
LIC_FILES_CHKSUM = "file://LICENSE;md5=f98ffa763e50cded76f49bce73aade16"
DEPENDS = "ghostscript-native tiff jpeg fontconfig cups libpng"
@@ -23,6 +23,9 @@ UPSTREAM_CHECK_REGEX = "(?P<pver>\d+(\.\d+)+)\.tar"
# however we use an external jpeg which doesn't have the issue.
CVE_CHECK_IGNORE += "CVE-2013-6629"
+# Issue is in GhostPCL, which is not part of this GhostScript recipe.
+CVE_CHECK_IGNORE += "CVE-2023-38560"
+
def gs_verdir(v):
return "".join(v.split("."))
@@ -33,6 +36,13 @@ SRC_URI_BASE = "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/d
file://do-not-check-local-libpng-source.patch \
file://avoid-host-contamination.patch \
file://mkdir-p.patch \
+ file://CVE-2022-2085.patch \
+ file://cve-2023-28879.patch \
+ file://CVE-2023-36664-0001.patch \
+ file://CVE-2023-36664-0002.patch \
+ file://CVE-2023-38559.patch \
+ file://CVE-2023-43115.patch \
+ file://CVE-2023-46751.patch \
"
SRC_URI = "${SRC_URI_BASE} \
diff --git a/meta/recipes-extended/gperf/gperf/1862c6e57a308a05889c80c048dbc58bdc378dcb.patch b/meta/recipes-extended/gperf/gperf/1862c6e57a308a05889c80c048dbc58bdc378dcb.patch
new file mode 100644
index 0000000000..98959db0a8
--- /dev/null
+++ b/meta/recipes-extended/gperf/gperf/1862c6e57a308a05889c80c048dbc58bdc378dcb.patch
@@ -0,0 +1,181 @@
+From 1862c6e57a308a05889c80c048dbc58bdc378dcb Mon Sep 17 00:00:00 2001
+From: Bruno Haible <bruno@clisp.org>
+Date: Tue, 5 Jul 2022 07:51:46 +0200
+Subject: [PATCH] Add support for reproducible builds.
+
+Suggested by Richard Purdie <richard.purdie@linuxfoundation.org> in
+<https://lists.gnu.org/archive/html/bug-gperf/2022-07/msg00000.html>.
+
+* autogen.sh: Import also lib/filename.h.
+* Makefile.in (IMPORTED_FILES): Add lib/filename.h.
+* src/options.cc: Include filename.h.
+(Options::print_options): Print only the base name of the program name.
+* tests/*.exp: Update.
+
+Upstream-Status: Backport
+
+Index: gperf-3.1/ChangeLog
+===================================================================
+--- gperf-3.1.orig/ChangeLog
++++ gperf-3.1/ChangeLog
+@@ -1,3 +1,14 @@
++2022-07-05 Bruno Haible <bruno@clisp.org>
++
++ Add support for reproducible builds.
++ Suggested by Richard Purdie <richard.purdie@linuxfoundation.org> in
++ <https://lists.gnu.org/archive/html/bug-gperf/2022-07/msg00000.html>.
++ * autogen.sh: Import also lib/filename.h.
++ * Makefile.in (IMPORTED_FILES): Add lib/filename.h.
++ * src/options.cc: Include filename.h.
++ (Options::print_options): Print only the base name of the program name.
++ * tests/*.exp: Update.
++
+ 2017-01-02 Marcel Schaible <marcel.schaible@studium.fernuni-hagen.de>
+
+ * gperf-3.1 released.
+Index: gperf-3.1/src/options.cc
+===================================================================
+--- gperf-3.1.orig/src/options.cc
++++ gperf-3.1/src/options.cc
+@@ -26,6 +26,7 @@
+ #include <string.h> /* declares strcmp() */
+ #include <ctype.h> /* declares isdigit() */
+ #include <limits.h> /* defines CHAR_MAX */
++#include "filename.h"
+ #include "getopt.h"
+ #include "version.h"
+
+@@ -280,6 +281,16 @@ Options::print_options () const
+ {
+ const char *arg = _argument_vector[i];
+
++ if (i == 0)
++ {
++ /* _argument_vector[0] is the program name. Print only its base name.
++ This is useful for reproducible builds. */
++ const char *p = arg + strlen (arg);
++ while (p > arg && ! ISSLASH (p[-1]))
++ p--;
++ arg = p;
++ }
++
+ /* Escape arg if it contains shell metacharacters. */
+ if (*arg == '-')
+ {
+Index: gperf-3.1/lib/filename.h
+===================================================================
+--- /dev/null
++++ gperf-3.1/lib/filename.h
+@@ -0,0 +1,112 @@
++/* Basic filename support macros.
++ Copyright (C) 2001-2022 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++/* From Paul Eggert and Jim Meyering. */
++
++#ifndef _FILENAME_H
++#define _FILENAME_H
++
++#include <string.h>
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++/* Filename support.
++ ISSLASH(C) tests whether C is a directory separator
++ character.
++ HAS_DEVICE(Filename) tests whether Filename contains a device
++ specification.
++ FILE_SYSTEM_PREFIX_LEN(Filename) length of the device specification
++ at the beginning of Filename,
++ index of the part consisting of
++ alternating components and slashes.
++ FILE_SYSTEM_DRIVE_PREFIX_CAN_BE_RELATIVE
++ 1 when a non-empty device specification
++ can be followed by an empty or relative
++ part,
++ 0 when a non-empty device specification
++ must be followed by a slash,
++ 0 when device specification don't exist.
++ IS_ABSOLUTE_FILE_NAME(Filename)
++ tests whether Filename is independent of
++ any notion of "current directory".
++ IS_RELATIVE_FILE_NAME(Filename)
++ tests whether Filename may be concatenated
++ to a directory filename.
++ Note: On native Windows, OS/2, DOS, "c:" is neither an absolute nor a
++ relative file name!
++ IS_FILE_NAME_WITH_DIR(Filename) tests whether Filename contains a device
++ or directory specification.
++ */
++#if defined _WIN32 || defined __CYGWIN__ \
++ || defined __EMX__ || defined __MSDOS__ || defined __DJGPP__
++ /* Native Windows, Cygwin, OS/2, DOS */
++# define ISSLASH(C) ((C) == '/' || (C) == '\\')
++ /* Internal macro: Tests whether a character is a drive letter. */
++# define _IS_DRIVE_LETTER(C) \
++ (((C) >= 'A' && (C) <= 'Z') || ((C) >= 'a' && (C) <= 'z'))
++ /* Help the compiler optimizing it. This assumes ASCII. */
++# undef _IS_DRIVE_LETTER
++# define _IS_DRIVE_LETTER(C) \
++ (((unsigned int) (C) | ('a' - 'A')) - 'a' <= 'z' - 'a')
++# define HAS_DEVICE(Filename) \
++ (_IS_DRIVE_LETTER ((Filename)[0]) && (Filename)[1] == ':')
++# define FILE_SYSTEM_PREFIX_LEN(Filename) (HAS_DEVICE (Filename) ? 2 : 0)
++# ifdef __CYGWIN__
++# define FILE_SYSTEM_DRIVE_PREFIX_CAN_BE_RELATIVE 0
++# else
++ /* On native Windows, OS/2, DOS, the system has the notion of a
++ "current directory" on each drive. */
++# define FILE_SYSTEM_DRIVE_PREFIX_CAN_BE_RELATIVE 1
++# endif
++# if FILE_SYSTEM_DRIVE_PREFIX_CAN_BE_RELATIVE
++# define IS_ABSOLUTE_FILE_NAME(Filename) \
++ ISSLASH ((Filename)[FILE_SYSTEM_PREFIX_LEN (Filename)])
++# else
++# define IS_ABSOLUTE_FILE_NAME(Filename) \
++ (ISSLASH ((Filename)[0]) || HAS_DEVICE (Filename))
++# endif
++# define IS_RELATIVE_FILE_NAME(Filename) \
++ (! (ISSLASH ((Filename)[0]) || HAS_DEVICE (Filename)))
++# define IS_FILE_NAME_WITH_DIR(Filename) \
++ (strchr ((Filename), '/') != NULL || strchr ((Filename), '\\') != NULL \
++ || HAS_DEVICE (Filename))
++#else
++ /* Unix */
++# define ISSLASH(C) ((C) == '/')
++# define HAS_DEVICE(Filename) ((void) (Filename), 0)
++# define FILE_SYSTEM_PREFIX_LEN(Filename) ((void) (Filename), 0)
++# define FILE_SYSTEM_DRIVE_PREFIX_CAN_BE_RELATIVE 0
++# define IS_ABSOLUTE_FILE_NAME(Filename) ISSLASH ((Filename)[0])
++# define IS_RELATIVE_FILE_NAME(Filename) (! ISSLASH ((Filename)[0]))
++# define IS_FILE_NAME_WITH_DIR(Filename) (strchr ((Filename), '/') != NULL)
++#endif
++
++/* Deprecated macros. For backward compatibility with old users of the
++ 'filename' module. */
++#define IS_ABSOLUTE_PATH IS_ABSOLUTE_FILE_NAME
++#define IS_PATH_WITH_DIR IS_FILE_NAME_WITH_DIR
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _FILENAME_H */
diff --git a/meta/recipes-extended/gperf/gperf_3.1.bb b/meta/recipes-extended/gperf/gperf_3.1.bb
index 82750fca05..c9f09c7931 100644
--- a/meta/recipes-extended/gperf/gperf_3.1.bb
+++ b/meta/recipes-extended/gperf/gperf_3.1.bb
@@ -9,6 +9,8 @@ SRC_URI = "${GNU_MIRROR}/${BPN}/${BP}.tar.gz"
SRC_URI[md5sum] = "9e251c0a618ad0824b51117d5d9db87e"
SRC_URI[sha256sum] = "588546b945bba4b70b6a3a616e80b4ab466e3f33024a352fc2198112cdbb3ae2"
+SRC_URI += "file://1862c6e57a308a05889c80c048dbc58bdc378dcb.patch"
+
inherit autotools
# The nested configures don't find the parent aclocal.m4 out of the box, so tell
diff --git a/meta/recipes-extended/groff/files/0001-Make-manpages-mulitlib-identical.patch b/meta/recipes-extended/groff/files/0001-Make-manpages-mulitlib-identical.patch
index 9105da6457..c3cfc7cea8 100644
--- a/meta/recipes-extended/groff/files/0001-Make-manpages-mulitlib-identical.patch
+++ b/meta/recipes-extended/groff/files/0001-Make-manpages-mulitlib-identical.patch
@@ -3,7 +3,7 @@ From: Jeremy Puhlman <jpuhlman@mvista.com>
Date: Sat, 7 Mar 2020 00:59:13 +0000
Subject: [PATCH] Make manpages mulitlib identical
-Upstream-Status: Pending
+Upstream-Status: Submitted [by email to g.branden.robinson@gmail.com]
Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
---
Makefile.am | 2 +-
diff --git a/meta/recipes-extended/groff/files/0001-replace-perl-w-with-use-warnings.patch b/meta/recipes-extended/groff/files/0001-replace-perl-w-with-use-warnings.patch
index eda6a40f51..b028fa20aa 100644
--- a/meta/recipes-extended/groff/files/0001-replace-perl-w-with-use-warnings.patch
+++ b/meta/recipes-extended/groff/files/0001-replace-perl-w-with-use-warnings.patch
@@ -15,7 +15,7 @@ doesn't work:
So replace "perl -w" with "use warnings" to make it work.
-Upstream-Status: Pending
+Upstream-Status: Submitted [by email to g.branden.robinson@gmail.com]
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
diff --git a/meta/recipes-extended/less/less/CVE-2022-46663.patch b/meta/recipes-extended/less/less/CVE-2022-46663.patch
new file mode 100644
index 0000000000..4d61a52fa6
--- /dev/null
+++ b/meta/recipes-extended/less/less/CVE-2022-46663.patch
@@ -0,0 +1,31 @@
+From a78e1351113cef564d790a730d657a321624d79c Mon Sep 17 00:00:00 2001
+From: Mark Nudelman <markn@greenwoodsoftware.com>
+Date: Fri, 7 Oct 2022 19:25:46 -0700
+Subject: [PATCH] End OSC8 hyperlink on invalid embedded escape sequence.
+
+
+CVE: CVE-2022-46663
+Upstream-Status: Backport [https://github.com/gwsw/less/commit/a78e1351113cef564d790a730d657a321624d79c]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ line.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/line.c b/line.c
+index 0ef9b07..9d49cf8 100644
+--- a/line.c
++++ b/line.c
+@@ -633,8 +633,8 @@ ansi_step(pansi, ch)
+ /* Hyperlink ends with \7 or ESC-backslash. */
+ if (ch == '\7')
+ return ANSI_END;
+- if (pansi->prev_esc && ch == '\\')
+- return ANSI_END;
++ if (pansi->prev_esc)
++ return (ch == '\\') ? ANSI_END : ANSI_ERR;
+ pansi->prev_esc = (ch == ESC);
+ return ANSI_MID;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-extended/less/less/CVE-2022-48624.patch b/meta/recipes-extended/less/less/CVE-2022-48624.patch
new file mode 100644
index 0000000000..409730bd4f
--- /dev/null
+++ b/meta/recipes-extended/less/less/CVE-2022-48624.patch
@@ -0,0 +1,41 @@
+From c6ac6de49698be84d264a0c4c0c40bb870b10144 Mon Sep 17 00:00:00 2001
+From: Mark Nudelman <markn@greenwoodsoftware.com>
+Date: Sat, 25 Jun 2022 11:54:43 -0700
+Subject: [PATCH] Shell-quote filenames when invoking LESSCLOSE.
+
+Upstream-Status: Backport [https://github.com/gwsw/less/commit/c6ac6de49698be84d264a0c4c0c40bb870b10144]
+CVE: CVE-2022-48624
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ filename.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/filename.c b/filename.c
+index 5824e385..dff20c08 100644
+--- a/filename.c
++++ b/filename.c
+@@ -972,6 +972,8 @@ close_altfile(altfilename, filename)
+ {
+ #if HAVE_POPEN
+ char *lessclose;
++ char *qfilename;
++ char *qaltfilename;
+ FILE *fd;
+ char *cmd;
+ int len;
+@@ -986,9 +988,13 @@ close_altfile(altfilename, filename)
+ error("LESSCLOSE ignored; must contain no more than 2 %%s", NULL_PARG);
+ return;
+ }
+- len = (int) (strlen(lessclose) + strlen(filename) + strlen(altfilename) + 2);
++ qfilename = shell_quote(filename);
++ qaltfilename = shell_quote(altfilename);
++ len = (int) (strlen(lessclose) + strlen(qfilename) + strlen(qaltfilename) + 2);
+ cmd = (char *) ecalloc(len, sizeof(char));
+- SNPRINTF2(cmd, len, lessclose, filename, altfilename);
++ SNPRINTF2(cmd, len, lessclose, qfilename, qaltfilename);
++ free(qaltfilename);
++ free(qfilename);
+ fd = shellcmd(cmd);
+ free(cmd);
+ if (fd != NULL)
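A standalone sketch (not the less source) of the idea behind this fix: single-quote each filename before it is substituted into the LESSCLOSE command template, so shell metacharacters in a name cannot introduce extra commands.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Wrap s in single quotes, turning each embedded ' into '\'' . */
static char *quote_arg(const char *s)
{
    char *out = malloc(strlen(s) * 4 + 3);   /* worst case plus quotes and NUL */
    char *p = out;

    if (out == NULL)
        return NULL;
    *p++ = '\'';
    for (; *s; s++) {
        if (*s == '\'') {
            memcpy(p, "'\\''", 4);
            p += 4;
        } else {
            *p++ = *s;
        }
    }
    *p++ = '\'';
    *p = '\0';
    return out;
}

int main(void)
{
    char *q = quote_arg("report; rm -rf $HOME.txt");

    if (q != NULL) {
        /* the metacharacters stay inert inside the quoted argument */
        printf("lessclose.sh %s /tmp/altfile\n", q);
        free(q);
    }
    return 0;
}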
diff --git a/meta/recipes-extended/less/less_600.bb b/meta/recipes-extended/less/less_600.bb
index 9ebe39daab..f88127a9e3 100644
--- a/meta/recipes-extended/less/less_600.bb
+++ b/meta/recipes-extended/less/less_600.bb
@@ -26,6 +26,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=1ebbd3e34237af26da5dc08a4e440464 \
DEPENDS = "ncurses"
SRC_URI = "http://www.greenwoodsoftware.com/${BPN}/${BPN}-${PV}.tar.gz \
+ file://CVE-2022-46663.patch \
+ file://CVE-2022-48624.patch \
"
SRC_URI[sha256sum] = "6633d6aa2b3cc717afb2c205778c7c42c4620f63b1d682f3d12c98af0be74d20"
diff --git a/meta/recipes-extended/libarchive/libarchive_3.6.1.bb b/meta/recipes-extended/libarchive/libarchive_3.6.2.bb
index c795b41628..0219ffa720 100644
--- a/meta/recipes-extended/libarchive/libarchive_3.6.1.bb
+++ b/meta/recipes-extended/libarchive/libarchive_3.6.2.bb
@@ -7,11 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=d499814247adaee08d88080841cb5665"
DEPENDS = "e2fsprogs-native"
-PACKAGECONFIG ?= "zlib bz2 xz lzo zstd"
-
-PACKAGECONFIG:append:class-target = "\
- ${@bb.utils.filter('DISTRO_FEATURES', 'acl xattr', d)} \
-"
+PACKAGECONFIG ?= "zlib bz2 xz lzo zstd ${@bb.utils.filter('DISTRO_FEATURES', 'acl xattr', d)}"
DEPENDS_BZIP2 = "bzip2-replacement-native"
DEPENDS_BZIP2:class-target = "bzip2"
@@ -30,12 +26,15 @@ PACKAGECONFIG[lz4] = "--with-lz4,--without-lz4,lz4,"
PACKAGECONFIG[mbedtls] = "--with-mbedtls,--without-mbedtls,mbedtls,"
PACKAGECONFIG[zstd] = "--with-zstd,--without-zstd,zstd,"
-EXTRA_OECONF += "--enable-largefile"
+EXTRA_OECONF += "--enable-largefile --without-iconv"
SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz"
UPSTREAM_CHECK_URI = "http://libarchive.org/"
-SRC_URI[sha256sum] = "c676146577d989189940f1959d9e3980d28513d74eedfbc6b7f15ea45fe54ee2"
+SRC_URI[sha256sum] = "ba6d02f15ba04aba9c23fd5f236bb234eab9d5209e95d1c4df85c44d5f19b9b3"
+
+# upstream-wontfix: upstream has documented that the reported function is not thread-safe
+CVE_CHECK_IGNORE += "CVE-2023-30571"
inherit autotools update-alternatives pkgconfig
diff --git a/meta/recipes-extended/libnss-nis/libnss-nis.bb b/meta/recipes-extended/libnss-nis/libnss-nis.bb
index d0afb3ca0a..f0e687c330 100644
--- a/meta/recipes-extended/libnss-nis/libnss-nis.bb
+++ b/meta/recipes-extended/libnss-nis/libnss-nis.bb
@@ -13,9 +13,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
SECTION = "libs"
DEPENDS += "libtirpc libnsl2"
-PV = "3.1+git${SRCPV}"
+PV = "3.2"
-SRCREV = "062f31999b35393abf7595cb89dfc9590d5a42ad"
+SRCREV = "cd0d391af9535b56e612ed227c1b89be269f3d59"
SRC_URI = "git://github.com/thkukuk/libnss_nis;branch=master;protocol=https \
"
diff --git a/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch b/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch
new file mode 100644
index 0000000000..3d5e5b8db9
--- /dev/null
+++ b/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch
@@ -0,0 +1,155 @@
+From 3ee23a0a5a8c2261e788acbee67722fcbecbea28 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 27 Jul 2022 17:34:21 +0530
+Subject: [PATCH] CVE-2021-46828
+
+Upstream-Status: Backport [http://git.linux-nfs.org/?p=steved/libtirpc.git;a=commit;h=86529758570cef4c73fb9b9c4104fdc510f701ed]
+CVE: CVE-2021-46828
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/svc.c | 17 +++++++++++++-
+ src/svc_vc.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 77 insertions(+), 2 deletions(-)
+
+diff --git a/src/svc.c b/src/svc.c
+index 6db164b..3a8709f 100644
+--- a/src/svc.c
++++ b/src/svc.c
+@@ -57,7 +57,7 @@
+
+ #define max(a, b) (a > b ? a : b)
+
+-static SVCXPRT **__svc_xports;
++SVCXPRT **__svc_xports;
+ int __svc_maxrec;
+
+ /*
+@@ -194,6 +194,21 @@ __xprt_do_unregister (xprt, dolock)
+ rwlock_unlock (&svc_fd_lock);
+ }
+
++int
++svc_open_fds()
++{
++ int ix;
++ int nfds = 0;
++
++ rwlock_rdlock (&svc_fd_lock);
++ for (ix = 0; ix < svc_max_pollfd; ++ix) {
++ if (svc_pollfd[ix].fd != -1)
++ nfds++;
++ }
++ rwlock_unlock (&svc_fd_lock);
++ return (nfds);
++}
++
+ /*
+ * Add a service program to the callout list.
+ * The dispatch routine will be called when a rpc request for this
+diff --git a/src/svc_vc.c b/src/svc_vc.c
+index f1d9f00..3dc8a75 100644
+--- a/src/svc_vc.c
++++ b/src/svc_vc.c
+@@ -64,6 +64,8 @@
+
+
+ extern rwlock_t svc_fd_lock;
++extern SVCXPRT **__svc_xports;
++extern int svc_open_fds();
+
+ static SVCXPRT *makefd_xprt(int, u_int, u_int);
+ static bool_t rendezvous_request(SVCXPRT *, struct rpc_msg *);
+@@ -82,6 +84,7 @@ static void svc_vc_ops(SVCXPRT *);
+ static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
+ static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
+ void *in);
++static int __svc_destroy_idle(int timeout);
+
+ struct cf_rendezvous { /* kept in xprt->xp_p1 for rendezvouser */
+ u_int sendsize;
+@@ -313,13 +316,14 @@ done:
+ return (xprt);
+ }
+
++
+ /*ARGSUSED*/
+ static bool_t
+ rendezvous_request(xprt, msg)
+ SVCXPRT *xprt;
+ struct rpc_msg *msg;
+ {
+- int sock, flags;
++ int sock, flags, nfds, cnt;
+ struct cf_rendezvous *r;
+ struct cf_conn *cd;
+ struct sockaddr_storage addr;
+@@ -379,6 +383,16 @@ again:
+
+ gettimeofday(&cd->last_recv_time, NULL);
+
++ nfds = svc_open_fds();
++ if (nfds >= (_rpc_dtablesize() / 5) * 4) {
++ /* destroy idle connections */
++ cnt = __svc_destroy_idle(15);
++ if (cnt == 0) {
++ /* destroy least active */
++ __svc_destroy_idle(0);
++ }
++ }
++
+ return (FALSE); /* there is never an rpc msg to be processed */
+ }
+
+@@ -820,3 +834,49 @@ __svc_clean_idle(fd_set *fds, int timeout, bool_t cleanblock)
+ {
+ return FALSE;
+ }
++
++static int
++__svc_destroy_idle(int timeout)
++{
++ int i, ncleaned = 0;
++ SVCXPRT *xprt, *least_active;
++ struct timeval tv, tdiff, tmax;
++ struct cf_conn *cd;
++
++ gettimeofday(&tv, NULL);
++ tmax.tv_sec = tmax.tv_usec = 0;
++ least_active = NULL;
++ rwlock_wrlock(&svc_fd_lock);
++
++ for (i = 0; i <= svc_max_pollfd; i++) {
++ if (svc_pollfd[i].fd == -1)
++ continue;
++ xprt = __svc_xports[i];
++ if (xprt == NULL || xprt->xp_ops == NULL ||
++ xprt->xp_ops->xp_recv != svc_vc_recv)
++ continue;
++ cd = (struct cf_conn *)xprt->xp_p1;
++ if (!cd->nonblock)
++ continue;
++ if (timeout == 0) {
++ timersub(&tv, &cd->last_recv_time, &tdiff);
++ if (timercmp(&tdiff, &tmax, >)) {
++ tmax = tdiff;
++ least_active = xprt;
++ }
++ continue;
++ }
++ if (tv.tv_sec - cd->last_recv_time.tv_sec > timeout) {
++ __xprt_unregister_unlocked(xprt);
++ __svc_vc_dodestroy(xprt);
++ ncleaned++;
++ }
++ }
++ if (timeout == 0 && least_active != NULL) {
++ __xprt_unregister_unlocked(least_active);
++ __svc_vc_dodestroy(least_active);
++ ncleaned++;
++ }
++ rwlock_unlock(&svc_fd_lock);
++ return (ncleaned);
++}
+--
+2.25.1
+
diff --git a/meta/recipes-extended/libtirpc/libtirpc_1.3.2.bb b/meta/recipes-extended/libtirpc/libtirpc_1.3.2.bb
index 45b3d2befc..6980135a92 100644
--- a/meta/recipes-extended/libtirpc/libtirpc_1.3.2.bb
+++ b/meta/recipes-extended/libtirpc/libtirpc_1.3.2.bb
@@ -9,7 +9,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=f835cce8852481e4b2bbbdd23b5e47f3 \
PROVIDES = "virtual/librpc"
-SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2"
+SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2 \
+ file://CVE-2021-46828.patch \
+ "
UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/libtirpc/files/libtirpc/"
UPSTREAM_CHECK_REGEX = "(?P<pver>\d+(\.\d+)+)/"
SRC_URI[sha256sum] = "e24eb88b8ce7db3b7ca6eb80115dd1284abc5ec32a8deccfed2224fc2532b9fd"
@@ -19,7 +21,7 @@ inherit autotools pkgconfig
EXTRA_OECONF = "--disable-gssapi"
do_install:append() {
- chown root:root ${D}${sysconfdir}/netconfig
+ test -e ${D}${sysconfdir}/netconfig && chown root:root ${D}${sysconfdir}/netconfig
}
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-extended/lighttpd/lighttpd_1.4.64.bb b/meta/recipes-extended/lighttpd/lighttpd_1.4.67.bb
index 8d2e77e011..838881f238 100644
--- a/meta/recipes-extended/lighttpd/lighttpd_1.4.64.bb
+++ b/meta/recipes-extended/lighttpd/lighttpd_1.4.67.bb
@@ -19,7 +19,7 @@ SRC_URI = "http://download.lighttpd.net/lighttpd/releases-1.4.x/lighttpd-${PV}.t
file://lighttpd \
"
-SRC_URI[sha256sum] = "e1489d9fa7496fbf2e071c338b593b2300d38c23f1e5967e52c9ef482e1b0e26"
+SRC_URI[sha256sum] = "7e04d767f51a8d824b32e2483ef2950982920d427d1272ef4667f49d6f89f358"
DEPENDS = "virtual/crypt"
diff --git a/meta/recipes-extended/logrotate/logrotate_3.20.1.bb b/meta/recipes-extended/logrotate/logrotate_3.20.1.bb
index 35977535aa..3df6ebd26d 100644
--- a/meta/recipes-extended/logrotate/logrotate_3.20.1.bb
+++ b/meta/recipes-extended/logrotate/logrotate_3.20.1.bb
@@ -67,7 +67,6 @@ do_install(){
install -p -m 644 ${S}/examples/logrotate.conf ${D}${sysconfdir}/logrotate.conf
install -p -m 644 ${S}/examples/btmp ${D}${sysconfdir}/logrotate.d/btmp
install -p -m 644 ${S}/examples/wtmp ${D}${sysconfdir}/logrotate.d/wtmp
- touch ${D}${localstatedir}/lib/logrotate.status
if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then
install -d ${D}${systemd_system_unitdir}
diff --git a/meta/recipes-extended/lsof/lsof_4.94.0.bb b/meta/recipes-extended/lsof/lsof_4.94.0.bb
index c2b8bc839b..d50959d73c 100644
--- a/meta/recipes-extended/lsof/lsof_4.94.0.bb
+++ b/meta/recipes-extended/lsof/lsof_4.94.0.bb
@@ -19,6 +19,15 @@ SRCREV = "005e014e1abdadb2493d8b3ce87b37a2c0a2351d"
S = "${WORKDIR}/git"
+
+inherit update-alternatives
+
+ALTERNATIVE:${PN} = "lsof"
+ALTERNATIVE_LINK_NAME[lsof] = "${sbindir}/lsof"
+# Make our priority higher than busybox
+ALTERNATIVE_PRIORITY = "100"
+
+
export LSOF_INCLUDE = "${STAGING_INCDIR}"
do_configure () {
diff --git a/meta/recipes-extended/ltp/ltp/0001-clock_gettime04-set-threshold-based-on-the-clock-res.patch b/meta/recipes-extended/ltp/ltp/0001-clock_gettime04-set-threshold-based-on-the-clock-res.patch
new file mode 100644
index 0000000000..b4879221ad
--- /dev/null
+++ b/meta/recipes-extended/ltp/ltp/0001-clock_gettime04-set-threshold-based-on-the-clock-res.patch
@@ -0,0 +1,89 @@
+From 9851deb86ef257a98d7433280161d8ca685aa669 Mon Sep 17 00:00:00 2001
+From: Li Wang <liwang@redhat.com>
+Date: Tue, 29 Mar 2022 13:03:51 +0800
+Subject: [PATCH] clock_gettime04: set threshold based on the clock resolution
+
+This is to get rid of the intermittent failures in clock_gettime04,
+which are likely caused by different clock tick rates on platforms.
+Here give two thresholds (in milliseconds) for comparison, one for
+COARSE clock and one for the rest.
+
+Error log:
+ clock_gettime04.c:163: TFAIL: CLOCK_REALTIME_COARSE(syscall with old kernel spec):
+ Difference between successive readings greater than 5 ms (1): 10
+ clock_gettime04.c:163: TFAIL: CLOCK_MONOTONIC_COARSE(vDSO with old kernel spec):
+ Difference between successive readings greater than 5 ms (2): 10
+
+From Waiman Long:
+ That failure happens for CLOCK_REALTIME_COARSE which is a faster but less
+ precise version of CLOCK_REALTIME. The time resolution is actually a clock
+ tick. Since arm64 has a HZ rate of 100. That means each tick is 10ms. So a
+ CLOCK_REALTIME_COARSE threshold of 5ms is probably not enough. I would say
+ in the case of CLOCK_REALTIME_COARSE, we have to increase the threshold based
+ on the clock tick rate of the system. This is more a test failure than is
+ an inherent problem in the kernel.
+
+Fixes #898
+
+Upstream-Status: Backport
+[https://github.com/linux-test-project/ltp/commit/9851deb86ef257a98d7433280161d8ca685aa669]
+
+Reported-by: Eirik Fuller <efuller@redhat.com>
+Signed-off-by: Li Wang <liwang@redhat.com>
+Cc: Waiman Long <llong@redhat.com>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Reviewed-by: Cyril Hrubis <chrubis@suse.cz>
+Acked-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ .../syscalls/clock_gettime/clock_gettime04.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/testcases/kernel/syscalls/clock_gettime/clock_gettime04.c b/testcases/kernel/syscalls/clock_gettime/clock_gettime04.c
+index a8d2c5b38..c279da79e 100644
+--- a/testcases/kernel/syscalls/clock_gettime/clock_gettime04.c
++++ b/testcases/kernel/syscalls/clock_gettime/clock_gettime04.c
+@@ -35,7 +35,7 @@ clockid_t clks[] = {
+ };
+
+ static gettime_t ptr_vdso_gettime, ptr_vdso_gettime64;
+-static long long delta = 5;
++static long long delta, precise_delta, coarse_delta;
+
+ static inline int do_vdso_gettime(gettime_t vdso, clockid_t clk_id, void *ts)
+ {
+@@ -92,9 +92,18 @@ static struct time64_variants variants[] = {
+
+ static void setup(void)
+ {
++ struct timespec res;
++
++ clock_getres(CLOCK_REALTIME, &res);
++ precise_delta = 5 + res.tv_nsec / 1000000;
++
++ clock_getres(CLOCK_REALTIME_COARSE, &res);
++ coarse_delta = 5 + res.tv_nsec / 1000000;
++
+ if (tst_is_virt(VIRT_ANY)) {
+ tst_res(TINFO, "Running in a virtual machine, multiply the delta by 10.");
+- delta *= 10;
++ precise_delta *= 10;
++ coarse_delta *= 10;
+ }
+
+ find_clock_gettime_vdso(&ptr_vdso_gettime, &ptr_vdso_gettime64);
+@@ -108,6 +117,11 @@ static void run(unsigned int i)
+ int count = 10000, ret;
+ unsigned int j;
+
++ if (clks[i] == CLOCK_REALTIME_COARSE || clks[i] == CLOCK_MONOTONIC_COARSE)
++ delta = coarse_delta;
++ else
++ delta = precise_delta;
++
+ do {
+ for (j = 0; j < ARRAY_SIZE(variants); j++) {
+ /* Refresh time in start */
+--
+2.34.1
+
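A compilable sketch of the threshold computation this backport introduces: the allowed jitter is derived from each clock's reported resolution instead of a fixed 5 ms, so a HZ=100 system with 10 ms ticks ends up with a 15 ms budget for the COARSE clocks.

#define _GNU_SOURCE   /* for CLOCK_REALTIME_COARSE on older glibc */
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec res;
    long long precise_delta, coarse_delta;

    clock_getres(CLOCK_REALTIME, &res);
    precise_delta = 5 + res.tv_nsec / 1000000;

    clock_getres(CLOCK_REALTIME_COARSE, &res);
    coarse_delta = 5 + res.tv_nsec / 1000000;

    printf("precise threshold: %lld ms, coarse threshold: %lld ms\n",
           precise_delta, coarse_delta);
    return 0;
}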
diff --git a/meta/recipes-extended/ltp/ltp/0001-syscalls-pread02-extend-buffer-to-avoid-glibc-overflow-detection.patch b/meta/recipes-extended/ltp/ltp/0001-syscalls-pread02-extend-buffer-to-avoid-glibc-overflow-detection.patch
new file mode 100644
index 0000000000..94dd418f36
--- /dev/null
+++ b/meta/recipes-extended/ltp/ltp/0001-syscalls-pread02-extend-buffer-to-avoid-glibc-overflow-detection.patch
@@ -0,0 +1,58 @@
+From de988c9b5605a711b306c4203545b8d761875177 Mon Sep 17 00:00:00 2001
+From: Jan Stancek <jstancek@redhat.com>
+Date: Mon, 31 Jan 2022 12:00:46 +0100
+Subject: [PATCH] syscalls/pread02: extend buffer to avoid glibc overflow
+ detection
+
+Test started failing with recent glibc (glibc-2.34.9000-38.fc36),
+which detects that buffer in pread is potentially too small:
+ tst_test.c:1431: TINFO: Timeout per run is 0h 05m 00s
+ *** buffer overflow detected ***: terminated
+ tst_test.c:1484: TBROK: Test killed by SIGIOT/SIGABRT!
+
+(gdb) bt
+ #0 __pthread_kill_implementation at pthread_kill.c:44
+ #1 0x00007ffff7e46f73 in __pthread_kill_internal at pthread_kill.c:78
+ #2 0x00007ffff7df6a36 in __GI_raise at ../sysdeps/posix/raise.c:26
+ #3 0x00007ffff7de082f in __GI_abort () at abort.c:79
+ #4 0x00007ffff7e3b01e in __libc_message at ../sysdeps/posix/libc_fatal.c:155
+ #5 0x00007ffff7ed945a in __GI___fortify_fail at fortify_fail.c:26
+ #6 0x00007ffff7ed7dc6 in __GI___chk_fail () at chk_fail.c:28
+ #7 0x00007ffff7ed8214 in __pread_chk at pread_chk.c:26
+ #8 0x0000000000404d1a in pread at /usr/include/bits/unistd.h:74
+ #9 verify_pread (n=<optimized out>) at pread02.c:44
+ #10 0x000000000040dc19 in run_tests () at tst_test.c:1246
+ #11 testrun () at tst_test.c:1331
+ #12 fork_testrun () at tst_test.c:1462
+ #13 0x000000000040e9a1 in tst_run_tcases
+ #14 0x0000000000404bde in main
+
+Extend it to number of bytes we are trying to read from fd.
+
+Upstream-Status: Backport
+[https://github.com/linux-test-project/ltp/commit/de988c9b5605a711b306c4203545b8d761875177]
+
+Signed-off-by: Jan Stancek <jstancek@redhat.com>
+Acked-by: Petr Vorel <pvorel@suse.cz>
+Reviewed-by: Cyril Hrubis <chrubis@suse.cz>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ testcases/kernel/syscalls/pread/pread02.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/testcases/kernel/syscalls/pread/pread02.c b/testcases/kernel/syscalls/pread/pread02.c
+index de2a81fff..fda5fd190 100644
+--- a/testcases/kernel/syscalls/pread/pread02.c
++++ b/testcases/kernel/syscalls/pread/pread02.c
+@@ -39,7 +39,7 @@ struct test_case_t {
+ static void verify_pread(unsigned int n)
+ {
+ struct test_case_t *tc = &tcases[n];
+- char buf;
++ char buf[K1];
+
+ TST_EXP_FAIL2(pread(*tc->fd, &buf, tc->nb, tc->offst), tc->exp_errno,
+ "pread(%d, %zu, %ld) %s", *tc->fd, tc->nb, tc->offst, tc->desc);
+--
+2.34.1
+
diff --git a/meta/recipes-extended/ltp/ltp_20220121.bb b/meta/recipes-extended/ltp/ltp_20220121.bb
index 8a13dcf9d0..51e8db4f1e 100644
--- a/meta/recipes-extended/ltp/ltp_20220121.bb
+++ b/meta/recipes-extended/ltp/ltp_20220121.bb
@@ -28,6 +28,8 @@ SRC_URI = "git://github.com/linux-test-project/ltp.git;branch=master;protocol=ht
file://0001-Remove-OOM-tests-from-runtest-mm.patch \
file://0001-metadata-parse.sh-sort-filelist-for-reproducibility.patch \
file://disable_hanging_tests.patch \
+ file://0001-syscalls-pread02-extend-buffer-to-avoid-glibc-overflow-detection.patch \
+ file://0001-clock_gettime04-set-threshold-based-on-the-clock-res.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-extended/mdadm/files/0001-DDF-Cleanup-validate_geometry_ddf_container.patch b/meta/recipes-extended/mdadm/files/0001-DDF-Cleanup-validate_geometry_ddf_container.patch
new file mode 100644
index 0000000000..cea435f83b
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0001-DDF-Cleanup-validate_geometry_ddf_container.patch
@@ -0,0 +1,148 @@
+From ca458f4dcc4de9403298f67543466ce4bbc8f8ae Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 22 Jun 2022 14:25:07 -0600
+Subject: [PATCH 1/4] DDF: Cleanup validate_geometry_ddf_container()
+
+Move the function up so that the function declaration is not necessary
+and remove the unused arguments to the function.
+
+No functional changes are intended but will help with a bug fix in the
+next patch.
+
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Acked-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=679bd9508a30
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ super-ddf.c | 88 ++++++++++++++++++++++++-----------------------------
+ 1 file changed, 39 insertions(+), 49 deletions(-)
+
+diff --git a/super-ddf.c b/super-ddf.c
+index 3f304cd..65cf727 100644
+--- a/super-ddf.c
++++ b/super-ddf.c
+@@ -503,13 +503,6 @@ struct ddf_super {
+ static int load_super_ddf_all(struct supertype *st, int fd,
+ void **sbp, char *devname);
+ static int get_svd_state(const struct ddf_super *, const struct vcl *);
+-static int
+-validate_geometry_ddf_container(struct supertype *st,
+- int level, int layout, int raiddisks,
+- int chunk, unsigned long long size,
+- unsigned long long data_offset,
+- char *dev, unsigned long long *freesize,
+- int verbose);
+
+ static int validate_geometry_ddf_bvd(struct supertype *st,
+ int level, int layout, int raiddisks,
+@@ -3322,6 +3315,42 @@ static int reserve_space(struct supertype *st, int raiddisks,
+ return 1;
+ }
+
++static int
++validate_geometry_ddf_container(struct supertype *st,
++ int level, int raiddisks,
++ unsigned long long data_offset,
++ char *dev, unsigned long long *freesize,
++ int verbose)
++{
++ int fd;
++ unsigned long long ldsize;
++
++ if (level != LEVEL_CONTAINER)
++ return 0;
++ if (!dev)
++ return 1;
++
++ fd = dev_open(dev, O_RDONLY|O_EXCL);
++ if (fd < 0) {
++ if (verbose)
++ pr_err("ddf: Cannot open %s: %s\n",
++ dev, strerror(errno));
++ return 0;
++ }
++ if (!get_dev_size(fd, dev, &ldsize)) {
++ close(fd);
++ return 0;
++ }
++ close(fd);
++ if (freesize) {
++ *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
++ if (*freesize == 0)
++ return 0;
++ }
++
++ return 1;
++}
++
+ static int validate_geometry_ddf(struct supertype *st,
+ int level, int layout, int raiddisks,
+ int *chunk, unsigned long long size,
+@@ -3347,11 +3376,9 @@ static int validate_geometry_ddf(struct supertype *st,
+ level = LEVEL_CONTAINER;
+ if (level == LEVEL_CONTAINER) {
+ /* Must be a fresh device to add to a container */
+- return validate_geometry_ddf_container(st, level, layout,
+- raiddisks, *chunk,
+- size, data_offset, dev,
+- freesize,
+- verbose);
++ return validate_geometry_ddf_container(st, level, raiddisks,
++ data_offset, dev,
++ freesize, verbose);
+ }
+
+ if (!dev) {
+@@ -3449,43 +3476,6 @@ static int validate_geometry_ddf(struct supertype *st,
+ return 1;
+ }
+
+-static int
+-validate_geometry_ddf_container(struct supertype *st,
+- int level, int layout, int raiddisks,
+- int chunk, unsigned long long size,
+- unsigned long long data_offset,
+- char *dev, unsigned long long *freesize,
+- int verbose)
+-{
+- int fd;
+- unsigned long long ldsize;
+-
+- if (level != LEVEL_CONTAINER)
+- return 0;
+- if (!dev)
+- return 1;
+-
+- fd = dev_open(dev, O_RDONLY|O_EXCL);
+- if (fd < 0) {
+- if (verbose)
+- pr_err("ddf: Cannot open %s: %s\n",
+- dev, strerror(errno));
+- return 0;
+- }
+- if (!get_dev_size(fd, dev, &ldsize)) {
+- close(fd);
+- return 0;
+- }
+- close(fd);
+- if (freesize) {
+- *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
+- if (*freesize == 0)
+- return 0;
+- }
+-
+- return 1;
+-}
+-
+ static int validate_geometry_ddf_bvd(struct supertype *st,
+ int level, int layout, int raiddisks,
+ int *chunk, unsigned long long size,
+--
+2.39.1
+
diff --git a/meta/recipes-extended/mdadm/files/0001-mdadm-Fix-optional-write-behind-parameter.patch b/meta/recipes-extended/mdadm/files/0001-mdadm-Fix-optional-write-behind-parameter.patch
new file mode 100644
index 0000000000..186d1e76f2
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0001-mdadm-Fix-optional-write-behind-parameter.patch
@@ -0,0 +1,45 @@
+From 41edf6f45895193f4a523cb0a08d639c9ff9ccc9 Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 22 Jun 2022 14:25:12 -0600
+Subject: [PATCH] mdadm: Fix optional --write-behind parameter
+
+The commit noted below changed the behaviour of --write-behind to
+require an argument. This broke the 06wrmostly test with the error:
+
+ mdadm: Invalid value for maximum outstanding write-behind writes: (null).
+ Must be between 0 and 16383.
+
+To fix this, check if optarg is NULL before parsing it, as the original
+code did.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=41edf6f45895193f4a523cb0a08d639c9ff9ccc9]
+
+Fixes: 60815698c0ac ("Refactor parse_num and use it to parse optarg.")
+Cc: Mateusz Grzonka <mateusz.grzonka@intel.com>
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Acked-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ mdadm.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/mdadm.c b/mdadm.c
+index d0c5e6de..56722ed9 100644
+--- a/mdadm.c
++++ b/mdadm.c
+@@ -1201,8 +1201,9 @@ int main(int argc, char *argv[])
+ case O(BUILD, WriteBehind):
+ case O(CREATE, WriteBehind):
+ s.write_behind = DEFAULT_MAX_WRITE_BEHIND;
+- if (parse_num(&s.write_behind, optarg) != 0 ||
+- s.write_behind < 0 || s.write_behind > 16383) {
++ if (optarg &&
++ (parse_num(&s.write_behind, optarg) != 0 ||
++ s.write_behind < 0 || s.write_behind > 16383)) {
+ pr_err("Invalid value for maximum outstanding write-behind writes: %s.\n\tMust be between 0 and 16383.\n",
+ optarg);
+ exit(2);
+--
+2.25.1
+
diff --git a/meta/recipes-extended/mdadm/files/0001-tests-00raid0-add-a-test-that-validates-raid0-with-l.patch b/meta/recipes-extended/mdadm/files/0001-tests-00raid0-add-a-test-that-validates-raid0-with-l.patch
new file mode 100644
index 0000000000..1c95834a7e
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0001-tests-00raid0-add-a-test-that-validates-raid0-with-l.patch
@@ -0,0 +1,41 @@
+From 7539254342bc591717b0051734cc6c09c1b88640 Mon Sep 17 00:00:00 2001
+From: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Date: Wed, 22 Jun 2022 14:25:13 -0600
+Subject: [PATCH] tests/00raid0: add a test that validates raid0 with layout
+ fails for 0.9
+
+329dfc28debb disallows the creation of raid0 with layouts for 0.9
+metadata. This test confirms the new behavior.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=7539254342bc591717b0051734cc6c09c1b88640]
+
+Signed-off-by: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Signed-off-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ tests/00raid0 | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/tests/00raid0 b/tests/00raid0
+index 8bc18985..e6b21cc4 100644
+--- a/tests/00raid0
++++ b/tests/00raid0
+@@ -6,11 +6,9 @@ check raid0
+ testdev $md0 3 $mdsize2_l 512
+ mdadm -S $md0
+
+-# now with version-0.90 superblock
++# verify raid0 with layouts fail for 0.90
+ mdadm -CR $md0 -e0.90 -l0 -n4 $dev0 $dev1 $dev2 $dev3
+-check raid0
+-testdev $md0 4 $mdsize0 512
+-mdadm -S $md0
++check opposite_result
+
+ # now with no superblock
+ mdadm -B $md0 -l0 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
+--
+2.25.1
+
diff --git a/meta/recipes-extended/mdadm/files/0001-tests-00readonly-Run-udevadm-settle-before-setting-r.patch b/meta/recipes-extended/mdadm/files/0001-tests-00readonly-Run-udevadm-settle-before-setting-r.patch
new file mode 100644
index 0000000000..c621c082e8
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0001-tests-00readonly-Run-udevadm-settle-before-setting-r.patch
@@ -0,0 +1,39 @@
+From 39b381252c32275079344d30de18b76fda4bba26 Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 27 Jul 2022 15:52:45 -0600
+Subject: [PATCH] tests/00readonly: Run udevadm settle before setting ro
+
+In some recent kernel versions, 00readonly fails with:
+
+ mdadm: failed to set readonly for /dev/md0: Device or resource busy
+ ERROR: array is not read-only!
+
+This was traced down to a race condition with udev holding a reference
+to the block device at the same time as trying to set it read only.
+
+To fix this, call udevadm settle before setting the array read only.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=39b381252c32275079344d30de18b76fda4bba26]
+
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Signed-off-by: Jes Sorensen <jsorensen@fb.com>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ tests/00readonly | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tests/00readonly b/tests/00readonly
+index 39202487..afe243b3 100644
+--- a/tests/00readonly
++++ b/tests/00readonly
+@@ -12,6 +12,7 @@ do
+ $dev1 $dev2 $dev3 $dev4 --assume-clean
+ check nosync
+ check $level
++ udevadm settle
+ mdadm -ro $md0
+ check readonly
+ state=$(cat /sys/block/md0/md/array_state)
+--
+2.25.1
+
diff --git a/meta/recipes-extended/mdadm/files/0001-tests-02lineargrow-clear-the-superblock-at-every-ite.patch b/meta/recipes-extended/mdadm/files/0001-tests-02lineargrow-clear-the-superblock-at-every-ite.patch
new file mode 100644
index 0000000000..1a7104b76d
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0001-tests-02lineargrow-clear-the-superblock-at-every-ite.patch
@@ -0,0 +1,33 @@
+From a2c832465fc75202e244327b2081231dfa974617 Mon Sep 17 00:00:00 2001
+From: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Date: Wed, 22 Jun 2022 14:25:16 -0600
+Subject: [PATCH] tests/02lineargrow: clear the superblock at every iteration
+
+This fixes the 02lineargrow test, as prior metadata causes the --add
+operation to misbehave.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=a2c832465fc75202e244327b2081231dfa974617]
+
+Signed-off-by: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Signed-off-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ tests/02lineargrow | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tests/02lineargrow b/tests/02lineargrow
+index e05c219d..595bf9f2 100644
+--- a/tests/02lineargrow
++++ b/tests/02lineargrow
+@@ -20,4 +20,6 @@ do
+ testdev $md0 3 $sz 1
+
+ mdadm -S $md0
++ mdadm --zero /dev/loop2
++ mdadm --zero /dev/loop3
+ done
+--
+2.25.1
+
diff --git a/meta/recipes-extended/mdadm/files/0001-tests-04update-metadata-avoid-passing-chunk-size-to.patch b/meta/recipes-extended/mdadm/files/0001-tests-04update-metadata-avoid-passing-chunk-size-to.patch
new file mode 100644
index 0000000000..9098fb2540
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0001-tests-04update-metadata-avoid-passing-chunk-size-to.patch
@@ -0,0 +1,41 @@
+From de045db607b1ac4b70fc2a8878463e029c2ab1dc Mon Sep 17 00:00:00 2001
+From: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Date: Wed, 22 Jun 2022 14:25:15 -0600
+Subject: [PATCH] tests/04update-metadata: avoid passing chunk size to raid1
+
+The '04update-metadata' test fails with the error "specifying chunk size is
+forbidden for this level", added by commit 5b30a34aa4b5e. Hence, correct
+the test so it does not pass a chunk size to raid1.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=de045db607b1ac4b70fc2a8878463e029c2ab1dc]
+
+Signed-off-by: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Signed-off-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+[logang@deltatee.com: fix if/then style and dropped unrelated hunk]
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ tests/04update-metadata | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/tests/04update-metadata b/tests/04update-metadata
+index 08c14af7..2b72a303 100644
+--- a/tests/04update-metadata
++++ b/tests/04update-metadata
+@@ -11,7 +11,11 @@ dlist="$dev0 $dev1 $dev2 $dev3"
+ for ls in linear/4 raid1/1 raid5/3 raid6/2
+ do
+ s=${ls#*/} l=${ls%/*}
+- mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 -c 64 $dlist
++ if [[ $l == 'raid1' ]]; then
++ mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 $dlist
++ else
++ mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 -c 64 $dlist
++ fi
+ testdev $md0 $s 19904 64
+ mdadm -S $md0
+ mdadm -A $md0 --update=metadata $dlist
+--
+2.25.1
+
diff --git a/meta/recipes-extended/mdadm/files/0001-tests-fix-raid0-tests-for-0.90-metadata.patch b/meta/recipes-extended/mdadm/files/0001-tests-fix-raid0-tests-for-0.90-metadata.patch
new file mode 100644
index 0000000000..d2e7d8ee50
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0001-tests-fix-raid0-tests-for-0.90-metadata.patch
@@ -0,0 +1,102 @@
+From 14c2161edb77d7294199e8aa7daa9f9d1d0ad5d7 Mon Sep 17 00:00:00 2001
+From: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Date: Wed, 22 Jun 2022 14:25:14 -0600
+Subject: [PATCH] tests: fix raid0 tests for 0.90 metadata
+
+Some of the test cases fail because raid0 creation fails with the error
+"0.90 metadata does not support layouts for RAID0", added by commit
+329dfc28debb. Fix some of the test cases by switching from raid0 to
+linear level for 0.9 metadata where possible.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=14c2161edb77d7294199e8aa7daa9f9d1d0ad5d7]
+
+Signed-off-by: Sudhakar Panneerselvam <sudhakar.panneerselvam@oracle.com>
+Signed-off-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ tests/00raid0 | 4 ++--
+ tests/00readonly | 4 ++++
+ tests/03r0assem | 6 +++---
+ tests/04r0update | 4 ++--
+ tests/04update-metadata | 2 +-
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/tests/00raid0 b/tests/00raid0
+index e6b21cc4..9b8896cb 100644
+--- a/tests/00raid0
++++ b/tests/00raid0
+@@ -20,8 +20,8 @@ mdadm -S $md0
+ # now same again with different chunk size
+ for chunk in 4 32 256
+ do
+- mdadm -CR $md0 -e0.90 -l raid0 --chunk $chunk -n3 $dev0 $dev1 $dev2
+- check raid0
++ mdadm -CR $md0 -e0.90 -l linear --chunk $chunk -n3 $dev0 $dev1 $dev2
++ check linear
+ testdev $md0 3 $mdsize0 $chunk
+ mdadm -S $md0
+
+diff --git a/tests/00readonly b/tests/00readonly
+index 28b0fa13..39202487 100644
+--- a/tests/00readonly
++++ b/tests/00readonly
+@@ -4,6 +4,10 @@ for metadata in 0.9 1.0 1.1 1.2
+ do
+ for level in linear raid0 raid1 raid4 raid5 raid6 raid10
+ do
++ if [[ $metadata == "0.9" && $level == "raid0" ]];
++ then
++ continue
++ fi
+ mdadm -CR $md0 -l $level -n 4 --metadata=$metadata \
+ $dev1 $dev2 $dev3 $dev4 --assume-clean
+ check nosync
+diff --git a/tests/03r0assem b/tests/03r0assem
+index 6744e322..44df0645 100644
+--- a/tests/03r0assem
++++ b/tests/03r0assem
+@@ -68,9 +68,9 @@ mdadm -S $md2
+ ### Now for version 0...
+
+ mdadm --zero-superblock $dev0 $dev1 $dev2
+-mdadm -CR $md2 -l0 --metadata=0.90 -n3 $dev0 $dev1 $dev2
+-check raid0
+-tst="testdev $md2 3 $mdsize0 512"
++mdadm -CR $md2 -llinear --metadata=0.90 -n3 $dev0 $dev1 $dev2
++check linear
++tst="testdev $md2 3 $mdsize0 1"
+ $tst
+
+ uuid=`mdadm -Db $md2 | sed 's/.*UUID=//'`
+diff --git a/tests/04r0update b/tests/04r0update
+index 73ee3b9f..b95efb06 100644
+--- a/tests/04r0update
++++ b/tests/04r0update
+@@ -1,7 +1,7 @@
+
+ # create a raid0, re-assemble with a different super-minor
+-mdadm -CR -e 0.90 $md0 -l0 -n3 $dev0 $dev1 $dev2
+-testdev $md0 3 $mdsize0 512
++mdadm -CR -e 0.90 $md0 -llinear -n3 $dev0 $dev1 $dev2
++testdev $md0 3 $mdsize0 1
+ minor1=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
+ mdadm -S /dev/md0
+
+diff --git a/tests/04update-metadata b/tests/04update-metadata
+index 232fc1ff..08c14af7 100644
+--- a/tests/04update-metadata
++++ b/tests/04update-metadata
+@@ -8,7 +8,7 @@ set -xe
+
+ dlist="$dev0 $dev1 $dev2 $dev3"
+
+-for ls in raid0/4 linear/4 raid1/1 raid5/3 raid6/2
++for ls in linear/4 raid1/1 raid5/3 raid6/2
+ do
+ s=${ls#*/} l=${ls%/*}
+ mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 -c 64 $dlist
+--
+2.25.1
+
diff --git a/meta/recipes-extended/mdadm/files/0002-DDF-Fix-NULL-pointer-dereference-in-validate_geometr.patch b/meta/recipes-extended/mdadm/files/0002-DDF-Fix-NULL-pointer-dereference-in-validate_geometr.patch
new file mode 100644
index 0000000000..fafe88b49c
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0002-DDF-Fix-NULL-pointer-dereference-in-validate_geometr.patch
@@ -0,0 +1,56 @@
+From 14f110f0286d38e29ef5e51d7f72e049c2f18323 Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 22 Jun 2022 14:25:08 -0600
+Subject: [PATCH 2/4] DDF: Fix NULL pointer dereference in
+ validate_geometry_ddf()
+
+A relatively recent patch added a call to validate_geometry() in
+Manage_add() that has level=LEVEL_CONTAINER and chunk=NULL.
+
+This causes some ddf tests to segfault which aborts the test suite.
+
+To fix this, avoid dereferencing chunk when the level is
+LEVEL_CONTAINER or LEVEL_NONE.
+
+Fixes: 1f5d54a06df0 ("Manage: Call validate_geometry when adding drive to external container")
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Acked-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=2b93288a5650
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ super-ddf.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/super-ddf.c b/super-ddf.c
+index 65cf727..3ef1293 100644
+--- a/super-ddf.c
++++ b/super-ddf.c
+@@ -3369,9 +3369,6 @@ static int validate_geometry_ddf(struct supertype *st,
+ * If given BVDs, we make an SVD, changing all the GUIDs in the process.
+ */
+
+- if (*chunk == UnSet)
+- *chunk = DEFAULT_CHUNK;
+-
+ if (level == LEVEL_NONE)
+ level = LEVEL_CONTAINER;
+ if (level == LEVEL_CONTAINER) {
+@@ -3381,6 +3378,9 @@ static int validate_geometry_ddf(struct supertype *st,
+ freesize, verbose);
+ }
+
++ if (*chunk == UnSet)
++ *chunk = DEFAULT_CHUNK;
++
+ if (!dev) {
+ mdu_array_info_t array = {
+ .level = level,
+--
+2.39.1
+
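The hunk above only reorders the existing checks; the same idea as a minimal standalone sketch (illustrative names and constants, not mdadm's actual DDF code):

#include <stdio.h>
#include <stddef.h>

#define LEVEL_CONTAINER (-100)   /* illustrative stand-ins, not mdadm's values */
#define LEVEL_NONE      (-1)
#define UNSET_CHUNK     (-2)
#define DEFAULT_CHUNK   512

static int validate(int level, int *chunk)
{
        if (level == LEVEL_NONE)
                level = LEVEL_CONTAINER;
        if (level == LEVEL_CONTAINER)
                return 1;                  /* container path never touches chunk */

        if (*chunk == UNSET_CHUNK)         /* chunk is only dereferenced past here */
                *chunk = DEFAULT_CHUNK;
        return *chunk > 0;
}

int main(void)
{
        int chunk = UNSET_CHUNK;
        int rc;

        /* The crashing case from the message above: container level with a
         * NULL chunk pointer no longer dereferences NULL. */
        printf("container: %d\n", validate(LEVEL_CONTAINER, NULL));

        rc = validate(0 /* raid-like level */, &chunk);
        printf("raid-like: %d (chunk=%d)\n", rc, chunk);
        return 0;
}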
diff --git a/meta/recipes-extended/mdadm/files/0003-mdadm-Grow-Fix-use-after-close-bug-by-closing-after-.patch b/meta/recipes-extended/mdadm/files/0003-mdadm-Grow-Fix-use-after-close-bug-by-closing-after-.patch
new file mode 100644
index 0000000000..a954ab027a
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0003-mdadm-Grow-Fix-use-after-close-bug-by-closing-after-.patch
@@ -0,0 +1,91 @@
+From bd064da1469a6a07331b076a0294a8c6c3c38526 Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 22 Jun 2022 14:25:09 -0600
+Subject: [PATCH 3/4] mdadm/Grow: Fix use after close bug by closing after fork
+
+The test 07revert-grow fails most of the time. But it succeeds around
+1 in 5 times. When it does succeed, it causes the tests to die because
+mdadm has segfaulted.
+
+The segfault was caused by mdadm attempting to reopen a file
+descriptor that was already closed. The backtrace of the segfault
+was:
+
+ #0 __strncmp_avx2 () at ../sysdeps/x86_64/multiarch/strcmp-avx2.S:101
+ #1 0x000056146e31d44b in devnm2devid (devnm=0x0) at util.c:956
+ #2 0x000056146e31dab4 in open_dev_flags (devnm=0x0, flags=0)
+ at util.c:1072
+ #3 0x000056146e31db22 in open_dev (devnm=0x0) at util.c:1079
+ #4 0x000056146e3202e8 in reopen_mddev (mdfd=4) at util.c:2244
+ #5 0x000056146e329f36 in start_array (mdfd=4,
+ mddev=0x7ffc55342450 "/dev/md0", content=0x7ffc55342860,
+ st=0x56146fc78660, ident=0x7ffc55342f70, best=0x56146fc6f5d0,
+ bestcnt=10, chosen_drive=0, devices=0x56146fc706b0, okcnt=5,
+ sparecnt=0, rebuilding_cnt=0, journalcnt=0, c=0x7ffc55342e90,
+ clean=1, avail=0x56146fc78720 "\001\001\001\001\001",
+ start_partial_ok=0, err_ok=0, was_forced=0)
+ at Assemble.c:1206
+ #6 0x000056146e32c36e in Assemble (st=0x56146fc78660,
+ mddev=0x7ffc55342450 "/dev/md0", ident=0x7ffc55342f70,
+ devlist=0x56146fc6e2d0, c=0x7ffc55342e90)
+ at Assemble.c:1914
+ #7 0x000056146e312ac9 in main (argc=11, argv=0x7ffc55343238)
+ at mdadm.c:1510
+
+The file descriptor was closed early in Grow_continue(). The noted commit
+moved the close() call to close the fd above the fork which caused the
+parent process to return with a closed fd.
+
+This meant reshape_array() and Grow_continue() would return in the parent
+with the fd closed. The fd would eventually be passed to reopen_mddev(),
+where fd2devnm() returned an unhandled NULL which was then
+dereferenced in devnm2devid().
+
+Fix this by moving the close() call below the fork. This appears to
+fix the 07revert-grow test. While we're at it, switch to using
+close_fd() to invalidate the file descriptor.
+
+Fixes: 77b72fa82813 ("mdadm/Grow: prevent md's fd from being occupied during delayed time")
+Cc: Alex Wu <alexwu@synology.com>
+Cc: BingJing Chang <bingjingc@synology.com>
+Cc: Danny Shih <dannyshih@synology.com>
+Cc: ChangSyun Peng <allenpeng@synology.com>
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Acked-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=548e9b916f86
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ Grow.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/Grow.c b/Grow.c
+index 9c6fc95..a8e4e83 100644
+--- a/Grow.c
++++ b/Grow.c
+@@ -3501,7 +3501,6 @@ started:
+ return 0;
+ }
+
+- close(fd);
+ /* Now we just need to kick off the reshape and watch, while
+ * handling backups of the data...
+ * This is all done by a forked background process.
+@@ -3522,6 +3521,9 @@ started:
+ break;
+ }
+
++ /* Close unused file descriptor in the forked process */
++ close_fd(&fd);
++
+ /* If another array on the same devices is busy, the
+ * reshape will wait for them. This would mean that
+ * the first section that we suspend will stay suspended
+--
+2.39.1
+
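A minimal standalone sketch of the close-after-fork ordering described in the message above; close_fd() here is a stand-in with the assumed close-and-invalidate semantics, not mdadm's implementation:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

/* Close an fd and invalidate the caller's copy (assumed semantics of the
 * close_fd() helper mentioned above). */
static void close_fd(int *fd)
{
        if (*fd >= 0)
                close(*fd);
        *fd = -1;
}

int main(void)
{
        int fd = open("/dev/null", O_RDONLY);
        pid_t pid;

        if (fd < 0)
                return 1;

        pid = fork();
        if (pid < 0)
                return 1;

        if (pid == 0) {
                /* Forked background process: it does not use the descriptor,
                 * so it is closed here, after the fork. Closing before the
                 * fork would also leave the parent with an invalid fd. */
                close_fd(&fd);
                _exit(0);
        }

        /* Parent: fd is still open and can keep being used. */
        printf("parent still holds fd %d\n", fd);
        close_fd(&fd);
        return 0;
}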
diff --git a/meta/recipes-extended/mdadm/files/0004-monitor-Avoid-segfault-when-calling-NULL-get_bad_blo.patch b/meta/recipes-extended/mdadm/files/0004-monitor-Avoid-segfault-when-calling-NULL-get_bad_blo.patch
new file mode 100644
index 0000000000..72cb40f782
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0004-monitor-Avoid-segfault-when-calling-NULL-get_bad_blo.patch
@@ -0,0 +1,42 @@
+From 2296a4a441b4b8546e2eb32403930f1bb8f3ee4a Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 22 Jun 2022 14:25:10 -0600
+Subject: [PATCH 4/4] monitor: Avoid segfault when calling NULL get_bad_blocks
+
+Not all struct superswitch implement a get_bad_blocks() function,
+yet mdmon seems to call it without checking for NULL and thus
+occasionally segfaults in the test 10ddf-geometry.
+
+Fix this by checking for NULL before calling it.
+
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Acked-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=9ae62977b51d
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ monitor.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/monitor.c b/monitor.c
+index afc3e50..8e43c0d 100644
+--- a/monitor.c
++++ b/monitor.c
+@@ -312,6 +312,9 @@ static int check_for_cleared_bb(struct active_array *a, struct mdinfo *mdi)
+ struct md_bb *bb;
+ int i;
+
++ if (!ss->get_bad_blocks)
++ return -1;
++
+ /*
+ * Get a list of bad blocks for an array, then read list of
+ * acknowledged bad blocks from kernel and compare it against metadata
+--
+2.39.1
+
diff --git a/meta/recipes-extended/mdadm/files/0005-mdadm-test-Mark-and-ignore-broken-test-failures.patch b/meta/recipes-extended/mdadm/files/0005-mdadm-test-Mark-and-ignore-broken-test-failures.patch
new file mode 100644
index 0000000000..c55bfb125b
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0005-mdadm-test-Mark-and-ignore-broken-test-failures.patch
@@ -0,0 +1,128 @@
+From feab1f72fcf032a4d21d0a69eb61b23a5ddb3352 Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 22 Jun 2022 14:25:18 -0600
+Subject: [PATCH 5/6] mdadm/test: Mark and ignore broken test failures
+
+Add functionality to continue if a test marked as broken fails.
+
+To mark a test as broken, a file with the same name but with the suffix
+'.broken' should exist. The first line in the file will be printed with
+a KNOWN BROKEN message; the rest of the file can describe the how the
+test is broken.
+
+Also adds --skip-broken and --skip-always-broken to skip all the tests
+that have a .broken file or to skip all tests whose .broken file's first
+line contains the keyword always.
+
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=28520bf114b3
+
+[OP: adjusted context for mdadm-4.2]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ test | 37 +++++++++++++++++++++++++++++++++++--
+ 1 file changed, 35 insertions(+), 2 deletions(-)
+
+diff --git a/test b/test
+index 8f189d9..ee8fba1 100755
+--- a/test
++++ b/test
+@@ -10,6 +10,8 @@ devlist=
+
+ savelogs=0
+ exitonerror=1
++ctrl_c_error=0
++skipbroken=0
+ prefix='[0-9][0-9]'
+
+ # use loop devices by default if doesn't specify --dev
+@@ -35,6 +37,7 @@ die() {
+
+ ctrl_c() {
+ exitonerror=1
++ ctrl_c_error=1
+ }
+
+ # mdadm always adds --quiet, and we want to see any unexpected messages
+@@ -79,8 +82,21 @@ mdadm() {
+ do_test() {
+ _script=$1
+ _basename=`basename $_script`
++ _broken=0
++
+ if [ -f "$_script" ]
+ then
++ if [ -f "${_script}.broken" ]; then
++ _broken=1
++ _broken_msg=$(head -n1 "${_script}.broken" | tr -d '\n')
++ if [ "$skipbroken" == "all" ]; then
++ return
++ elif [ "$skipbroken" == "always" ] &&
++ [[ "$_broken_msg" == *always* ]]; then
++ return
++ fi
++ fi
++
+ rm -f $targetdir/stderr
+ # this might have been reset: restore the default.
+ echo 2000 > /proc/sys/dev/raid/speed_limit_max
+@@ -97,10 +113,15 @@ do_test() {
+ else
+ save_log fail
+ _fail=1
++ if [ "$_broken" == "1" ]; then
++ echo " (KNOWN BROKEN TEST: $_broken_msg)"
++ fi
+ fi
+ [ "$savelogs" == "1" ] &&
+ mv -f $targetdir/log $logdir/$_basename.log
+- [ "$_fail" == "1" -a "$exitonerror" == "1" ] && exit 1
++ [ "$ctrl_c_error" == "1" ] && exit 1
++ [ "$_fail" == "1" -a "$exitonerror" == "1" \
++ -a "$_broken" == "0" ] && exit 1
+ fi
+ }
+
+@@ -117,6 +138,8 @@ do_help() {
+ --logdir=directory Directory to save all logfiles in
+ --save-logs Usually use with --logdir together
+ --keep-going | --no-error Don't stop on error, ie. run all tests
++ --skip-broken Skip tests that are known to be broken
++ --skip-always-broken Skip tests that are known to always fail
+ --dev=loop|lvm|ram|disk Use loop devices (default), LVM, RAM or disk
+ --disks= Provide a bunch of physical devices for test
+ --volgroup=name LVM volume group for LVM test
+@@ -211,6 +234,12 @@ parse_args() {
+ --keep-going | --no-error )
+ exitonerror=0
+ ;;
++ --skip-broken )
++ skipbroken=all
++ ;;
++ --skip-always-broken )
++ skipbroken=always
++ ;;
+ --disable-multipath )
+ unset MULTIPATH
+ ;;
+@@ -275,7 +304,11 @@ main() {
+ if [ $script == "$testdir/11spare-migration" ];then
+ continue
+ fi
+- do_test $script
++ case $script in
++ *.broken) ;;
++ *)
++ do_test $script
++ esac
+ done
+ fi
+
+--
+2.39.1
+
diff --git a/meta/recipes-extended/mdadm/files/0006-tests-Add-broken-files-for-all-broken-tests.patch b/meta/recipes-extended/mdadm/files/0006-tests-Add-broken-files-for-all-broken-tests.patch
new file mode 100644
index 0000000000..115b23bac5
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/0006-tests-Add-broken-files-for-all-broken-tests.patch
@@ -0,0 +1,454 @@
+From fd1c26ba129b069d9f73afaefdbe53683de3814a Mon Sep 17 00:00:00 2001
+From: Logan Gunthorpe <logang@deltatee.com>
+Date: Wed, 22 Jun 2022 14:25:19 -0600
+Subject: [PATCH 6/6] tests: Add broken files for all broken tests
+
+Each broken file contains the rough frequency of brokenness as well
+as a brief explanation of what happens when it breaks. Estimates
+of failure rates are not statistically significant and can vary
+run to run.
+
+This is really just a view from my window. Tests were done on a
+small VM with the default loop devices, not real hardware. We've
+seen different kernel configurations can cause bugs to appear as well
+(ie. different block schedulers). It may also be that different race
+conditions will be seen on machines with different performance
+characteristics.
+
+These annotations were done with the kernel currently in md/md-next:
+
+ facef3b96c5b ("md: Notify sysfs sync_completed in md_reap_sync_thread()")
+
+Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
+Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
+
+Upstream-Status: Backport
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/commit/?id=daa86d663476
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ tests/01r5integ.broken | 7 ++++
+ tests/01raid6integ.broken | 7 ++++
+ tests/04r5swap.broken | 7 ++++
+ tests/07autoassemble.broken | 8 ++++
+ tests/07autodetect.broken | 5 +++
+ tests/07changelevelintr.broken | 9 +++++
+ tests/07changelevels.broken | 9 +++++
+ tests/07reshape5intr.broken | 45 ++++++++++++++++++++++
+ tests/07revert-grow.broken | 31 +++++++++++++++
+ tests/07revert-shrink.broken | 9 +++++
+ tests/07testreshape5.broken | 12 ++++++
+ tests/09imsm-assemble.broken | 6 +++
+ tests/09imsm-create-fail-rebuild.broken | 5 +++
+ tests/09imsm-overlap.broken | 7 ++++
+ tests/10ddf-assemble-missing.broken | 6 +++
+ tests/10ddf-fail-create-race.broken | 7 ++++
+ tests/10ddf-fail-two-spares.broken | 5 +++
+ tests/10ddf-incremental-wrong-order.broken | 9 +++++
+ tests/14imsm-r1_2d-grow-r1_3d.broken | 5 +++
+ tests/14imsm-r1_2d-takeover-r0_2d.broken | 6 +++
+ tests/18imsm-r10_4d-takeover-r0_2d.broken | 5 +++
+ tests/18imsm-r1_2d-takeover-r0_1d.broken | 6 +++
+ tests/19raid6auto-repair.broken | 5 +++
+ tests/19raid6repair.broken | 5 +++
+ 24 files changed, 226 insertions(+)
+ create mode 100644 tests/01r5integ.broken
+ create mode 100644 tests/01raid6integ.broken
+ create mode 100644 tests/04r5swap.broken
+ create mode 100644 tests/07autoassemble.broken
+ create mode 100644 tests/07autodetect.broken
+ create mode 100644 tests/07changelevelintr.broken
+ create mode 100644 tests/07changelevels.broken
+ create mode 100644 tests/07reshape5intr.broken
+ create mode 100644 tests/07revert-grow.broken
+ create mode 100644 tests/07revert-shrink.broken
+ create mode 100644 tests/07testreshape5.broken
+ create mode 100644 tests/09imsm-assemble.broken
+ create mode 100644 tests/09imsm-create-fail-rebuild.broken
+ create mode 100644 tests/09imsm-overlap.broken
+ create mode 100644 tests/10ddf-assemble-missing.broken
+ create mode 100644 tests/10ddf-fail-create-race.broken
+ create mode 100644 tests/10ddf-fail-two-spares.broken
+ create mode 100644 tests/10ddf-incremental-wrong-order.broken
+ create mode 100644 tests/14imsm-r1_2d-grow-r1_3d.broken
+ create mode 100644 tests/14imsm-r1_2d-takeover-r0_2d.broken
+ create mode 100644 tests/18imsm-r10_4d-takeover-r0_2d.broken
+ create mode 100644 tests/18imsm-r1_2d-takeover-r0_1d.broken
+ create mode 100644 tests/19raid6auto-repair.broken
+ create mode 100644 tests/19raid6repair.broken
+
+diff --git a/tests/01r5integ.broken b/tests/01r5integ.broken
+new file mode 100644
+index 0000000..2073763
+--- /dev/null
++++ b/tests/01r5integ.broken
+@@ -0,0 +1,7 @@
++fails rarely
++
++Fails about 1 in every 30 runs with a sha mismatch error:
++
++ c49ab26e1b01def7874af9b8a6d6d0c29fdfafe6 /dev/md0 does not match
++ 15dc2f73262f811ada53c65e505ceec9cf025cb9 /dev/md0 with /dev/loop3
++ missing
+diff --git a/tests/01raid6integ.broken b/tests/01raid6integ.broken
+new file mode 100644
+index 0000000..1df735f
+--- /dev/null
++++ b/tests/01raid6integ.broken
+@@ -0,0 +1,7 @@
++fails infrequently
++
++Fails about 1 in 5 with a sha mismatch:
++
++ 8286c2bc045ae2cfe9f8b7ae3a898fa25db6926f /dev/md0 does not match
++ a083a0738b58caab37fd568b91b177035ded37df /dev/md0 with /dev/loop2 and
++ /dev/loop3 missing
+diff --git a/tests/04r5swap.broken b/tests/04r5swap.broken
+new file mode 100644
+index 0000000..e38987d
+--- /dev/null
++++ b/tests/04r5swap.broken
+@@ -0,0 +1,7 @@
++always fails
++
++Fails with errors:
++
++ mdadm: /dev/loop0 has no superblock - assembly aborted
++
++ ERROR: no recovery happening
+diff --git a/tests/07autoassemble.broken b/tests/07autoassemble.broken
+new file mode 100644
+index 0000000..8be0940
+--- /dev/null
++++ b/tests/07autoassemble.broken
+@@ -0,0 +1,8 @@
++always fails
++
++Prints lots of messages, but the array doesn't assemble. Error
++possibly related to:
++
++ mdadm: /dev/md/1 is busy - skipping
++ mdadm: no recogniseable superblock on /dev/md/testing:0
++ mdadm: /dev/md/2 is busy - skipping
+diff --git a/tests/07autodetect.broken b/tests/07autodetect.broken
+new file mode 100644
+index 0000000..294954a
+--- /dev/null
++++ b/tests/07autodetect.broken
+@@ -0,0 +1,5 @@
++always fails
++
++Fails with error:
++
++ ERROR: no resync happening
+diff --git a/tests/07changelevelintr.broken b/tests/07changelevelintr.broken
+new file mode 100644
+index 0000000..284b490
+--- /dev/null
++++ b/tests/07changelevelintr.broken
+@@ -0,0 +1,9 @@
++always fails
++
++Fails with errors:
++
++ mdadm: this change will reduce the size of the array.
++ use --grow --array-size first to truncate array.
++ e.g. mdadm --grow /dev/md0 --array-size 56832
++
++ ERROR: no reshape happening
+diff --git a/tests/07changelevels.broken b/tests/07changelevels.broken
+new file mode 100644
+index 0000000..9b930d9
+--- /dev/null
++++ b/tests/07changelevels.broken
+@@ -0,0 +1,9 @@
++always fails
++
++Fails with errors:
++
++ mdadm: /dev/loop0 is smaller than given size. 18976K < 19968K + metadata
++ mdadm: /dev/loop1 is smaller than given size. 18976K < 19968K + metadata
++ mdadm: /dev/loop2 is smaller than given size. 18976K < 19968K + metadata
++
++ ERROR: /dev/md0 isn't a block device.
+diff --git a/tests/07reshape5intr.broken b/tests/07reshape5intr.broken
+new file mode 100644
+index 0000000..efe52a6
+--- /dev/null
++++ b/tests/07reshape5intr.broken
+@@ -0,0 +1,45 @@
++always fails
++
++This patch, recently added to md-next causes the test to always fail:
++
++7e6ba434cc60 ("md: don't unregister sync_thread with reconfig_mutex
++held")
++
++The new error is simply:
++
++ ERROR: no reshape happening
++
++Before the patch, the error seen is below.
++
++--
++
++fails infrequently
++
++Fails roughly 1 in 4 runs with errors:
++
++ mdadm: Merging with already-assembled /dev/md/0
++ mdadm: cannot re-read metadata from /dev/loop6 - aborting
++
++ ERROR: no reshape happening
++
++Also have seen a random deadlock:
++
++ INFO: task mdadm:109702 blocked for more than 30 seconds.
++ Not tainted 5.18.0-rc3-eid-vmlocalyes-dbg-00095-g3c2b5427979d #2040
++ "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
++ task:mdadm state:D stack: 0 pid:109702 ppid: 1 flags:0x00004000
++ Call Trace:
++ <TASK>
++ __schedule+0x67e/0x13b0
++ schedule+0x82/0x110
++ mddev_suspend+0x2e1/0x330
++ suspend_lo_store+0xbd/0x140
++ md_attr_store+0xcb/0x130
++ sysfs_kf_write+0x89/0xb0
++ kernfs_fop_write_iter+0x202/0x2c0
++ new_sync_write+0x222/0x330
++ vfs_write+0x3bc/0x4d0
++ ksys_write+0xd9/0x180
++ __x64_sys_write+0x43/0x50
++ do_syscall_64+0x3b/0x90
++ entry_SYSCALL_64_after_hwframe+0x44/0xae
+diff --git a/tests/07revert-grow.broken b/tests/07revert-grow.broken
+new file mode 100644
+index 0000000..9b6db86
+--- /dev/null
++++ b/tests/07revert-grow.broken
+@@ -0,0 +1,31 @@
++always fails
++
++This patch, recently added to md-next causes the test to always fail:
++
++7e6ba434cc60 ("md: don't unregister sync_thread with reconfig_mutex held")
++
++The errors are:
++
++ mdadm: No active reshape to revert on /dev/loop0
++ ERROR: active raid5 not found
++
++Before the patch, the error seen is below.
++
++--
++
++fails rarely
++
++Fails about 1 in every 30 runs with errors:
++
++ mdadm: Merging with already-assembled /dev/md/0
++ mdadm: backup file /tmp/md-backup inaccessible: No such file or directory
++ mdadm: failed to add /dev/loop1 to /dev/md/0: Invalid argument
++ mdadm: failed to add /dev/loop2 to /dev/md/0: Invalid argument
++ mdadm: failed to add /dev/loop3 to /dev/md/0: Invalid argument
++ mdadm: failed to add /dev/loop0 to /dev/md/0: Invalid argument
++ mdadm: /dev/md/0 assembled from 1 drive - need all 5 to start it
++ (use --run to insist).
++
++ grep: /sys/block/md*/md/sync_action: No such file or directory
++
++ ERROR: active raid5 not found
+diff --git a/tests/07revert-shrink.broken b/tests/07revert-shrink.broken
+new file mode 100644
+index 0000000..c33c39e
+--- /dev/null
++++ b/tests/07revert-shrink.broken
+@@ -0,0 +1,9 @@
++always fails
++
++Fails with errors:
++
++ mdadm: this change will reduce the size of the array.
++ use --grow --array-size first to truncate array.
++ e.g. mdadm --grow /dev/md0 --array-size 53760
++
++ ERROR: active raid5 not found
+diff --git a/tests/07testreshape5.broken b/tests/07testreshape5.broken
+new file mode 100644
+index 0000000..a8ce03e
+--- /dev/null
++++ b/tests/07testreshape5.broken
+@@ -0,0 +1,12 @@
++always fails
++
++Test seems to run 'test_stripe' at $dir directory, but $dir is never
++set. If $dir is adjusted to $PWD, the test still fails with:
++
++ mdadm: /dev/loop2 is not suitable for this array.
++ mdadm: create aborted
++ ++ return 1
++ ++ cmp -s -n 8192 /dev/md0 /tmp/RandFile
++ ++ echo cmp failed
++ cmp failed
++ ++ exit 2
+diff --git a/tests/09imsm-assemble.broken b/tests/09imsm-assemble.broken
+new file mode 100644
+index 0000000..a6d4d5c
+--- /dev/null
++++ b/tests/09imsm-assemble.broken
+@@ -0,0 +1,6 @@
++fails infrequently
++
++Fails roughly 1 in 10 runs with errors:
++
++ mdadm: /dev/loop2 is still in use, cannot remove.
++ /dev/loop2 removal from /dev/md/container should have succeeded
+diff --git a/tests/09imsm-create-fail-rebuild.broken b/tests/09imsm-create-fail-rebuild.broken
+new file mode 100644
+index 0000000..40c4b29
+--- /dev/null
++++ b/tests/09imsm-create-fail-rebuild.broken
+@@ -0,0 +1,5 @@
++always fails
++
++Fails with error:
++
++ **Error**: Array size mismatch - expected 3072, actual 16384
+diff --git a/tests/09imsm-overlap.broken b/tests/09imsm-overlap.broken
+new file mode 100644
+index 0000000..e7ccab7
+--- /dev/null
++++ b/tests/09imsm-overlap.broken
+@@ -0,0 +1,7 @@
++always fails
++
++Fails with errors:
++
++ **Error**: Offset mismatch - expected 15360, actual 0
++ **Error**: Offset mismatch - expected 15360, actual 0
++ /dev/md/vol3 failed check
+diff --git a/tests/10ddf-assemble-missing.broken b/tests/10ddf-assemble-missing.broken
+new file mode 100644
+index 0000000..bfd8d10
+--- /dev/null
++++ b/tests/10ddf-assemble-missing.broken
+@@ -0,0 +1,6 @@
++always fails
++
++Fails with errors:
++
++ ERROR: /dev/md/vol0 has unexpected state on /dev/loop10
++ ERROR: unexpected number of online disks on /dev/loop10
+diff --git a/tests/10ddf-fail-create-race.broken b/tests/10ddf-fail-create-race.broken
+new file mode 100644
+index 0000000..6c0df02
+--- /dev/null
++++ b/tests/10ddf-fail-create-race.broken
+@@ -0,0 +1,7 @@
++usually fails
++
++Fails about 9 out of 10 times with many errors:
++
++ mdadm: cannot open MISSING: No such file or directory
++ ERROR: non-degraded array found
++ ERROR: disk 0 not marked as failed in meta data
+diff --git a/tests/10ddf-fail-two-spares.broken b/tests/10ddf-fail-two-spares.broken
+new file mode 100644
+index 0000000..eeea56d
+--- /dev/null
++++ b/tests/10ddf-fail-two-spares.broken
+@@ -0,0 +1,5 @@
++fails infrequently
++
++Fails roughly 1 in 3 with error:
++
++ ERROR: /dev/md/vol1 should be optimal in meta data
+diff --git a/tests/10ddf-incremental-wrong-order.broken b/tests/10ddf-incremental-wrong-order.broken
+new file mode 100644
+index 0000000..a5af3ba
+--- /dev/null
++++ b/tests/10ddf-incremental-wrong-order.broken
+@@ -0,0 +1,9 @@
++always fails
++
++Fails with errors:
++ ERROR: sha1sum of /dev/md/vol0 has changed
++ ERROR: /dev/md/vol0 has unexpected state on /dev/loop10
++ ERROR: unexpected number of online disks on /dev/loop10
++ ERROR: /dev/md/vol0 has unexpected state on /dev/loop8
++ ERROR: unexpected number of online disks on /dev/loop8
++ ERROR: sha1sum of /dev/md/vol0 has changed
+diff --git a/tests/14imsm-r1_2d-grow-r1_3d.broken b/tests/14imsm-r1_2d-grow-r1_3d.broken
+new file mode 100644
+index 0000000..4ef1d40
+--- /dev/null
++++ b/tests/14imsm-r1_2d-grow-r1_3d.broken
+@@ -0,0 +1,5 @@
++always fails
++
++Fails with error:
++
++ mdadm/tests/func.sh: line 325: dvsize/chunk: division by 0 (error token is "chunk")
+diff --git a/tests/14imsm-r1_2d-takeover-r0_2d.broken b/tests/14imsm-r1_2d-takeover-r0_2d.broken
+new file mode 100644
+index 0000000..89cd4e5
+--- /dev/null
++++ b/tests/14imsm-r1_2d-takeover-r0_2d.broken
+@@ -0,0 +1,6 @@
++always fails
++
++Fails with error:
++
++ tests/func.sh: line 325: dvsize/chunk: division by 0 (error token
++ is "chunk")
+diff --git a/tests/18imsm-r10_4d-takeover-r0_2d.broken b/tests/18imsm-r10_4d-takeover-r0_2d.broken
+new file mode 100644
+index 0000000..a27399f
+--- /dev/null
++++ b/tests/18imsm-r10_4d-takeover-r0_2d.broken
+@@ -0,0 +1,5 @@
++fails rarely
++
++Fails about 1 run in 100 with message:
++
++ ERROR: size is wrong for /dev/md/vol0: 2 * 5120 (chunk=128) = 20480, not 0
+diff --git a/tests/18imsm-r1_2d-takeover-r0_1d.broken b/tests/18imsm-r1_2d-takeover-r0_1d.broken
+new file mode 100644
+index 0000000..aa1982e
+--- /dev/null
++++ b/tests/18imsm-r1_2d-takeover-r0_1d.broken
+@@ -0,0 +1,6 @@
++always fails
++
++Fails with error:
++
++ tests/func.sh: line 325: dvsize/chunk: division by 0 (error token
++ is "chunk")
+diff --git a/tests/19raid6auto-repair.broken b/tests/19raid6auto-repair.broken
+new file mode 100644
+index 0000000..e91a142
+--- /dev/null
++++ b/tests/19raid6auto-repair.broken
+@@ -0,0 +1,5 @@
++always fails
++
++Fails with:
++
++ "should detect errors"
+diff --git a/tests/19raid6repair.broken b/tests/19raid6repair.broken
+new file mode 100644
+index 0000000..e91a142
+--- /dev/null
++++ b/tests/19raid6repair.broken
+@@ -0,0 +1,5 @@
++always fails
++
++Fails with:
++
++ "should detect errors"
+--
+2.39.1
+
diff --git a/meta/recipes-extended/mdadm/files/run-ptest b/meta/recipes-extended/mdadm/files/run-ptest
index fae8071d43..2380c322a9 100644
--- a/meta/recipes-extended/mdadm/files/run-ptest
+++ b/meta/recipes-extended/mdadm/files/run-ptest
@@ -2,6 +2,6 @@
mkdir -p /mdadm-testing-dir
# make the test continue to execute even one fail
-dir=. ./test --keep-going --disable-integrity
+dir=. ./test --keep-going --disable-integrity --skip-broken
rm -rf /mdadm-testing-dir/*
diff --git a/meta/recipes-extended/mdadm/mdadm_4.2.bb b/meta/recipes-extended/mdadm/mdadm_4.2.bb
index 19035caaec..e4b98f82c1 100644
--- a/meta/recipes-extended/mdadm/mdadm_4.2.bb
+++ b/meta/recipes-extended/mdadm/mdadm_4.2.bb
@@ -24,6 +24,18 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/raid/mdadm/${BPN}-${PV}.tar.xz \
file://0001-mdadm-skip-test-11spare-migration.patch \
file://0001-Fix-parsing-of-r-in-monitor-manager-mode.patch \
file://0001-Makefile-install-mdcheck.patch \
+ file://0001-mdadm-Fix-optional-write-behind-parameter.patch \
+ file://0001-tests-02lineargrow-clear-the-superblock-at-every-ite.patch \
+ file://0001-tests-00raid0-add-a-test-that-validates-raid0-with-l.patch \
+ file://0001-tests-fix-raid0-tests-for-0.90-metadata.patch \
+ file://0001-tests-00readonly-Run-udevadm-settle-before-setting-r.patch \
+ file://0001-tests-04update-metadata-avoid-passing-chunk-size-to.patch \
+ file://0001-DDF-Cleanup-validate_geometry_ddf_container.patch \
+ file://0002-DDF-Fix-NULL-pointer-dereference-in-validate_geometr.patch \
+ file://0003-mdadm-Grow-Fix-use-after-close-bug-by-closing-after-.patch \
+ file://0004-monitor-Avoid-segfault-when-calling-NULL-get_bad_blo.patch \
+ file://0005-mdadm-test-Mark-and-ignore-broken-test-failures.patch \
+ file://0006-tests-Add-broken-files-for-all-broken-tests.patch \
"
SRC_URI[sha256sum] = "461c215670864bb74a4d1a3620684aa2b2f8296dffa06743f26dda5557acf01d"
@@ -93,10 +105,16 @@ do_install_ptest() {
}
RDEPENDS:${PN} += "bash"
-RDEPENDS:${PN}-ptest += "bash e2fsprogs-mke2fs"
+RDEPENDS:${PN}-ptest += " \
+ bash \
+ e2fsprogs-mke2fs \
+ util-linux-lsblk \
+ util-linux-losetup \
+ util-linux-blockdev \
+ strace \
+"
RRECOMMENDS:${PN}-ptest += " \
coreutils \
- util-linux \
kernel-module-loop \
kernel-module-linear \
kernel-module-raid0 \
diff --git a/meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch b/meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch
deleted file mode 100644
index 01b23898e7..0000000000
--- a/meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From b65152ebc03832972115e6d98e50cb6190d01793 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Ond=C5=99ej=20Lyson=C4=9Bk?= <olysonek@redhat.com>
-Date: Mon, 3 Feb 2020 13:18:13 +0100
-Subject: [PATCH 1/3] Drop superfluous global variable definitions
-
-The file minicom.c, by including the minicom.h header, already defines
-the global variables 'dial_user' and 'dial_pass'. The object file
-minicom.o is always linked to dial.o. Thus the definitions in dial.c
-can be dropped.
-
-This fixes linking with gcc 10 which uses -fno-common by default,
-disallowing multiple global variable definitions.
-
-Upstream-Status: Backport [https://salsa.debian.org/minicom-team/minicom/-/commit/db269bba2a68fde03f5df45ac8372a8f1248ca96]
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- src/dial.c | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/src/dial.c b/src/dial.c
-index eada5ee..d9d481f 100644
---- a/src/dial.c
-+++ b/src/dial.c
-@@ -146,8 +146,6 @@ static int newtype;
- /* Access to ".dialdir" denied? */
- static int dendd = 0;
- static char *tagged;
--char *dial_user;
--char *dial_pass;
-
- /* Change the baud rate. Treat all characters in the given array as if
- * they were key presses within the comm parameters dialog (C-A P) and
---
-2.24.1
-
diff --git a/meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch b/meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch
deleted file mode 100644
index e86b470b7e..0000000000
--- a/meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 924bd2da3a00e030e29d82b74ef82900bd50b475 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Ond=C5=99ej=20Lyson=C4=9Bk?= <olysonek@redhat.com>
-Date: Mon, 3 Feb 2020 13:18:33 +0100
-Subject: [PATCH 2/3] Drop superfluous global variable definitions
-
-The only place where the EXTERN macro mechanism is used to define the
-global variables 'vt_outmap' and 'vt_inmap' is minicom.c (by defining
-an empty EXTERN macro and including the minicom.h header). The file
-vt100.c already defines these variables. The vt100.o object file is
-always linked to minicom.o. Thus it is safe not to define the
-variables in minicom.c and only declare them in the minicom.h header.
-
-This fixes linking with gcc 10 which uses -fno-common by default,
-disallowing multiple global variable definitions.
-
-Upstream-Status: Backport [https://salsa.debian.org/minicom-team/minicom/-/commit/c69cad5b5dda85d361a3a0c1fddc65e933f26d11]
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- src/minicom.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/minicom.h b/src/minicom.h
-index 061c013..0f9693b 100644
---- a/src/minicom.h
-+++ b/src/minicom.h
-@@ -141,7 +141,7 @@ EXTERN int sbcolor; /* Status Bar Background Color */
- EXTERN int st_attr; /* Status Bar attributes. */
-
- /* jl 04.09.97 conversion tables */
--EXTERN unsigned char vt_outmap[256], vt_inmap[256];
-+extern unsigned char vt_outmap[256], vt_inmap[256];
-
- /* MARK updated 02/17/95 - history buffer */
- EXTERN int num_hist_lines; /* History buffer size */
---
-2.24.1
-
diff --git a/meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch b/meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch
deleted file mode 100644
index 3225a0c32a..0000000000
--- a/meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From a4fc603b3641d2efe31479116eb7ba66932901c7 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Ond=C5=99ej=20Lyson=C4=9Bk?= <olysonek@redhat.com>
-Date: Mon, 3 Feb 2020 13:21:41 +0100
-Subject: [PATCH 3/3] Drop superfluous global variable definitions
-
-The only place where the EXTERN macro mechanism is used to define the
-global variables 'portfd_is_socket', 'portfd_is_connected' and
-'portfd_sock_addr' is minicom.c (by defining an empty EXTERN macro and
-including the minicom.h header). The source file sysdep1_s.c already
-defines these variables. The sysdep1_s.o object file is always linked
-to minicom.o. Thus it is safe to drop the definitions from minicom.c
-and only declare the variables in the minicom.h header.
-
-This fixes linking with gcc 10 which uses -fno-common by default,
-disallowing multiple global variable definitions.
-
-Upstream-Status: Backport [https://salsa.debian.org/minicom-team/minicom/-/commit/c8382374c5d340aa4115d527aed76e876ee5456b]
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- src/minicom.h | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/src/minicom.h b/src/minicom.h
-index 0f9693b..1e7cb8c 100644
---- a/src/minicom.h
-+++ b/src/minicom.h
-@@ -113,9 +113,9 @@ EXTERN char *dial_user; /* Our username there */
- EXTERN char *dial_pass; /* Our password */
-
- #ifdef USE_SOCKET
--EXTERN int portfd_is_socket; /* File descriptor is a unix socket */
--EXTERN int portfd_is_connected; /* 1 if the socket is connected */
--EXTERN struct sockaddr_un portfd_sock_addr; /* the unix socket address */
-+extern int portfd_is_socket; /* File descriptor is a unix socket */
-+extern int portfd_is_connected; /* 1 if the socket is connected */
-+extern struct sockaddr_un portfd_sock_addr; /* the unix socket address */
- #define portfd_connected ((portfd_is_socket && !portfd_is_connected) \
- ? -1 : portfd)
- #else
---
-2.24.1
-
diff --git a/meta/recipes-extended/newt/files/0001-detect-gold-as-GNU-linker-too.patch b/meta/recipes-extended/newt/files/0001-detect-gold-as-GNU-linker-too.patch
index a4b3afd959..090ed5c1c9 100644
--- a/meta/recipes-extended/newt/files/0001-detect-gold-as-GNU-linker-too.patch
+++ b/meta/recipes-extended/newt/files/0001-detect-gold-as-GNU-linker-too.patch
@@ -1,4 +1,4 @@
-From 58245b859ffbcb1780575bf1b0a018d55e74e434 Mon Sep 17 00:00:00 2001
+From 08ba909500412611953aea0fa2fe0d8fe76b6e24 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andreas=20M=C3=BCller?= <schnitzeltony@googlemail.com>
Date: Wed, 21 Sep 2016 21:14:40 +0200
Subject: [PATCH] detect gold as GNU linker too
@@ -9,23 +9,21 @@ Content-Transfer-Encoding: 8bit
Upstream-Status: Pending
Signed-off-by: Andreas Müller <schnitzeltony@googlemail.com>
+
---
configure.ac | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configure.ac b/configure.ac
-index 03e8bda..c2fce51 100644
+index 468c718..cd93f30 100644
--- a/configure.ac
+++ b/configure.ac
@@ -28,7 +28,7 @@ AC_CHECK_SIZEOF([void *])
AC_MSG_CHECKING([for GNU ld])
- LD=`$CC -print-prog-name=ld 2>&5`
+ LD=$($CC -print-prog-name=ld 2>&5)
--if test `$LD -v 2>&1 | $ac_cv_path_GREP -c "GNU ld"` = 0; then
-+if test `$LD -v 2>&1 | $ac_cv_path_GREP -c "GNU "` = 0; then
+-if test $($LD -v 2>&1 | $ac_cv_path_GREP -c "GNU ld") = 0; then
++if test $($LD -v 2>&1 | $ac_cv_path_GREP -c "GNU ") = 0; then
# Not
GNU_LD=""
AC_MSG_RESULT([no])
---
-2.5.5
-
diff --git a/meta/recipes-extended/newt/files/0002-don-t-ignore-CFLAGS-when-building-snack.patch b/meta/recipes-extended/newt/files/0002-don-t-ignore-CFLAGS-when-building-snack.patch
deleted file mode 100644
index ca235d5108..0000000000
--- a/meta/recipes-extended/newt/files/0002-don-t-ignore-CFLAGS-when-building-snack.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From f60dc1063607ca1f201ba4cbda467d8af3f78f64 Mon Sep 17 00:00:00 2001
-From: Miroslav Lichvar <mlichvar@redhat.com>
-Date: Tue, 1 Oct 2019 16:37:55 +0200
-Subject: [PATCH] don't ignore CFLAGS when building snack
-
-In addition to the flags returned by python-config --cflags, use the
-user-specified CFLAGS when building the snack object.
-
-Upstream-Status: Backport from master
-Signed-off-by: Joshua Watt <JPEWhacker@gmail.com>
----
- Makefile.in | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/Makefile.in b/Makefile.in
-index be5f87b..6facd5e 100644
---- a/Makefile.in
-+++ b/Makefile.in
-@@ -96,8 +96,8 @@ _snack.$(SOEXT): snack.c $(LIBNEWTSH)
- PIFLAGS=`$$pyconfig --includes`; \
- PLDFLAGS=`$$pyconfig --ldflags`; \
- PLFLAGS=`$$pyconfig --libs`; \
-- echo $(CC) $(SHCFLAGS) $(CPPFLAGS) $$PIFLAGS $$PCFLAGS -c -o $$ver/snack.o snack.c; \
-- $(CC) $(SHCFLAGS) $(CPPFLAGS) $$PIFLAGS $$PCFLAGS -c -o $$ver/snack.o snack.c; \
-+ echo $(CC) $(SHCFLAGS) $(CFLAGS) $(CPPFLAGS) $$PIFLAGS $$PCFLAGS -c -o $$ver/snack.o snack.c; \
-+ $(CC) $(SHCFLAGS) $(CFLAGS) $(CPPFLAGS) $$PIFLAGS $$PCFLAGS -c -o $$ver/snack.o snack.c; \
- echo $(CC) --shared $$PLDFLAGS $$PLFLAGS $(LDFLAGS) -o $$ver/_snack.$(SOEXT) $$ver/snack.o -L. -lnewt $(LIBS); \
- $(CC) --shared $$PLDFLAGS $$PLFLAGS $(LDFLAGS) -o $$ver/_snack.$(SOEXT) $$ver/snack.o -L. -lnewt $(LIBS); \
- done || :
diff --git a/meta/recipes-extended/newt/libnewt_0.52.21.bb b/meta/recipes-extended/newt/libnewt_0.52.23.bb
index 430e481b36..cd3731cf74 100644
--- a/meta/recipes-extended/newt/libnewt_0.52.21.bb
+++ b/meta/recipes-extended/newt/libnewt_0.52.23.bb
@@ -21,11 +21,9 @@ SRC_URI = "https://releases.pagure.org/newt/newt-${PV}.tar.gz \
file://cross_ar.patch \
file://Makefile.in-Add-tinfo-library-to-the-linking-librari.patch \
file://0001-detect-gold-as-GNU-linker-too.patch \
- file://0002-don-t-ignore-CFLAGS-when-building-snack.patch \
"
-SRC_URI[md5sum] = "a0a5fd6b53bb167a65e15996b249ebb5"
-SRC_URI[sha256sum] = "265eb46b55d7eaeb887fca7a1d51fe115658882dfe148164b6c49fccac5abb31"
+SRC_URI[sha256sum] = "caa372907b14ececfe298f0d512a62f41d33b290610244a58aed07bbc5ada12a"
S = "${WORKDIR}/newt-${PV}"
diff --git a/meta/recipes-extended/pam/libpam/0001-pam_motd-do-not-rely-on-all-filesystems-providing-a-.patch b/meta/recipes-extended/pam/libpam/0001-pam_motd-do-not-rely-on-all-filesystems-providing-a-.patch
new file mode 100644
index 0000000000..94dcb04f0a
--- /dev/null
+++ b/meta/recipes-extended/pam/libpam/0001-pam_motd-do-not-rely-on-all-filesystems-providing-a-.patch
@@ -0,0 +1,108 @@
+From 42404548721c653317c911c83d885e2fc7fbca70 Mon Sep 17 00:00:00 2001
+From: Per Jessen <per@jessen.ch>
+Date: Fri, 22 Apr 2022 18:15:36 +0200
+Subject: [PATCH] pam_motd: do not rely on all filesystems providing a filetype
+
+When using scandir() to look for MOTD files to display, we wrongly
+relied on all filesystems providing a filetype. This is a fix to divert
+to lstat() when we have no filetype. To maintain MT safety, it isn't
+possible to use lstat() in the scandir() filter function, so all of the
+filtering has been moved to an additional loop after scanning all the
+motd dirs.
+Also, remove the superfluous alphasort from scandir(); we are doing
+a qsort() later.
+
+Resolves: https://github.com/linux-pam/linux-pam/issues/455
+
+Upstream-Status: Backport [https://github.com/linux-pam/linux-pam/commit/42404548721c653317c911c83d885e2fc7fbca70]
+
+Signed-off-by: Per Jessen <per@jessen.ch>
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ modules/pam_motd/pam_motd.c | 49 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 40 insertions(+), 9 deletions(-)
+
+diff --git a/modules/pam_motd/pam_motd.c b/modules/pam_motd/pam_motd.c
+index 6ac8cba2..5ca486e4 100644
+--- a/modules/pam_motd/pam_motd.c
++++ b/modules/pam_motd/pam_motd.c
+@@ -166,11 +166,6 @@ static int compare_strings(const void *a, const void *b)
+ }
+ }
+
+-static int filter_dirents(const struct dirent *d)
+-{
+- return (d->d_type == DT_REG || d->d_type == DT_LNK);
+-}
+-
+ static void try_to_display_directories_with_overrides(pam_handle_t *pamh,
+ char **motd_dir_path_split, unsigned int num_motd_dirs, int report_missing)
+ {
+@@ -199,8 +194,7 @@ static void try_to_display_directories_with_overrides(pam_handle_t *pamh,
+
+ for (i = 0; i < num_motd_dirs; i++) {
+ int rv;
+- rv = scandir(motd_dir_path_split[i], &(dirscans[i]),
+- filter_dirents, alphasort);
++ rv = scandir(motd_dir_path_split[i], &(dirscans[i]), NULL, NULL);
+ if (rv < 0) {
+ if (errno != ENOENT || report_missing) {
+ pam_syslog(pamh, LOG_ERR, "error scanning directory %s: %m",
+@@ -215,6 +209,41 @@ static void try_to_display_directories_with_overrides(pam_handle_t *pamh,
+ if (dirscans_size_total == 0)
+ goto out;
+
++ /* filter out unwanted names, directories, and complement data with lstat() */
++ for (i = 0; i < num_motd_dirs; i++) {
++ struct dirent **d = dirscans[i];
++ for (unsigned int j = 0; j < dirscans_sizes[i]; j++) {
++ int rc;
++ char *fullpath;
++ struct stat s;
++
++ switch(d[j]->d_type) { /* the filetype determines how to proceed */
++ case DT_REG: /* regular files and */
++ case DT_LNK: /* symlinks */
++ continue; /* are good. */
++ case DT_UNKNOWN: /* for file systems that do not provide */
++ /* a filetype, we use lstat() */
++ if (join_dir_strings(&fullpath, motd_dir_path_split[i],
++ d[j]->d_name) <= 0)
++ break;
++ rc = lstat(fullpath, &s);
++ _pam_drop(fullpath); /* free the memory alloc'ed by join_dir_strings */
++ if (rc != 0) /* if the lstat() somehow failed */
++ break;
++
++ if (S_ISREG(s.st_mode) || /* regular files and */
++ S_ISLNK(s.st_mode)) continue; /* symlinks are good */
++ break;
++ case DT_DIR: /* We don't want directories */
++ default: /* nor anything else */
++ break;
++ }
++ _pam_drop(d[j]); /* free memory */
++ d[j] = NULL; /* indicate this one was dropped */
++ dirscans_size_total--;
++ }
++ }
++
+ /* Allocate space for all file names found in the directories, including duplicates. */
+ if ((dirnames_all = calloc(dirscans_size_total, sizeof(*dirnames_all))) == NULL) {
+ pam_syslog(pamh, LOG_CRIT, "failed to allocate dirname array");
+@@ -225,8 +254,10 @@ static void try_to_display_directories_with_overrides(pam_handle_t *pamh,
+ unsigned int j;
+
+ for (j = 0; j < dirscans_sizes[i]; j++) {
+- dirnames_all[i_dirnames] = dirscans[i][j]->d_name;
+- i_dirnames++;
++ if (NULL != dirscans[i][j]) {
++ dirnames_all[i_dirnames] = dirscans[i][j]->d_name;
++ i_dirnames++;
++ }
+ }
+ }
+
+--
+2.39.0
+
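A minimal standalone sketch of the DT_UNKNOWN fallback described above, independent of the pam_motd code; the directory path is illustrative:

#include <dirent.h>
#include <stdio.h>
#include <sys/stat.h>

/* Return 1 if the entry is a regular file or symlink, 0 otherwise. */
static int is_reg_or_lnk(const char *dirpath, const struct dirent *d)
{
        char full[4096];
        struct stat st;

        switch (d->d_type) {
        case DT_REG:
        case DT_LNK:
                return 1;
        case DT_UNKNOWN:
                /* Filesystem did not report a type: ask lstat() instead. */
                snprintf(full, sizeof(full), "%s/%s", dirpath, d->d_name);
                if (lstat(full, &st) != 0)
                        return 0;
                return S_ISREG(st.st_mode) || S_ISLNK(st.st_mode);
        default:
                return 0;       /* directories and anything else are skipped */
        }
}

int main(void)
{
        DIR *dir = opendir("/etc/motd.d");      /* illustrative directory */
        struct dirent *d;

        if (!dir)
                return 0;
        while ((d = readdir(dir)) != NULL)
                if (is_reg_or_lnk("/etc/motd.d", d))
                        printf("would display %s\n", d->d_name);
        closedir(dir);
        return 0;
}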
diff --git a/meta/recipes-extended/pam/libpam/99_pam b/meta/recipes-extended/pam/libpam/99_pam
index 97e990d10b..a88247be13 100644
--- a/meta/recipes-extended/pam/libpam/99_pam
+++ b/meta/recipes-extended/pam/libpam/99_pam
@@ -1 +1 @@
-d root root 0755 /var/run/sepermit none
+d root root 0755 /run/sepermit none
diff --git a/meta/recipes-extended/pam/libpam/CVE-2022-28321-0002.patch b/meta/recipes-extended/pam/libpam/CVE-2022-28321-0002.patch
new file mode 100644
index 0000000000..e7bf03f9f7
--- /dev/null
+++ b/meta/recipes-extended/pam/libpam/CVE-2022-28321-0002.patch
@@ -0,0 +1,205 @@
+From 23393bef92c1e768eda329813d7af55481c6ca9f Mon Sep 17 00:00:00 2001
+From: Thorsten Kukuk <kukuk@suse.com>
+Date: Thu, 24 Feb 2022 10:37:32 +0100
+Subject: [PATCH 2/2] pam_access: handle hostnames in access.conf
+
+According to the manual page, the following entry is valid but does not
+work:
+-:root:ALL EXCEPT localhost
+
+See https://bugzilla.suse.com/show_bug.cgi?id=1019866
+
+Patch is based on PR#226 from Josef Moellers
+
+Upstream-Status: Backport
+CVE: CVE-2022-28321
+
+Reference to upstream patch:
+[https://github.com/linux-pam/linux-pam/commit/23393bef92c1e768eda329813d7af55481c6ca9f]
+
+Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
+---
+ modules/pam_access/pam_access.c | 95 ++++++++++++++++++++++++++-------
+ 1 file changed, 76 insertions(+), 19 deletions(-)
+
+diff --git a/modules/pam_access/pam_access.c b/modules/pam_access/pam_access.c
+index 277192b..bca424f 100644
+--- a/modules/pam_access/pam_access.c
++++ b/modules/pam_access/pam_access.c
+@@ -637,7 +637,7 @@ remote_match (pam_handle_t *pamh, char *tok, struct login_info *item)
+ if ((str_len = strlen(string)) > tok_len
+ && strcasecmp(tok, string + str_len - tok_len) == 0)
+ return YES;
+- } else if (tok[tok_len - 1] == '.') {
++ } else if (tok[tok_len - 1] == '.') { /* internet network numbers (end with ".") */
+ struct addrinfo hint;
+
+ memset (&hint, '\0', sizeof (hint));
+@@ -678,7 +678,7 @@ remote_match (pam_handle_t *pamh, char *tok, struct login_info *item)
+ return NO;
+ }
+
+- /* Assume network/netmask with an IP of a host. */
++ /* Assume network/netmask, IP address or hostname. */
+ return network_netmask_match(pamh, tok, string, item);
+ }
+
+@@ -696,7 +696,7 @@ string_match (pam_handle_t *pamh, const char *tok, const char *string,
+ /*
+ * If the token has the magic value "ALL" the match always succeeds.
+ * Otherwise, return YES if the token fully matches the string.
+- * "NONE" token matches NULL string.
++ * "NONE" token matches NULL string.
+ */
+
+ if (strcasecmp(tok, "ALL") == 0) { /* all: always matches */
+@@ -714,7 +714,8 @@ string_match (pam_handle_t *pamh, const char *tok, const char *string,
+
+ /* network_netmask_match - match a string against one token
+ * where string is a hostname or ip (v4,v6) address and tok
+- * represents either a single ip (v4,v6) address or a network/netmask
++ * represents either a hostname, a single ip (v4,v6) address
++ * or a network/netmask
+ */
+ static int
+ network_netmask_match (pam_handle_t *pamh,
+@@ -723,10 +724,12 @@ network_netmask_match (pam_handle_t *pamh,
+ char *netmask_ptr;
+ char netmask_string[MAXHOSTNAMELEN + 1];
+ int addr_type;
++ struct addrinfo *ai = NULL;
+
+ if (item->debug)
+- pam_syslog (pamh, LOG_DEBUG,
++ pam_syslog (pamh, LOG_DEBUG,
+ "network_netmask_match: tok=%s, item=%s", tok, string);
++
+ /* OK, check if tok is of type addr/mask */
+ if ((netmask_ptr = strchr(tok, '/')) != NULL)
+ {
+@@ -760,54 +763,108 @@ network_netmask_match (pam_handle_t *pamh,
+ netmask_ptr = number_to_netmask(netmask, addr_type,
+ netmask_string, MAXHOSTNAMELEN);
+ }
+- }
++
++ /*
++ * Construct an addrinfo list from the IP address.
++ * This should not fail as the input is a correct IP address...
++ */
++ if (getaddrinfo (tok, NULL, NULL, &ai) != 0)
++ {
++ return NO;
++ }
++ }
+ else
+- /* NO, then check if it is only an addr */
+- if (isipaddr(tok, NULL, NULL) != YES)
++ {
++ /*
++ * It is either an IP address or a hostname.
++ * Let getaddrinfo sort everything out
++ */
++ if (getaddrinfo (tok, NULL, NULL, &ai) != 0)
+ {
++ pam_syslog(pamh, LOG_ERR, "cannot resolve hostname \"%s\"", tok);
++
+ return NO;
+ }
++ netmask_ptr = NULL;
++ }
+
+ if (isipaddr(string, NULL, NULL) != YES)
+ {
+- /* Assume network/netmask with a name of a host. */
+ struct addrinfo hint;
+
++ /* Assume network/netmask with a name of a host. */
+ memset (&hint, '\0', sizeof (hint));
+ hint.ai_flags = AI_CANONNAME;
+ hint.ai_family = AF_UNSPEC;
+
+ if (item->gai_rv != 0)
++ {
++ freeaddrinfo(ai);
+ return NO;
++ }
+ else if (!item->res &&
+ (item->gai_rv = getaddrinfo (string, NULL, &hint, &item->res)) != 0)
++ {
++ freeaddrinfo(ai);
+ return NO;
++ }
+ else
+ {
+ struct addrinfo *runp = item->res;
++ struct addrinfo *runp1;
+
+ while (runp != NULL)
+ {
+ char buf[INET6_ADDRSTRLEN];
+
+- DIAG_PUSH_IGNORE_CAST_ALIGN;
+- inet_ntop (runp->ai_family,
+- runp->ai_family == AF_INET
+- ? (void *) &((struct sockaddr_in *) runp->ai_addr)->sin_addr
+- : (void *) &((struct sockaddr_in6 *) runp->ai_addr)->sin6_addr,
+- buf, sizeof (buf));
+- DIAG_POP_IGNORE_CAST_ALIGN;
++ if (getnameinfo (runp->ai_addr, runp->ai_addrlen, buf, sizeof (buf), NULL, 0, NI_NUMERICHOST) != 0)
++ {
++ freeaddrinfo(ai);
++ return NO;
++ }
+
+- if (are_addresses_equal(buf, tok, netmask_ptr))
++ for (runp1 = ai; runp1 != NULL; runp1 = runp1->ai_next)
+ {
+- return YES;
++ char buf1[INET6_ADDRSTRLEN];
++
++ if (runp->ai_family != runp1->ai_family)
++ continue;
++
++ if (getnameinfo (runp1->ai_addr, runp1->ai_addrlen, buf1, sizeof (buf1), NULL, 0, NI_NUMERICHOST) != 0)
++ {
++ freeaddrinfo(ai);
++ return NO;
++ }
++
++ if (are_addresses_equal (buf, buf1, netmask_ptr))
++ {
++ freeaddrinfo(ai);
++ return YES;
++ }
+ }
+ runp = runp->ai_next;
+ }
+ }
+ }
+ else
+- return (are_addresses_equal(string, tok, netmask_ptr));
++ {
++ struct addrinfo *runp1;
++
++ for (runp1 = ai; runp1 != NULL; runp1 = runp1->ai_next)
++ {
++ char buf1[INET6_ADDRSTRLEN];
++
++ (void) getnameinfo (runp1->ai_addr, runp1->ai_addrlen, buf1, sizeof (buf1), NULL, 0, NI_NUMERICHOST);
++
++ if (are_addresses_equal(string, buf1, netmask_ptr))
++ {
++ freeaddrinfo(ai);
++ return YES;
++ }
++ }
++ }
++
++ freeaddrinfo(ai);
+
+ return NO;
+ }
+--
+2.37.3
+
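A minimal standalone sketch of the getaddrinfo()-based matching described above, independent of the pam_access code; the function name and example addresses are illustrative:

#include <arpa/inet.h>
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Return 1 if 'tok' (hostname or IP address) resolves to an address whose
 * numeric form equals 'addr', 0 otherwise. */
static int token_matches_addr(const char *tok, const char *addr)
{
        struct addrinfo *ai, *rp;
        char buf[INET6_ADDRSTRLEN];
        int match = 0;

        /* Let getaddrinfo() sort out whether tok is a name or an address. */
        if (getaddrinfo(tok, NULL, NULL, &ai) != 0)
                return 0;

        for (rp = ai; rp != NULL && !match; rp = rp->ai_next) {
                if (getnameinfo(rp->ai_addr, rp->ai_addrlen, buf, sizeof(buf),
                                NULL, 0, NI_NUMERICHOST) != 0)
                        continue;
                if (strcmp(buf, addr) == 0)
                        match = 1;
        }

        freeaddrinfo(ai);
        return match;
}

int main(void)
{
        /* "localhost" is expected to resolve to 127.0.0.1 (or ::1). */
        printf("match: %d\n", token_matches_addr("localhost", "127.0.0.1"));
        return 0;
}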
diff --git a/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch b/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch
new file mode 100644
index 0000000000..e9e3a078e0
--- /dev/null
+++ b/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch
@@ -0,0 +1,62 @@
+From 031bb5a5d0d950253b68138b498dc93be69a64cb Mon Sep 17 00:00:00 2001
+From: Matthias Gerstner <matthias.gerstner@suse.de>
+Date: Wed, 27 Dec 2023 14:01:59 +0100
+Subject: [PATCH] pam_namespace: protect_dir(): use O_DIRECTORY to prevent
+ local DoS situations
+
+Without O_DIRECTORY the path crawling logic is subject to e.g. FIFOs
+being placed in user controlled directories, causing the PAM module to
+block indefinitely during `openat()`.
+
+Pass O_DIRECTORY to cause the `openat()` to fail if the path does not
+refer to a directory.
+
+With this the check whether the final path element is a directory
+becomes unnecessary; drop it.
+
+Upstream-Status: Backport [https://github.com/linux-pam/linux-pam/commit/031bb5a5d0d950253b68138b498dc93be69a64cb]
+CVE: CVE-2024-22365
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ modules/pam_namespace/pam_namespace.c | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+diff --git a/modules/pam_namespace/pam_namespace.c b/modules/pam_namespace/pam_namespace.c
+index 4d4188d..d6b1d3c 100644
+--- a/modules/pam_namespace/pam_namespace.c
++++ b/modules/pam_namespace/pam_namespace.c
+@@ -1103,7 +1103,7 @@ static int protect_dir(const char *path, mode_t mode, int do_mkdir,
+ int dfd = AT_FDCWD;
+ int dfd_next;
+ int save_errno;
+- int flags = O_RDONLY;
++ int flags = O_RDONLY | O_DIRECTORY;
+ int rv = -1;
+ struct stat st;
+
+@@ -1157,22 +1157,6 @@ static int protect_dir(const char *path, mode_t mode, int do_mkdir,
+ rv = openat(dfd, dir, flags);
+ }
+
+- if (rv != -1) {
+- if (fstat(rv, &st) != 0) {
+- save_errno = errno;
+- close(rv);
+- rv = -1;
+- errno = save_errno;
+- goto error;
+- }
+- if (!S_ISDIR(st.st_mode)) {
+- close(rv);
+- errno = ENOTDIR;
+- rv = -1;
+- goto error;
+- }
+- }
+-
+ if (flags & O_NOFOLLOW) {
+ /* we are inside user-owned dir - protect */
+ if (protect_mount(rv, p, idata) == -1) {
+--
+2.25.1
+
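The behaviour the patch relies on is that open()/openat() with O_DIRECTORY fails with ENOTDIR when the path does not refer to a directory, so a FIFO planted in a user-controlled directory can no longer make the open block, and the follow-up fstat()/S_ISDIR() check becomes redundant. A small demonstration (the paths are only examples from a typical Linux system):

    /* O_DIRECTORY demo: a regular file is rejected up front with ENOTDIR,
     * while a real directory still opens normally. */
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/etc/passwd", O_RDONLY | O_DIRECTORY);
        if (fd == -1)
            printf("/etc/passwd: %s\n", strerror(errno));  /* Not a directory */
        else
            close(fd);

        fd = open("/etc", O_RDONLY | O_DIRECTORY);
        if (fd != -1) {
            printf("/etc opened fine with O_DIRECTORY\n");
            close(fd);
        }
        return 0;
    }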
diff --git a/meta/recipes-extended/pam/libpam_1.5.2.bb b/meta/recipes-extended/pam/libpam_1.5.2.bb
index 081986ef43..20745aa837 100644
--- a/meta/recipes-extended/pam/libpam_1.5.2.bb
+++ b/meta/recipes-extended/pam/libpam_1.5.2.bb
@@ -24,6 +24,9 @@ SRC_URI = "https://github.com/linux-pam/linux-pam/releases/download/v${PV}/Linux
file://0001-run-xtests.sh-check-whether-files-exist.patch \
file://run-ptest \
file://pam-volatiles.conf \
+ file://CVE-2022-28321-0002.patch \
+ file://0001-pam_motd-do-not-rely-on-all-filesystems-providing-a-.patch \
+ file://CVE-2024-22365.patch \
"
SRC_URI[sha256sum] = "e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d"
diff --git a/meta/recipes-extended/parted/files/run-ptest b/meta/recipes-extended/parted/files/run-ptest
index c3d6fca339..096078967f 100644
--- a/meta/recipes-extended/parted/files/run-ptest
+++ b/meta/recipes-extended/parted/files/run-ptest
@@ -1,7 +1,7 @@
#!/bin/sh
-mkdir -p /etc/udev/mount.blacklist.d
-echo /dev/sda1 >> /etc/udev/mount.blacklist.d/parted-tmp
+mkdir -p /etc/udev/mount.ignorelist.d
+echo /dev/sda1 >> /etc/udev/mount.ignorelist.d/parted-tmp
rm -f tests/*.log
make -C tests test-suite.log
-rm /etc/udev/mount.blacklist.d/parted-tmp
+rm /etc/udev/mount.ignorelist.d/parted-tmp
diff --git a/meta/recipes-extended/procps/procps/CVE-2023-4016.patch b/meta/recipes-extended/procps/procps/CVE-2023-4016.patch
new file mode 100644
index 0000000000..c530b1cfea
--- /dev/null
+++ b/meta/recipes-extended/procps/procps/CVE-2023-4016.patch
@@ -0,0 +1,85 @@
+From 2c933ecba3bb1d3041a5a7a53a7b4078a6003413 Mon Sep 17 00:00:00 2001
+From: Craig Small <csmall@dropbear.xyz>
+Date: Thu, 10 Aug 2023 21:18:38 +1000
+Subject: [PATCH] ps: Fix possible buffer overflow in -C option
+
+ps allocates memory using malloc(length of arg * len of struct).
+In certain strange circumstances, the arg length could be very large
+and the multiplication will overflow, allocating a small amount of
+memory.
+
+Subsequent strncpy() will then write into unallocated memory.
+The fix is to use calloc. It's slower but this is a one-time
+allocation. Other malloc(x * y) calls have also been replaced
+by calloc(x, y)
+
+References:
+ https://www.freelists.org/post/procps/ps-buffer-overflow-CVE-20234016
+ https://nvd.nist.gov/vuln/detail/CVE-2023-4016
+ https://gitlab.com/procps-ng/procps/-/issues/297
+ https://bugs.debian.org/1042887
+
+Signed-off-by: Craig Small <csmall@dropbear.xyz>
+
+CVE: CVE-2023-4016
+Upstream-Status: Backport [https://gitlab.com/procps-ng/procps/-/commit/2c933ecba3bb1d3041a5a7a53a7b4078a6003413]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ NEWS | 1 +
+ ps/parser.c | 8 ++++----
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/NEWS b/NEWS
+index b9509734..64fa3da8 100644
+--- a/NEWS
++++ b/NEWS
+@@ -1,3 +1,5 @@
++ * ps: Fix buffer overflow in -C option CVE-2023-4016 Debian #1042887, issue #297
++
+ procps-ng-3.3.17
+ ---------------
+ * library: Incremented to 8:3:0
+diff --git a/ps/parser.c b/ps/parser.c
+index 248aa741..15873dfa 100644
+--- a/ps/parser.c
++++ b/ps/parser.c
+@@ -184,7 +184,6 @@ static const char *parse_list(const char *arg, const char *(*parse_fn)(char *, s
+ const char *err; /* error code that could or did happen */
+ /*** prepare to operate ***/
+ node = malloc(sizeof(selection_node));
+- node->u = malloc(strlen(arg)*sizeof(sel_union)); /* waste is insignificant */
+ node->n = 0;
+ buf = strdup(arg);
+ /*** sanity check and count items ***/
+@@ -205,6 +204,7 @@ static const char *parse_list(const char *arg, const char *(*parse_fn)(char *, s
+ } while (*++walk);
+ if(need_item) goto parse_error;
+ node->n = items;
++ node->u = calloc(items, sizeof(sel_union));
+ /*** actually parse the list ***/
+ walk = buf;
+ while(items--){
+@@ -1031,15 +1031,15 @@ static const char *parse_trailing_pids(void){
+ thisarg = ps_argc - 1; /* we must be at the end now */
+
+ pidnode = malloc(sizeof(selection_node));
+- pidnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ pidnode->u = calloc(i, sizeof(sel_union)); /* waste is insignificant */
+ pidnode->n = 0;
+
+ grpnode = malloc(sizeof(selection_node));
+- grpnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ grpnode->u = calloc(i,sizeof(sel_union)); /* waste is insignificant */
+ grpnode->n = 0;
+
+ sidnode = malloc(sizeof(selection_node));
+- sidnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ sidnode->u = calloc(i, sizeof(sel_union)); /* waste is insignificant */
+ sidnode->n = 0;
+
+ while(i--){
+--
+GitLab
+
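The core of the fix is replacing malloc(count * size) with calloc(count, size): calloc() performs the multiplication with an overflow check, whereas a wrapped product passed to malloc() silently yields an undersized buffer. A small sketch of the difference (the values are deliberately absurd, purely for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    int main(void)
    {
        size_t n  = SIZE_MAX / 16 + 2;  /* huge element count */
        size_t sz = 32;                 /* n * sz wraps around SIZE_MAX */

        void *a = malloc(n * sz);       /* wrapped product: may "succeed" with a tiny buffer */
        void *b = calloc(n, sz);        /* overflow detected: returns NULL */

        printf("wrapped product : %zu\n", n * sz);
        printf("malloc(n * sz)  : %p\n", a);
        printf("calloc(n, sz)   : %p\n", b);

        free(a);
        free(b);
        return 0;
    }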
diff --git a/meta/recipes-extended/procps/procps_3.3.17.bb b/meta/recipes-extended/procps/procps_3.3.17.bb
index 0f5575c9ab..897f28f187 100644
--- a/meta/recipes-extended/procps/procps_3.3.17.bb
+++ b/meta/recipes-extended/procps/procps_3.3.17.bb
@@ -16,6 +16,7 @@ SRC_URI = "git://gitlab.com/procps-ng/procps.git;protocol=https;branch=master \
file://sysctl.conf \
file://0001-w.c-correct-musl-builds.patch \
file://0002-proc-escape.c-add-missing-include.patch \
+ file://CVE-2023-4016.patch \
"
SRCREV = "19a508ea121c0c4ac6d0224575a036de745eaaf8"
diff --git a/meta/recipes-extended/psmisc/psmisc.inc b/meta/recipes-extended/psmisc/psmisc.inc
index 12539dad53..44b82bd325 100644
--- a/meta/recipes-extended/psmisc/psmisc.inc
+++ b/meta/recipes-extended/psmisc/psmisc.inc
@@ -54,3 +54,5 @@ ALTERNATIVE_PRIORITY = "90"
ALTERNATIVE:killall = "killall"
ALTERNATIVE:fuser = "fuser"
+
+ALTERNATIVE:pstree = "pstree"
diff --git a/meta/recipes-extended/screen/screen/CVE-2023-24626.patch b/meta/recipes-extended/screen/screen/CVE-2023-24626.patch
new file mode 100644
index 0000000000..73caf9d81b
--- /dev/null
+++ b/meta/recipes-extended/screen/screen/CVE-2023-24626.patch
@@ -0,0 +1,40 @@
+From e9ad41bfedb4537a6f0de20f00b27c7739f168f7 Mon Sep 17 00:00:00 2001
+From: Alexander Naumov <alexander_naumov@opensuse.org>
+Date: Mon, 30 Jan 2023 17:22:25 +0200
+Subject: fix: missing signal sending permission check on failed query messages
+
+Signed-off-by: Alexander Naumov <alexander_naumov@opensuse.org>
+
+CVE: CVE-2023-24626
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/screen.git/commit/?id=e9ad41bfedb4537a6f0de20f00b27c7739f168f7]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ socket.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/socket.c b/socket.c
+index bb68b35..9d87445 100644
+--- a/socket.c
++++ b/socket.c
+@@ -1285,11 +1285,16 @@ ReceiveMsg()
+ else
+ queryflag = -1;
+
+- Kill(m.m.command.apid,
++ if (CheckPid(m.m.command.apid)) {
++ Msg(0, "Query attempt with bad pid(%d)!", m.m.command.apid);
++ }
++ else {
++ Kill(m.m.command.apid,
+ (queryflag >= 0)
+ ? SIGCONT
+ : SIG_BYE); /* Send SIG_BYE if an error happened */
+- queryflag = -1;
++ queryflag = -1;
++ }
+ }
+ break;
+ case MSG_COMMAND:
+--
+2.25.1
+
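CheckPid() is screen-internal; as a generic stand-in for the idea, the sketch below validates a caller-supplied pid with kill(pid, 0), which performs the existence and permission checks without delivering a signal, before anything is actually sent. This only illustrates the pattern, not the screen implementation:

    #include <stdio.h>
    #include <signal.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Return 1 only if pid names an existing process we may signal. */
    static int pid_is_signalable(pid_t pid)
    {
        if (pid <= 0)
            return 0;          /* reject 0 and negative "broadcast" pids */
        if (kill(pid, 0) == 0)
            return 1;          /* exists and permitted */
        /* ESRCH: no such process; EPERM: exists but owned by someone else */
        return 0;
    }

    int main(void)
    {
        printf("self : %s\n", pid_is_signalable(getpid()) ? "ok" : "refused");
        printf("pid 1: %s\n", pid_is_signalable(1) ? "ok" : "refused");
        return 0;
    }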
diff --git a/meta/recipes-extended/screen/screen_4.9.0.bb b/meta/recipes-extended/screen/screen_4.9.0.bb
index b36173b8de..19070d87d8 100644
--- a/meta/recipes-extended/screen/screen_4.9.0.bb
+++ b/meta/recipes-extended/screen/screen_4.9.0.bb
@@ -21,6 +21,7 @@ SRC_URI = "${GNU_MIRROR}/screen/screen-${PV}.tar.gz \
file://0002-comm.h-now-depends-on-term.h.patch \
file://0001-fix-for-multijob-build.patch \
file://0001-Remove-more-compatibility-stuff.patch \
+ file://CVE-2023-24626.patch \
"
SRC_URI[sha256sum] = "f9335281bb4d1538ed078df78a20c2f39d3af9a4e91c57d084271e0289c730f4"
diff --git a/meta/recipes-extended/shadow/files/0001-Drop-nsswitch.conf-message-when-not-in-place-eg.-musl.patch b/meta/recipes-extended/shadow/files/0001-Drop-nsswitch.conf-message-when-not-in-place-eg.-musl.patch
new file mode 100644
index 0000000000..6c04769713
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/0001-Drop-nsswitch.conf-message-when-not-in-place-eg.-musl.patch
@@ -0,0 +1,27 @@
+From aed5a184401fbbe901cb825be4004ced885b6f9a Mon Sep 17 00:00:00 2001
+From: Andrei Gherzan <andrei.gherzan@huawei.com>
+Date: Wed, 24 Aug 2022 00:54:47 +0200
+Subject: [PATCH] Drop nsswitch.conf message when not in place - eg. musl
+
+Upstream-Status: Inappropriate [issue reported at https://github.com/shadow-maint/shadow/issues/557]
+Signed-off-by: Andrei Gherzan <andrei.gherzan@huawei.com>
+---
+ lib/nss.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/nss.c b/lib/nss.c
+index af3e95a..74e0e16 100644
+--- a/lib/nss.c
++++ b/lib/nss.c
+@@ -57,7 +57,7 @@ void nss_init(char *nsswitch_path) {
+ // subid: files
+ nssfp = fopen(nsswitch_path, "r");
+ if (!nssfp) {
+- fprintf(shadow_logfd, "Failed opening %s: %m", nsswitch_path);
++ //fprintf(shadow_logfd, "Failed opening %s: %m", nsswitch_path);
+ atomic_store(&nss_init_completed, true);
+ return;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch b/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch
new file mode 100644
index 0000000000..ac08be515b
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch
@@ -0,0 +1,65 @@
+From 2eaea70111f65b16d55998386e4ceb4273c19eb4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20G=C3=B6ttsche?= <cgzones@googlemail.com>
+Date: Fri, 31 Mar 2023 14:46:50 +0200
+Subject: [PATCH] Overhaul valid_field()
+
+e5905c4b ("Added control character check") introduced checking for
+control characters but had the logic inverted, so it rejects all
+characters that are not control ones.
+
+Cast the character to `unsigned char` before passing to the character
+checking functions to avoid UB.
+
+Use strpbrk(3) for the illegal character test and return early.
+
+Upstream-Status: Backport [https://github.com/shadow-maint/shadow/commit/2eaea70111f65b16d55998386e4ceb4273c19eb4]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ lib/fields.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/lib/fields.c b/lib/fields.c
+index fb51b582..53929248 100644
+--- a/lib/fields.c
++++ b/lib/fields.c
+@@ -37,26 +37,22 @@ int valid_field (const char *field, const char *illegal)
+
+ /* For each character of field, search if it appears in the list
+ * of illegal characters. */
++ if (illegal && NULL != strpbrk (field, illegal)) {
++ return -1;
++ }
++
++ /* Search if there are non-printable or control characters */
+ for (cp = field; '\0' != *cp; cp++) {
+- if (strchr (illegal, *cp) != NULL) {
++ unsigned char c = *cp;
++ if (!isprint (c)) {
++ err = 1;
++ }
++ if (iscntrl (c)) {
+ err = -1;
+ break;
+ }
+ }
+
+- if (0 == err) {
+- /* Search if there are non-printable or control characters */
+- for (cp = field; '\0' != *cp; cp++) {
+- if (!isprint (*cp)) {
+- err = 1;
+- }
+- if (!iscntrl (*cp)) {
+- err = -1;
+- break;
+- }
+- }
+- }
+-
+ return err;
+ }
+
+--
+2.34.1
+
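Restated outside the patch context, the corrected logic rejects fields containing any explicitly illegal character (now found with strpbrk(3)) or any control character, and only flags other non-printable bytes; casting to unsigned char before the ctype calls avoids undefined behaviour where char is signed. A self-contained sketch under those assumptions:

    #include <stdio.h>
    #include <string.h>
    #include <ctype.h>

    static int valid_field_demo(const char *field, const char *illegal)
    {
        const char *cp;
        int err = 0;

        if (illegal && strpbrk(field, illegal) != NULL)
            return -1;                       /* explicitly illegal character */

        for (cp = field; *cp != '\0'; cp++) {
            unsigned char c = (unsigned char)*cp;
            if (!isprint(c))
                err = 1;                     /* non-printable: flag only */
            if (iscntrl(c))
                return -1;                   /* control character: reject */
        }
        return err;
    }

    int main(void)
    {
        printf("%d\n", valid_field_demo("alice",  ":,="));     /*  0 */
        printf("%d\n", valid_field_demo("al:ce",  ":,="));     /* -1 */
        printf("%d\n", valid_field_demo("al\tce", ":,="));     /* -1 */
        printf("%d\n", valid_field_demo("al\xc3\xa9", ":,=")); /*  1 in the C locale */
        return 0;
    }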
diff --git a/meta/recipes-extended/shadow/files/CVE-2023-29383.patch b/meta/recipes-extended/shadow/files/CVE-2023-29383.patch
new file mode 100644
index 0000000000..f53341d3fc
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/CVE-2023-29383.patch
@@ -0,0 +1,53 @@
+From e5905c4b84d4fb90aefcd96ee618411ebfac663d Mon Sep 17 00:00:00 2001
+From: tomspiderlabs <128755403+tomspiderlabs@users.noreply.github.com>
+Date: Thu, 23 Mar 2023 23:39:38 +0000
+Subject: [PATCH] Added control character check
+
+Added control character check, returning -1 (to "err") if control characters are present.
+
+CVE: CVE-2023-29383
+Upstream-Status: Backport
+
+Reference to upstream:
+https://github.com/shadow-maint/shadow/commit/e5905c4b84d4fb90aefcd96ee618411ebfac663d
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ lib/fields.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/lib/fields.c b/lib/fields.c
+index 640be931..fb51b582 100644
+--- a/lib/fields.c
++++ b/lib/fields.c
+@@ -21,9 +21,9 @@
+ *
+ * The supplied field is scanned for non-printable and other illegal
+ * characters.
+- * + -1 is returned if an illegal character is present.
+- * + 1 is returned if no illegal characters are present, but the field
+- * contains a non-printable character.
++ * + -1 is returned if an illegal or control character is present.
++ * + 1 is returned if no illegal or control characters are present,
++ * but the field contains a non-printable character.
+ * + 0 is returned otherwise.
+ */
+ int valid_field (const char *field, const char *illegal)
+@@ -45,10 +45,13 @@ int valid_field (const char *field, const char *illegal)
+ }
+
+ if (0 == err) {
+- /* Search if there are some non-printable characters */
++ /* Search if there are non-printable or control characters */
+ for (cp = field; '\0' != *cp; cp++) {
+ if (!isprint (*cp)) {
+ err = 1;
++ }
++ if (!iscntrl (*cp)) {
++ err = -1;
+ break;
+ }
+ }
+--
+2.34.1
+
diff --git a/meta/recipes-extended/shadow/files/CVE-2023-4641-0001.patch b/meta/recipes-extended/shadow/files/CVE-2023-4641-0001.patch
new file mode 100644
index 0000000000..2d3c462f4d
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/CVE-2023-4641-0001.patch
@@ -0,0 +1,36 @@
+From 58b6e97a9eef866e9e479fb781aaaf59fb11ef36 Mon Sep 17 00:00:00 2001
+From: Christian Göttsche <cgzones@googlemail.com>
+Date: Mon, 25 Apr 2022 12:17:40 +0200
+Subject: [PATCH 1/2] passwd: erase password copy on all error branches
+
+CVE: CVE-2023-4641
+
+Upstream-Status: Backport [https://github.com/shadow-maint/shadow/commit/58b6e97a9eef866e9e479fb781aaaf59fb11ef36]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/passwd.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/src/passwd.c b/src/passwd.c
+index 80531ec..8c6f81a 100644
+--- a/src/passwd.c
++++ b/src/passwd.c
+@@ -289,6 +289,7 @@ static int new_password (const struct passwd *pw)
+ cp = getpass (_("New password: "));
+ if (NULL == cp) {
+ memzero (orig, sizeof orig);
++ memzero (pass, sizeof pass);
+ return -1;
+ }
+ if (warned && (strcmp (pass, cp) != 0)) {
+@@ -316,6 +317,7 @@ static int new_password (const struct passwd *pw)
+ cp = getpass (_("Re-enter new password: "));
+ if (NULL == cp) {
+ memzero (orig, sizeof orig);
++ memzero (pass, sizeof pass);
+ return -1;
+ }
+ if (strcmp (cp, pass) != 0) {
+--
+2.40.0
diff --git a/meta/recipes-extended/shadow/files/CVE-2023-4641-0002.patch b/meta/recipes-extended/shadow/files/CVE-2023-4641-0002.patch
new file mode 100644
index 0000000000..a37379d7a0
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/CVE-2023-4641-0002.patch
@@ -0,0 +1,147 @@
+From 65c88a43a23c2391dcc90c0abda3e839e9c57904 Mon Sep 17 00:00:00 2001
+From: Alejandro Colomar <alx@kernel.org>
+Date: Sat, 10 Jun 2023 16:20:05 +0200
+Subject: [PATCH 2/2] gpasswd(1): Fix password leak
+
+How to trigger this password leak?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When gpasswd(1) asks for the new password, it asks twice (as is usual
+for confirming the new password). Each of those 2 password prompts
+uses agetpass() to get the password. If the second agetpass() fails,
+the first password, which has been copied into the 'static' buffer
+'pass' via STRFCPY(), wasn't being zeroed.
+
+agetpass() is defined in <./libmisc/agetpass.c> (around line 91), and
+can fail for any of the following reasons:
+
+- malloc(3) or readpassphrase(3) failure.
+
+ These are going to be difficult to trigger. Maybe getting the system
+ to the limits of memory utilization at that exact point, so that the
+ next malloc(3) gets ENOMEM, and possibly even the OOM is triggered.
+ About readpassphrase(3), ENFILE and EINTR seem the only plausible
+ ones, and EINTR probably requires privilege or being the same user;
+ but I wouldn't discard ENFILE so easily, if a process starts opening
+ files.
+
+- The password is longer than PASS_MAX.
+
+ This is plausible with physical access. However, at that point, a
+ keylogger will be a much simpler attack.
+
+And, the attacker must be able to know when the second password is being
+introduced, which is not going to be easy.
+
+How to read the password after the leak?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Provoking the leak yourself at the right point by entering a very long
+password is easy, and inspecting the process stack at that point should
+be doable. Try to find some consistent patterns.
+
+Then, search for those patterns in free memory, right after the victim
+leaks their password.
+
+Once you get the leak, a program should read all the free memory
+searching for patterns that gpasswd(1) leaves nearby the leaked
+password.
+
+On 6/10/23 03:14, Seth Arnold wrote:
+> An attacker process wouldn't be able to use malloc(3) for this task.
+> There's a handful of tools available for userspace to allocate memory:
+>
+> - brk / sbrk
+> - mmap MAP_ANONYMOUS
+> - mmap /dev/zero
+> - mmap some other file
+> - shm_open
+> - shmget
+>
+> Most of these return only pages of zeros to a process. Using mmap of an
+> existing file, you can get some of the contents of the file demand-loaded
+> into the memory space on the first use.
+>
+> The MAP_UNINITIALIZED flag only works if the kernel was compiled with
+> CONFIG_MMAP_ALLOW_UNINITIALIZED. This is rare.
+>
+> malloc(3) doesn't zero memory, to our collective frustration, but all the
+> garbage in the allocations is from previous allocations in the current
+> process. It isn't leftover from other processes.
+>
+> The avenues available for reading the memory:
+> - /dev/mem and /dev/kmem (requires root, not available with Secure Boot)
+> - /proc/pid/mem (requires ptrace privileges, mediated by YAMA)
+> - ptrace (requires ptrace privileges, mediated by YAMA)
+> - causing memory to be swapped to disk, and then inspecting the swap
+>
+> These all require a certain amount of privileges.
+
+How to fix it?
+~~~~~~~~~~~~~~
+
+memzero(), which internally calls explicit_bzero(3), or whatever
+alternative the system provides with a slightly different name, will
+make sure that the buffer is zeroed in memory, and optimizations are not
+allowed to impede this zeroing.
+
+This is not really 100% effective, since compilers may place copies of
+the string somewhere hidden in the stack. Those copies won't get zeroed
+by explicit_bzero(3). However, that's arguably a compiler bug, since
+compilers should make everything possible to avoid optimizing strings
+that are later passed to explicit_bzero(3). But we all know that
+sometimes it's impossible to have perfect knowledge in the compiler, so
+this is plausible. Nevertheless, there's nothing we can do against such
+issues, except minimizing the time such passwords are stored in plain
+text.
+
+Security concerns
+~~~~~~~~~~~~~~~~~
+
+We believe this isn't easy to exploit. Nevertheless, and since the fix
+is trivial, this fix should probably be applied soon, and backported to
+all supported distributions, to prevent someone else having more
+imagination than us to find a way.
+
+Affected versions
+~~~~~~~~~~~~~~~~~
+
+All. Bug introduced in shadow 19990709. That's the second commit in
+the git history.
+
+Fixes: 45c6603cc86c ("[svn-upgrade] Integrating new upstream version, shadow (19990709)")
+Reported-by: Alejandro Colomar <alx@kernel.org>
+Cc: Serge Hallyn <serge@hallyn.com>
+Cc: Iker Pedrosa <ipedrosa@redhat.com>
+Cc: Seth Arnold <seth.arnold@canonical.com>
+Cc: Christian Brauner <christian@brauner.io>
+Cc: Balint Reczey <rbalint@debian.org>
+Cc: Sam James <sam@gentoo.org>
+Cc: David Runge <dvzrv@archlinux.org>
+Cc: Andreas Jaeger <aj@suse.de>
+Cc: <~hallyn/shadow@lists.sr.ht>
+Signed-off-by: Alejandro Colomar <alx@kernel.org>
+
+CVE: CVE-2023-4641
+
+Upstream-Status: Backport [https://github.com/shadow-maint/shadow/commit/65c88a43a23c2391dcc90c0abda3e839e9c57904]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/gpasswd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/gpasswd.c b/src/gpasswd.c
+index c7c9477..00ca569 100644
+--- a/src/gpasswd.c
++++ b/src/gpasswd.c
+@@ -896,6 +896,7 @@ static void change_passwd (struct group *gr)
+ strzero (cp);
+ cp = getpass (_("Re-enter new password: "));
+ if (NULL == cp) {
++ memzero (pass, sizeof pass);
+ exit (1);
+ }
+
+--
+2.40.0
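The pattern both CVE-2023-4641 patches enforce is that every local copy of the password is wiped on every exit path, including the error paths, before the buffer goes out of scope. shadow does this through its memzero() wrapper; the sketch below uses explicit_bzero(3) directly (a glibc 2.25+/BSD extension) and is only an illustration of the idea, not the gpasswd/passwd code:

    #define _DEFAULT_SOURCE       /* exposes explicit_bzero() on glibc */
    #include <stdio.h>
    #include <string.h>

    #define DEMO_PASS_MAX 128

    static int confirm_password(const char *first, const char *second)
    {
        char pass[DEMO_PASS_MAX];

        snprintf(pass, sizeof(pass), "%s", first);  /* keep a copy of attempt 1 */

        if (second == NULL) {                       /* second prompt failed */
            explicit_bzero(pass, sizeof(pass));     /* wipe before bailing out */
            return -1;
        }
        if (strcmp(pass, second) != 0) {            /* mismatch */
            explicit_bzero(pass, sizeof(pass));
            return -1;
        }
        explicit_bzero(pass, sizeof(pass));         /* success path wipes too */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", confirm_password("s3cret", NULL));      /* -1, copy wiped */
        printf("%d\n", confirm_password("s3cret", "s3cret"));  /*  0 */
        return 0;
    }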
diff --git a/meta/recipes-extended/shadow/files/login.defs_shadow-sysroot b/meta/recipes-extended/shadow/files/login.defs_shadow-sysroot
index 8a68dd341a..09df77d2e7 100644
--- a/meta/recipes-extended/shadow/files/login.defs_shadow-sysroot
+++ b/meta/recipes-extended/shadow/files/login.defs_shadow-sysroot
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause OR Artistic-1.0
#
# /etc/login.defs - Configuration control definitions for the shadow package.
#
diff --git a/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb b/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb
index e05fa237a2..6580bd9166 100644
--- a/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb
+++ b/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "http://github.com/shadow-maint/shadow"
BUGTRACKER = "http://github.com/shadow-maint/shadow/issues"
SECTION = "base utils"
LICENSE = "BSD-3-Clause | Artistic-1.0"
-LIC_FILES_CHKSUM = "file://login.defs_shadow-sysroot;md5=25e2f2de4dfc8f966ac5cdfce45cd7d5"
+LIC_FILES_CHKSUM = "file://login.defs_shadow-sysroot;endline=1;md5=ceddfb61608e4db87012499555184aed"
DEPENDS = "base-passwd"
diff --git a/meta/recipes-extended/shadow/shadow.inc b/meta/recipes-extended/shadow/shadow.inc
index f5fdf436f7..57b5002e8b 100644
--- a/meta/recipes-extended/shadow/shadow.inc
+++ b/meta/recipes-extended/shadow/shadow.inc
@@ -16,6 +16,10 @@ SRC_URI = "https://github.com/shadow-maint/shadow/releases/download/v${PV}/${BP}
${@bb.utils.contains('PACKAGECONFIG', 'pam', '${PAM_SRC_URI}', '', d)} \
file://shadow-relaxed-usernames.patch \
file://useradd \
+ file://CVE-2023-29383.patch \
+ file://0001-Overhaul-valid_field.patch \
+ file://CVE-2023-4641-0001.patch \
+ file://CVE-2023-4641-0002.patch \
"
SRC_URI:append:class-target = " \
@@ -26,6 +30,7 @@ SRC_URI:append:class-target = " \
SRC_URI:append:class-native = " \
file://0001-Disable-use-of-syslog-for-sysroot.patch \
file://commonio.c-fix-unexpected-open-failure-in-chroot-env.patch \
+ file://0001-Drop-nsswitch.conf-message-when-not-in-place-eg.-musl.patch \
"
SRC_URI:append:class-nativesdk = " \
file://0001-Disable-use-of-syslog-for-sysroot.patch \
@@ -33,6 +38,7 @@ SRC_URI:append:class-nativesdk = " \
SRC_URI[sha256sum] = "f262089be6a1011d50ec7849e14571b7b2e788334368f3dccb718513f17935ed"
+
# Additional Policy files for PAM
PAM_SRC_URI = "file://pam.d/chfn \
file://pam.d/chpasswd \
@@ -149,6 +155,13 @@ do_install:append() {
# Handle link properly after rename, otherwise missing files would
# lead rpm failed dependencies.
ln -sf newgrp.${BPN} ${D}${bindir}/sg
+
+ # usermod requires the subuid/subgid files to be in place before being
+ # able to use the -v/-V flags otherwise it fails:
+ # usermod: /etc/subuid does not exist, you cannot use the flags -v or -V
+ install -d ${D}${sysconfdir}
+ touch ${D}${sysconfdir}/subuid
+ touch ${D}${sysconfdir}/subgid
}
PACKAGES =+ "${PN}-base"
diff --git a/meta/recipes-extended/shadow/shadow_4.11.1.bb b/meta/recipes-extended/shadow/shadow_4.11.1.bb
index 40b11345c9..d1a3fd5593 100644
--- a/meta/recipes-extended/shadow/shadow_4.11.1.bb
+++ b/meta/recipes-extended/shadow/shadow_4.11.1.bb
@@ -9,3 +9,6 @@ BBCLASSEXTEND = "native nativesdk"
# Severity is low and marked as closed and won't fix.
# https://bugzilla.redhat.com/show_bug.cgi?id=884658
CVE_CHECK_IGNORE += "CVE-2013-4235"
+
+# This is an issue for a different shadow
+CVE_CHECK_IGNORE += "CVE-2016-15024"
diff --git a/meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-Makefile-avoid-calling-sync.patch b/meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-Makefile-avoid-calling-sync.patch
new file mode 100644
index 0000000000..fec8c524eb
--- /dev/null
+++ b/meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-Makefile-avoid-calling-sync.patch
@@ -0,0 +1,35 @@
+From 1d1801902a4944c6f5fa521c19b32fbac7342a0c Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.i.king@gmail.com>
+Date: Sat, 6 Aug 2022 13:05:59 +0000
+Subject: [PATCH] Makefile: avoid calling sync
+
+Original commit message:
+Makefile: use ld-gold if it is available
+
+Speed up linking by using ld-gold if it is available. Add build
+time detection to see if the compiler allows it
+
+MJ: backported only the "sync" removal from Makefile as calling
+ it from do_compile in the middle of a big OE world build harms
+ the build time.
+
+Upstream-Status: Backport [V0.14.04 c10e5c3f9f5560a085279f4c4b399c2f34cb897d]
+
+Signed-off-by: Colin Ian King <colin.i.king@gmail.com>
+Signed-off-by: Martin Jansa <martin.jansa@gmail.com>
+---
+ Makefile | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index f8f71c54b..23db4c612 100644
+--- a/Makefile
++++ b/Makefile
+@@ -425,7 +425,6 @@ OBJS += $(CONFIG_OBJS)
+ stress-ng: $(OBJS)
+ $(Q)echo "LD $@"
+ $(V)$(CC) $(CPPFLAGS) $(CFLAGS) $(OBJS) -lm $(LDFLAGS) -o $@
+- $(V)sync
+
+ config.h:
+ +$(MAKE) -f Makefile.config STATIC=$(STATIC) -j
diff --git a/meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-stress-cpu-disable-float128-math-on-powerpc64-to-avo.patch b/meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-stress-cpu-disable-float128-math-on-powerpc64-to-avo.patch
new file mode 100644
index 0000000000..bb35b3030a
--- /dev/null
+++ b/meta/recipes-extended/stress-ng/stress-ng-0.13.12/0001-stress-cpu-disable-float128-math-on-powerpc64-to-avo.patch
@@ -0,0 +1,43 @@
+From ea9ee4dd64ee88e03a959b2c694aa8feb53c7e78 Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Wed, 28 Sep 2022 16:47:24 +0800
+Subject: [PATCH] stress-cpu: disable float128 math on powerpc64 to avoid
+ SIGILL
+
+float128 requires instructions of xsmaddqp and xsmsubqp which are added to
+qemu since v7.0 by the following commit.
+https://github.com/qemu/qemu/commit/3bb1aed246d7b59ceee625a82628f7369d492a8f
+
+While kirkstone is still at v6.2 and thus experiences SIGILL as follows
+root@qemuppc64:~# stress-ng --cpu 2 --timeout 30s
+stress-ng: info: [972] setting to a 30 second run per stressor
+stress-ng: info: [972] dispatching hogs: 2 cpu
+stress-ng: info: [973] stressor terminated with unexpected signal signal 4 'SIGILL'
+<snip>
+
+Upstream-Status: Inappropriate [This is specific to kirkstone since qemu on
+master branch has upgraded to v7.1.]
+
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+---
+ stress-cpu.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/stress-cpu.c b/stress-cpu.c
+index 0a08f1d1..2849e715 100644
+--- a/stress-cpu.c
++++ b/stress-cpu.c
+@@ -41,6 +41,10 @@
+ #undef HAVE_FLOAT_DECIMAL128
+ #endif
+
++#if defined(STRESS_ARCH_PPC64)
++#undef HAVE_FLOAT128
++#endif
++
+ #define GAMMA (0.57721566490153286060651209008240243104215933593992L)
+ #define OMEGA (0.56714329040978387299996866221035554975381578718651L)
+ #define PSI (3.35988566624317755317201130291892717968890513373197L)
+--
+2.25.1
+
diff --git a/meta/recipes-extended/stress-ng/stress-ng_0.13.12.bb b/meta/recipes-extended/stress-ng/stress-ng_0.13.12.bb
index fe177a4de0..72dafddaf8 100644
--- a/meta/recipes-extended/stress-ng/stress-ng_0.13.12.bb
+++ b/meta/recipes-extended/stress-ng/stress-ng_0.13.12.bb
@@ -5,7 +5,10 @@ HOMEPAGE = "https://github.com/ColinIanKing/stress-ng#readme"
LICENSE = "GPL-2.0-only"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "git://github.com/ColinIanKing/stress-ng.git;protocol=https;branch=master"
+SRC_URI = "git://github.com/ColinIanKing/stress-ng.git;protocol=https;branch=master \
+ file://0001-stress-cpu-disable-float128-math-on-powerpc64-to-avo.patch \
+ file://0001-Makefile-avoid-calling-sync.patch \
+ "
SRCREV = "f59bcb2fe1e25042e77d5e4942f72bfa026fa305"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-extended/sudo/files/0001-lib-util-mksigname.c-correctly-include-header-for-ou.patch b/meta/recipes-extended/sudo/files/0001-lib-util-mksigname.c-correctly-include-header-for-ou.patch
deleted file mode 100644
index f63ed553be..0000000000
--- a/meta/recipes-extended/sudo/files/0001-lib-util-mksigname.c-correctly-include-header-for-ou.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From f993c5c88faacc43971899aae2168ffb3e34dc80 Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex@linutronix.de>
-Date: Fri, 24 Sep 2021 13:36:24 +0200
-Subject: [PATCH] lib/util/mksigname.c: correctly include header for out of
- tree builds
-
-Upstream-Status: Submitted [https://github.com/sudo-project/sudo/pull/123]
-Signed-off-by: Alexander Kanavin <alex@linutronix.de>
----
- lib/util/mksigname.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/lib/util/mksigname.c b/lib/util/mksigname.c
-index de8b1ad..0a69e7e 100644
---- a/lib/util/mksigname.c
-+++ b/lib/util/mksigname.c
-@@ -36,7 +36,7 @@ main(int argc, char *argv[])
- {
- unsigned int i;
-
--#include "mksigname.h"
-+#include "lib/util/mksigname.h"
-
- printf("const char *const sudo_sys_signame[] = {\n");
- for (i = 0; i < nitems(sudo_sys_signame); i++) {
diff --git a/meta/recipes-extended/sudo/files/0001-sudo.conf.in-fix-conflict-with-multilib.patch b/meta/recipes-extended/sudo/files/0001-sudo.conf.in-fix-conflict-with-multilib.patch
index f4fc376bb8..041c717e00 100644
--- a/meta/recipes-extended/sudo/files/0001-sudo.conf.in-fix-conflict-with-multilib.patch
+++ b/meta/recipes-extended/sudo/files/0001-sudo.conf.in-fix-conflict-with-multilib.patch
@@ -1,4 +1,7 @@
-sudo.conf.in: fix conflict with multilib
+From 6e835350b7413210c410d3578cfab804186b7a4f Mon Sep 17 00:00:00 2001
+From: Kai Kang <kai.kang@windriver.com>
+Date: Tue, 17 Nov 2020 11:13:40 +0800
+Subject: [PATCH] sudo.conf.in: fix conflict with multilib
When pass ${libdir} to --libexecdir of sudo, it fails to install sudo
and lib32-sudo at same time:
@@ -12,12 +15,13 @@ Update the comments in sudo.conf.in to avoid the conflict.
Signed-off-by: Kai Kang <kai.kang@windriver.com>
Upstream-Status: Inappropriate [OE configuration specific]
+
---
examples/sudo.conf.in | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/examples/sudo.conf.in b/examples/sudo.conf.in
-index 6535d3a..50afc8f 100644
+index 2187457..0908d24 100644
--- a/examples/sudo.conf.in
+++ b/examples/sudo.conf.in
@@ -4,7 +4,7 @@
@@ -33,8 +37,8 @@ index 6535d3a..50afc8f 100644
# The compiled-in value is usually sufficient and should only be changed
# if you rename or move the sudo_intercept.so file.
#
--#Path intercept @plugindir@/sudo_intercept.so
-+#Path intercept $plugindir/sudo_intercept.so
+-#Path intercept @intercept_file@
++#Path intercept $intercept_file
#
# Sudo noexec:
@@ -42,8 +46,8 @@ index 6535d3a..50afc8f 100644
# The compiled-in value is usually sufficient and should only be changed
# if you rename or move the sudo_noexec.so file.
#
--#Path noexec @plugindir@/sudo_noexec.so
-+#Path noexec $plugindir/sudo_noexec.so
+-#Path noexec @noexec_file@
++#Path noexec $noexec_file
#
# Sudo plugin directory:
@@ -55,7 +59,4 @@ index 6535d3a..50afc8f 100644
+#Path plugin_dir $plugindir
#
- # Sudo developer mode:
---
-2.17.1
-
+ # Core dumps:
diff --git a/meta/recipes-extended/sudo/sudo.inc b/meta/recipes-extended/sudo/sudo.inc
index 8947c46129..d3b6bf1ad8 100644
--- a/meta/recipes-extended/sudo/sudo.inc
+++ b/meta/recipes-extended/sudo/sudo.inc
@@ -4,11 +4,10 @@ HOMEPAGE = "http://www.sudo.ws"
BUGTRACKER = "http://www.sudo.ws/bugs/"
SECTION = "admin"
LICENSE = "ISC & BSD-3-Clause & BSD-2-Clause & Zlib"
-LIC_FILES_CHKSUM = "file://LICENSE.md;md5=16cf60b466f3a0606427a7b624a3a670 \
+LIC_FILES_CHKSUM = "file://LICENSE.md;md5=5100e20d35f9015f9eef6bdb27ba194f \
file://plugins/sudoers/redblack.c;beginline=1;endline=46;md5=03e35317699ba00b496251e0dfe9f109 \
file://lib/util/reallocarray.c;beginline=3;endline=15;md5=397dd45c7683e90b9f8bf24638cf03bf \
file://lib/util/fnmatch.c;beginline=3;endline=27;md5=004d7d2866ba1f5b41174906849d2e0f \
- file://lib/util/getcwd.c;beginline=2;endline=27;md5=50f8d9667750e18dea4e84a935c12009 \
file://lib/util/glob.c;beginline=2;endline=31;md5=2852f68687544e3eb8a0a61665506f0e \
file://lib/util/snprintf.c;beginline=3;endline=33;md5=b70df6179969e38fcf68da91b53b8029 \
file://include/sudo_queue.h;beginline=2;endline=27;md5=ad578e9664d17a010b63e4bc0576ee8d \
@@ -29,12 +28,12 @@ EXTRA_OECONF = "--with-editor=${base_bindir}/vi --with-env-editor"
EXTRA_OECONF:append:libc-musl = " --disable-hardening "
do_compile:prepend () {
- # Remove build host references from sudo_usage.h
+ # Remove build host references from config.h
sed -i \
-e 's,--with-libtool-sysroot=${STAGING_DIR_TARGET},,g' \
-e 's,--build=${BUILD_SYS},,g' \
-e 's,--host=${HOST_SYS},,g' \
- ${B}/src/sudo_usage.h
+ ${B}/config.h
}
# Explicitly create ${localstatedir}/lib before do_install to ensure
diff --git a/meta/recipes-extended/sudo/sudo_1.9.10.bb b/meta/recipes-extended/sudo/sudo_1.9.15p2.bb
index aa0d814ed7..431dfba3c2 100644
--- a/meta/recipes-extended/sudo/sudo_1.9.10.bb
+++ b/meta/recipes-extended/sudo/sudo_1.9.15p2.bb
@@ -3,12 +3,11 @@ require sudo.inc
SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
file://0001-sudo.conf.in-fix-conflict-with-multilib.patch \
- file://0001-lib-util-mksigname.c-correctly-include-header-for-ou.patch \
"
PAM_SRC_URI = "file://sudo.pam"
-SRC_URI[sha256sum] = "44a1461098e7c7b8e6ac597499c24fb2e43748c0c139a8b4944e57d1349a64f4"
+SRC_URI[sha256sum] = "199c0cdbfa7efcfffa9c88684a8e2fb206a62b70a316507e4a91c89c873bbcc8"
DEPENDS += " virtual/crypt ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}"
RDEPENDS:${PN} += " ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-limits pam-plugin-keyinit', '', d)}"
diff --git a/meta/recipes-extended/sysklogd/files/0001-syslogd.service-KillMode-process-is-not-recommended-.patch b/meta/recipes-extended/sysklogd/files/0001-syslogd.service-KillMode-process-is-not-recommended-.patch
new file mode 100644
index 0000000000..6c7e7cea44
--- /dev/null
+++ b/meta/recipes-extended/sysklogd/files/0001-syslogd.service-KillMode-process-is-not-recommended-.patch
@@ -0,0 +1,33 @@
+From b732dd0001c66f3ff1e0aef919c84ca9f0f81252 Mon Sep 17 00:00:00 2001
+From: Joachim Wiberg <troglobit@gmail.com>
+Date: Sat, 22 Apr 2023 07:40:24 +0200
+Subject: [PATCH 1/2] syslogd.service: KillMode=process is not recommended,
+ drop
+
+The default 'control-group' ensures all processes started by sysklogd
+are stopped when the service is stopped, which is what we want.
+
+Signed-off-by: Joachim Wiberg <troglobit@gmail.com>
+
+Upstream-Status: Backport [https://github.com/troglobit/sysklogd/commit/c82c004de7e25e770039cba5d6a34c30dd548533]
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ syslogd.service.in | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/syslogd.service.in b/syslogd.service.in
+index 91e080a..d614c5f 100644
+--- a/syslogd.service.in
++++ b/syslogd.service.in
+@@ -9,7 +9,6 @@ EnvironmentFile=-@SYSCONFDIR@/default/syslogd
+ ExecStart=@SBINDIR@/syslogd -F -p /run/systemd/journal/syslog $SYSLOGD_OPTS
+ StandardOutput=null
+ Restart=on-failure
+-KillMode=process
+
+ [Install]
+ WantedBy=multi-user.target
+--
+2.25.1
+
diff --git a/meta/recipes-extended/sysklogd/files/0002-Fix-62-early-log-messages-lost-when-running-in-syste.patch b/meta/recipes-extended/sysklogd/files/0002-Fix-62-early-log-messages-lost-when-running-in-syste.patch
new file mode 100644
index 0000000000..78ae57eeeb
--- /dev/null
+++ b/meta/recipes-extended/sysklogd/files/0002-Fix-62-early-log-messages-lost-when-running-in-syste.patch
@@ -0,0 +1,75 @@
+From ba8156eab79784ef816958327e701923890e98f7 Mon Sep 17 00:00:00 2001
+From: Joachim Wiberg <troglobit@gmail.com>
+Date: Sat, 22 Apr 2023 08:27:57 +0200
+Subject: [PATCH 2/2] Fix #62: early log messages lost when running in systemd
+
+This is a follow-up to d7576c7 which initially added support for running
+in systemd based systems. Since the unit file sources the syslog.socket
+we have /run/systemd/journal/syslog open already on descriptor 3. All
+we need to do is verify that's the mode syslogd runs in.
+
+Signed-off-by: Joachim Wiberg <troglobit@gmail.com>
+
+Upstream-Status: Backport [https://github.com/troglobit/sysklogd/commit/7ec64e5f9c1bc284792d028647fb36ef3e64dff7]
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ src/syslogd.c | 21 +++++++++++++++------
+ syslogd.service.in | 2 +-
+ 2 files changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/src/syslogd.c b/src/syslogd.c
+index fa4303f..e96ca9a 100644
+--- a/src/syslogd.c
++++ b/src/syslogd.c
+@@ -162,6 +162,7 @@ void untty(void);
+ static void parsemsg(const char *from, char *msg);
+ static int opensys(const char *file);
+ static void printsys(char *msg);
++static void unix_cb(int sd, void *arg);
+ static void logmsg(struct buf_msg *buffer);
+ static void fprintlog_first(struct filed *f, struct buf_msg *buffer);
+ static void fprintlog_successive(struct filed *f, int flags);
+@@ -436,12 +437,20 @@ int main(int argc, char *argv[])
+ .pe_serv = "syslog",
+ });
+
+- /* Default to _PATH_LOG for the UNIX domain socket */
+- if (!pflag)
+- addpeer(&(struct peer) {
+- .pe_name = _PATH_LOG,
+- .pe_mode = 0666,
+- });
++ /* Figure out where to read system log messages from */
++ if (!pflag) {
++ /* Do we run under systemd-journald (Requires=syslog.socket)? */
++ if (fcntl(3, F_GETFD) != -1) {
++ if (socket_register(3, NULL, unix_cb, NULL) == -1)
++ err(1, "failed registering syslog.socket (3)");
++ } else {
++ /* Default to _PATH_LOG for the UNIX domain socket */
++ addpeer(&(struct peer) {
++ .pe_name = _PATH_LOG,
++ .pe_mode = 0666,
++ });
++ }
++ }
+
+ if (!Foreground && !Debug) {
+ ppid = waitdaemon(30);
+diff --git a/syslogd.service.in b/syslogd.service.in
+index d614c5f..bc82af9 100644
+--- a/syslogd.service.in
++++ b/syslogd.service.in
+@@ -6,7 +6,7 @@ Requires=syslog.socket
+
+ [Service]
+ EnvironmentFile=-@SYSCONFDIR@/default/syslogd
+-ExecStart=@SBINDIR@/syslogd -F -p /run/systemd/journal/syslog $SYSLOGD_OPTS
++ExecStart=@SBINDIR@/syslogd -F $SYSLOGD_OPTS
+ StandardOutput=null
+ Restart=on-failure
+
+--
+2.25.1
+
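The descriptor probe added by the patch builds on the systemd socket-activation convention that passed sockets start at file descriptor 3 (SD_LISTEN_FDS_START); if fcntl(3, F_GETFD) succeeds, the unit's syslog.socket was inherited and the daemon should read from it instead of opening /dev/log itself. A tiny standalone version of that probe:

    #include <stdio.h>
    #include <fcntl.h>

    #define SD_LISTEN_FDS_START 3   /* first fd passed by systemd */

    int main(void)
    {
        if (fcntl(SD_LISTEN_FDS_START, F_GETFD) != -1)
            printf("fd 3 is open: assume syslog.socket was passed in\n");
        else
            printf("fd 3 is closed: open the log socket ourselves\n");
        return 0;
    }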
diff --git a/meta/recipes-extended/sysklogd/sysklogd_2.3.0.bb b/meta/recipes-extended/sysklogd/sysklogd_2.3.0.bb
index 7043f3d391..0dc5ef93e2 100644
--- a/meta/recipes-extended/sysklogd/sysklogd_2.3.0.bb
+++ b/meta/recipes-extended/sysklogd/sysklogd_2.3.0.bb
@@ -12,6 +12,8 @@ inherit update-rc.d update-alternatives systemd autotools
SRC_URI = "git://github.com/troglobit/sysklogd.git;branch=master;protocol=https \
file://sysklogd \
+ file://0001-syslogd.service-KillMode-process-is-not-recommended-.patch \
+ file://0002-Fix-62-early-log-messages-lost-when-running-in-syste.patch \
"
SRCREV = "03c2c9c68d5d02675326527774e7e9cba3490ba0"
diff --git a/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch b/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch
new file mode 100644
index 0000000000..dce7b0d61f
--- /dev/null
+++ b/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch
@@ -0,0 +1,93 @@
+From 9c4eaf150662ad40607923389d4519bc83b93540 Mon Sep 17 00:00:00 2001
+From: Sebastien <seb@fedora-2.home>
+Date: Sat, 15 Oct 2022 14:24:22 +0200
+Subject: [PATCH] Fix size_t overflow in sa_common.c (GHSL-2022-074)
+
+The allocate_structures function located in sa_common.c insufficiently
+checks bounds before arithmetic multiplication, allowing for an
+overflow in the size allocated for the buffer representing system
+activities.
+
+This patch checks that the post-multiplied value is not greater than
+UINT_MAX.
+
+Signed-off-by: Sebastien <seb@fedora-2.home>
+
+Upstream-Status: Backport from
+[https://github.com/sysstat/sysstat/commit/a953ee3307d51255cc96e1f211882e97f795eed9]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ common.c | 25 +++++++++++++++++++++++++
+ common.h | 2 ++
+ sa_common.c | 6 ++++++
+ 3 files changed, 33 insertions(+)
+
+diff --git a/common.c b/common.c
+index 81c7762..1a84b05 100644
+--- a/common.c
++++ b/common.c
+@@ -1655,4 +1655,29 @@ int parse_values(char *strargv, unsigned char bitmap[], int max_val, const char
+
+ return 0;
+ }
++
++/*
++ ***************************************************************************
++ * Check if the multiplication of the 3 values may be greater than UINT_MAX.
++ *
++ * IN:
++ * @val1 First value.
++ * @val2 Second value.
++ * @val3 Third value.
++ ***************************************************************************
++ */
++void check_overflow(size_t val1, size_t val2, size_t val3)
++{
++ if ((unsigned long long) val1 *
++ (unsigned long long) val2 *
++ (unsigned long long) val3 > UINT_MAX) {
++#ifdef DEBUG
++ fprintf(stderr, "%s: Overflow detected (%llu). Aborting...\n",
++ __FUNCTION__,
++ (unsigned long long) val1 * (unsigned long long) val2 * (unsigned long long) val3);
++#endif
++ exit(4);
++ }
++}
++
+ #endif /* SOURCE_SADC undefined */
+diff --git a/common.h b/common.h
+index 55b6657..e8ab98a 100644
+--- a/common.h
++++ b/common.h
+@@ -260,6 +260,8 @@ int check_dir
+ (char *);
+
+ #ifndef SOURCE_SADC
++void check_overflow
++ (size_t, size_t, size_t);
+ int count_bits
+ (void *, int);
+ int count_csvalues
+diff --git a/sa_common.c b/sa_common.c
+index 3699a84..b2cec4a 100644
+--- a/sa_common.c
++++ b/sa_common.c
+@@ -459,7 +459,13 @@ void allocate_structures(struct activity *act[])
+ int i, j;
+
+ for (i = 0; i < NR_ACT; i++) {
++
+ if (act[i]->nr_ini > 0) {
++
++ /* Look for a possible overflow */
++ check_overflow((size_t) act[i]->msize, (size_t) act[i]->nr_ini,
++ (size_t) act[i]->nr2);
++
+ for (j = 0; j < 3; j++) {
+ SREALLOC(act[i]->buf[j], void,
+ (size_t) act[i]->msize * (size_t) act[i]->nr_ini * (size_t) act[i]->nr2);
+--
+2.34.1
+
diff --git a/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch b/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch
new file mode 100644
index 0000000000..3a12f7a3ed
--- /dev/null
+++ b/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch
@@ -0,0 +1,80 @@
+From e806a902cc90a0b87da00854de8d5fd8222540fc Mon Sep 17 00:00:00 2001
+From: Pavel Kopylov <pkopylov@>
+Date: Wed, 17 May 2023 11:33:45 +0200
+Subject: [PATCH] Fix an overflow which is still possible for some values.
+
+Upstream-Status: Backport [https://github.com/sysstat/sysstat/commit/954ff2e2673c]
+CVE: CVE-2023-33204
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@...>
+Signed-off-by: Sanjay Chitroda <schitrod@...>
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ common.c | 18 ++++++++++--------
+ common.h | 2 +-
+ sa_common.c | 4 ++--
+ 3 files changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/common.c b/common.c
+index db9b0ed..e05c5bb 100644
+--- a/common.c
++++ b/common.c
+@@ -1640,17 +1640,19 @@ int parse_values(char *strargv, unsigned char bitmap[], int max_val, const char
+ * @val3 Third value.
+ ***************************************************************************
+ */
+-void check_overflow(size_t val1, size_t val2, size_t val3)
++void check_overflow(unsigned int val1, unsigned int val2,
++ unsigned int val3)
+ {
+- if ((unsigned long long) val1 *
+- (unsigned long long) val2 *
+- (unsigned long long) val3 > UINT_MAX) {
++ if ((val1 != 0) && (val2 != 0) && (val3 != 0) &&
++ (((unsigned long long) UINT_MAX / (unsigned long long) val1 <
++ (unsigned long long) val2) ||
++ ((unsigned long long) UINT_MAX / ((unsigned long long) val1 * (unsigned long long) val2) <
++ (unsigned long long) val3))) {
+ #ifdef DEBUG
+- fprintf(stderr, "%s: Overflow detected (%llu). Aborting...\n",
+- __FUNCTION__,
+- (unsigned long long) val1 * (unsigned long long) val2 * (unsigned long long) val3);
++ fprintf(stderr, "%s: Overflow detected (%u,%u,%u). Aborting...\n",
++ __FUNCTION__, val1, val2, val3);
+ #endif
+- exit(4);
++ exit(4);
+ }
+ }
+
+diff --git a/common.h b/common.h
+index 0ac5896..b2ffe9f 100644
+--- a/common.h
++++ b/common.h
+@@ -256,7 +256,7 @@ int check_dir
+
+ #ifndef SOURCE_SADC
+ void check_overflow
+- (size_t, size_t, size_t);
++ (unsigned int, unsigned int, unsigned int);
+ int count_bits
+ (void *, int);
+ int count_csvalues
+diff --git a/sa_common.c b/sa_common.c
+index 1b8fcaa..1144cfe 100644
+--- a/sa_common.c
++++ b/sa_common.c
+@@ -452,8 +452,8 @@ void allocate_structures(struct activity *act[])
+ if (act[i]->nr_ini > 0) {
+
+ /* Look for a possible overflow */
+- check_overflow((size_t) act[i]->msize, (size_t) act[i]->nr_ini,
+- (size_t) act[i]->nr2);
++ check_overflow((unsigned int) act[i]->msize, (unsigned int) act[i]->nr_ini,
++ (unsigned int) act[i]->nr2);
+
+ for (j = 0; j < 3; j++) {
+ SREALLOC(act[i]->buf[j], void,
+--
+2.25.1
+
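The follow-up check works by division rather than by widening: a * b * c exceeds UINT_MAX exactly when UINT_MAX / a < b or UINT_MAX / (a * b) < c, so no intermediate product that could itself wrap is ever formed. A standalone restatement of that test (names are illustrative, not the sysstat API):

    #include <stdio.h>
    #include <limits.h>

    static int product_fits_uint(unsigned int a, unsigned int b, unsigned int c)
    {
        if (a == 0 || b == 0 || c == 0)
            return 1;                                          /* product is 0 */
        if ((unsigned long long)UINT_MAX / a < b)
            return 0;                                          /* a * b too big */
        if ((unsigned long long)UINT_MAX / ((unsigned long long)a * b) < c)
            return 0;                                          /* a * b * c too big */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", product_fits_uint(1024, 1024, 1024));  /* 1: 2^30 fits */
        printf("%d\n", product_fits_uint(65536, 65536, 2));   /* 0: overflows */
        return 0;
    }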
diff --git a/meta/recipes-extended/sysstat/sysstat_12.4.5.bb b/meta/recipes-extended/sysstat/sysstat_12.4.5.bb
index fe3db4d8a5..f8a950e8a2 100644
--- a/meta/recipes-extended/sysstat/sysstat_12.4.5.bb
+++ b/meta/recipes-extended/sysstat/sysstat_12.4.5.bb
@@ -2,6 +2,8 @@ require sysstat.inc
LIC_FILES_CHKSUM = "file://COPYING;md5=a23a74b3f4caf9616230789d94217acb"
-SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch"
-
+SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch \
+ file://CVE-2022-39377.patch \
+ file://CVE-2023-33204.patch \
+ "
SRC_URI[sha256sum] = "ef445acea301bbb996e410842f6290a8d049e884d4868cfef7e85dc04b7eee5b"
diff --git a/meta/recipes-extended/tar/tar_1.34.bb b/meta/recipes-extended/tar/tar_1.35.bb
index 7307cd57a2..4dbd418b60 100644
--- a/meta/recipes-extended/tar/tar_1.34.bb
+++ b/meta/recipes-extended/tar/tar_1.35.bb
@@ -4,11 +4,11 @@ or disk archive, and can restore individual files from the archive."
HOMEPAGE = "http://www.gnu.org/software/tar/"
SECTION = "base"
LICENSE = "GPL-3.0-only"
-LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
+LIC_FILES_CHKSUM = "file://COPYING;md5=1ebbd3e34237af26da5dc08a4e440464"
SRC_URI = "${GNU_MIRROR}/tar/tar-${PV}.tar.bz2"
-SRC_URI[sha256sum] = "b44cc67f8a1f6b0250b7c860e952b37e8ed932a90bd9b1862a511079255646ff"
+SRC_URI[sha256sum] = "7edb8886a3dc69420a1446e1e2d061922b642f1cf632d2cd0f9ee7e690775985"
inherit autotools gettext texinfo
diff --git a/meta/recipes-extended/tcp-wrappers/tcp-wrappers-7.6/0001-Fix-implicit-function-declaration-warnings.patch b/meta/recipes-extended/tcp-wrappers/tcp-wrappers-7.6/0001-Fix-implicit-function-declaration-warnings.patch
new file mode 100644
index 0000000000..ec793ac8ff
--- /dev/null
+++ b/meta/recipes-extended/tcp-wrappers/tcp-wrappers-7.6/0001-Fix-implicit-function-declaration-warnings.patch
@@ -0,0 +1,109 @@
+From 9c97b5db237a793e0d1b6b0241570bdc6e35ee24 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sun, 7 Aug 2022 17:42:24 -0700
+Subject: [PATCH] Fix implicit-function-declaration warnings
+
+These are seen with clang-15+
+
+Upstream-Status: Inappropriate [upstream is dead]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ hosts_access.c | 3 +++
+ safe_finger.c | 1 +
+ shell_cmd.c | 3 +++
+ tcpd.c | 2 +-
+ tcpdchk.c | 1 +
+ workarounds.c | 1 +
+ 6 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/hosts_access.c b/hosts_access.c
+index 0133e5e..58697ea 100644
+--- a/hosts_access.c
++++ b/hosts_access.c
+@@ -33,6 +33,7 @@ static char sccsid[] = "@(#) hosts_access.c 1.21 97/02/12 02:13:22";
+ #endif
+ #include <netinet/in.h>
+ #include <arpa/inet.h>
++#include <rpcsvc/ypclnt.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <syslog.h>
+@@ -45,6 +46,8 @@ static char sccsid[] = "@(#) hosts_access.c 1.21 97/02/12 02:13:22";
+ #endif
+
+ extern int errno;
++extern int match_pattern_ylo(const char *s, const char *pattern);
++extern unsigned long cidr_mask_addr(char* str);
+
+ #ifndef INADDR_NONE
+ #define INADDR_NONE (-1) /* XXX should be 0xffffffff */
+diff --git a/safe_finger.c b/safe_finger.c
+index 23afab1..a6458fb 100644
+--- a/safe_finger.c
++++ b/safe_finger.c
+@@ -34,6 +34,7 @@ static char sccsid[] = "@(#) safe_finger.c 1.4 94/12/28 17:42:41";
+ #include <syslog.h>
+
+ extern void exit();
++extern int pipe_stdin(char **argv);
+
+ /* Local stuff */
+
+diff --git a/shell_cmd.c b/shell_cmd.c
+index 62d31bc..a566092 100644
+--- a/shell_cmd.c
++++ b/shell_cmd.c
+@@ -16,10 +16,13 @@ static char sccsid[] = "@(#) shell_cmd.c 1.5 94/12/28 17:42:44";
+
+ #include <sys/types.h>
+ #include <sys/param.h>
++#include <sys/wait.h>
++#include <fcntl.h>
+ #include <signal.h>
+ #include <stdio.h>
+ #include <syslog.h>
+ #include <string.h>
++#include <unistd.h>
+
+ extern void exit();
+
+diff --git a/tcpd.c b/tcpd.c
+index dc9ff17..4353caa 100644
+--- a/tcpd.c
++++ b/tcpd.c
+@@ -46,7 +46,7 @@ void fix_options(struct request_info *);
+ int allow_severity = SEVERITY; /* run-time adjustable */
+ int deny_severity = LOG_WARNING; /* ditto */
+
+-main(argc, argv)
++void main(argc, argv)
+ int argc;
+ char **argv;
+ {
+diff --git a/tcpdchk.c b/tcpdchk.c
+index 5dca8bd..67c12ce 100644
+--- a/tcpdchk.c
++++ b/tcpdchk.c
+@@ -38,6 +38,7 @@ static char sccsid[] = "@(#) tcpdchk.c 1.8 97/02/12 02:13:25";
+
+ extern int errno;
+ extern void exit();
++extern unsigned long cidr_mask_addr(char* str);
+ extern int optind;
+ extern char *optarg;
+
+diff --git a/workarounds.c b/workarounds.c
+index b22b378..6335049 100644
+--- a/workarounds.c
++++ b/workarounds.c
+@@ -21,6 +21,7 @@ char sccsid[] = "@(#) workarounds.c 1.6 96/03/19 16:22:25";
+ #include <stdio.h>
+ #include <syslog.h>
+ #include <string.h>
++#include <unistd.h>
+
+ extern int errno;
+
+--
+2.37.1
+
diff --git a/meta/recipes-extended/tcp-wrappers/tcp-wrappers_7.6.bb b/meta/recipes-extended/tcp-wrappers/tcp-wrappers_7.6.bb
index 814d7fd913..8137d257c8 100644
--- a/meta/recipes-extended/tcp-wrappers/tcp-wrappers_7.6.bb
+++ b/meta/recipes-extended/tcp-wrappers/tcp-wrappers_7.6.bb
@@ -50,6 +50,7 @@ SRC_URI = "http://ftp.porcupine.org/pub/security/tcp_wrappers_${PV}.tar.gz \
file://fix_warnings.patch \
file://fix_warnings2.patch \
file://0001-Remove-fgets-extern-declaration.patch \
+ file://0001-Fix-implicit-function-declaration-warnings.patch \
"
SRC_URI[md5sum] = "e6fa25f71226d090f34de3f6b122fb5a"
diff --git a/meta/recipes-extended/timezone/timezone.inc b/meta/recipes-extended/timezone/timezone.inc
index cdd1a2ac3c..4734adcc08 100644
--- a/meta/recipes-extended/timezone/timezone.inc
+++ b/meta/recipes-extended/timezone/timezone.inc
@@ -6,14 +6,15 @@ SECTION = "base"
LICENSE = "PD & BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=c679c9d6b02bc2757b3eaf8f53c43fba"
-PV = "2022a"
+PV = "2024a"
-SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz;name=tzcode \
- http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata \
+SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz;name=tzcode;subdir=tz \
+ http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata;subdir=tz \
"
-UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones"
+S = "${WORKDIR}/tz"
-SRC_URI[tzcode.sha256sum] = "f8575e7e33be9ee265df2081092526b81c80abac3f4a04399ae9d4d91cdadac7"
-SRC_URI[tzdata.sha256sum] = "ef7fffd9f4f50f4f58328b35022a32a5a056b245c5cb3d6791dddb342f871664"
+UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones"
+SRC_URI[tzcode.sha256sum] = "80072894adff5a458f1d143e16e4ca1d8b2a122c9c5399da482cb68cba6a1ff8"
+SRC_URI[tzdata.sha256sum] = "0d0434459acbd2059a7a8da1f3304a84a86591f6ed69c6248fffa502b6edffe3"
diff --git a/meta/recipes-extended/timezone/tzcode-native.bb b/meta/recipes-extended/timezone/tzcode-native.bb
index e3582ba674..d0b23a9d80 100644
--- a/meta/recipes-extended/timezone/tzcode-native.bb
+++ b/meta/recipes-extended/timezone/tzcode-native.bb
@@ -1,10 +1,7 @@
require timezone.inc
-#
SUMMARY = "tzcode, timezone zoneinfo utils -- zic, zdump, tzselect"
-S = "${WORKDIR}"
-
inherit native
EXTRA_OEMAKE += "cc='${CC}'"
diff --git a/meta/recipes-extended/timezone/tzdata.bb b/meta/recipes-extended/timezone/tzdata.bb
index 7f4322d867..dd1960ffa7 100644
--- a/meta/recipes-extended/timezone/tzdata.bb
+++ b/meta/recipes-extended/timezone/tzdata.bb
@@ -4,8 +4,6 @@ DEPENDS = "tzcode-native"
inherit allarch
-S = "${WORKDIR}"
-
DEFAULT_TIMEZONE ?= "Universal"
INSTALL_TIMEZONE_FILE ?= "1"
@@ -18,17 +16,21 @@ TZONES = " \
# "fat" is needed by e.g. MariaDB's mysql_tzinfo_to_sql
ZIC_FMT ?= "slim"
+do_configure[cleandirs] = "${B}"
+B = "${WORKDIR}/build"
+
do_compile() {
for zone in ${TZONES}; do
- ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${WORKDIR}${datadir}/zoneinfo -L /dev/null ${S}/${zone}
- ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${WORKDIR}${datadir}/zoneinfo/posix -L /dev/null ${S}/${zone}
- ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${WORKDIR}${datadir}/zoneinfo/right -L ${S}/leapseconds ${S}/${zone}
+ ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${B}/zoneinfo -L /dev/null ${S}/${zone}
+ ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${B}/zoneinfo/posix -L /dev/null ${S}/${zone}
+ ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${B}/zoneinfo/right -L ${S}/leapseconds ${S}/${zone}
done
}
do_install() {
- install -d ${D}$exec_prefix ${D}${datadir}/zoneinfo
- cp -pPR ${WORKDIR}$exec_prefix ${D}${base_prefix}
+ install -d ${D}${datadir}/zoneinfo
+ cp -pPR ${B}/zoneinfo/* ${D}${datadir}/zoneinfo
+
# libc is removing zoneinfo files from package
cp -pP "${S}/zone.tab" ${D}${datadir}/zoneinfo
cp -pP "${S}/zone1970.tab" ${D}${datadir}/zoneinfo
diff --git a/meta/recipes-extended/unzip/unzip/0001-unix-configure-fix-detection-for-cross-compilation.patch b/meta/recipes-extended/unzip/unzip/0001-unix-configure-fix-detection-for-cross-compilation.patch
new file mode 100644
index 0000000000..2fa7f481b7
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/0001-unix-configure-fix-detection-for-cross-compilation.patch
@@ -0,0 +1,103 @@
+From 5cbf901b5c3b6a7d1d0ed91b6df4194bb6d25a40 Mon Sep 17 00:00:00 2001
+From: Chen Qi <Qi.Chen@windriver.com>
+Date: Thu, 15 Jun 2023 07:14:17 -0700
+Subject: [PATCH] unix/configure: fix detection for cross compilation
+
+We're doing cross compilation; running a cross-compiled program on
+the host to determine a feature is not correct. So we change the
+runtime check into a compile-time check to detect the features.
+
+Upstream-Status: Inactive-Upstream
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ unix/configure | 44 +++++++++++++++-----------------------------
+ 1 file changed, 15 insertions(+), 29 deletions(-)
+
+diff --git a/unix/configure b/unix/configure
+index 8fd82dd..68dee98 100755
+--- a/unix/configure
++++ b/unix/configure
+@@ -259,6 +259,10 @@ cat > conftest.c << _EOF_
+ #include <sys/stat.h>
+ #include <unistd.h>
+ #include <stdio.h>
++
++_Static_assert(sizeof(off_t) < 8, "sizeof off_t < 8 failed");
++_Static_assert(sizeof((struct stat){0}.st_size) < 8, "sizeof st_size < 8 failed");
++
+ int main()
+ {
+ off_t offset;
+@@ -278,21 +282,10 @@ _EOF_
+ # compile it
+ $CC $CFLAGS $LDFLAGS -o conftest conftest.c >/dev/null 2>/dev/null
+ if [ $? -ne 0 ]; then
+- echo -- no Large File Support
++ echo -- yes we have Large File Support!
++ CFLAGSR="${CFLAGSR} -DLARGE_FILE_SUPPORT"
+ else
+-# run it
+- ./conftest
+- r=$?
+- if [ $r -eq 1 ]; then
+- echo -- no Large File Support - no 64-bit off_t
+- elif [ $r -eq 2 ]; then
+- echo -- no Large File Support - no 64-bit stat
+- elif [ $r -eq 3 ]; then
+- echo -- yes we have Large File Support!
+- CFLAGSR="${CFLAGSR} -DLARGE_FILE_SUPPORT"
+- else
+- echo -- no Large File Support - conftest returned $r
+- fi
++ echo -- no Large File Support
+ fi
+
+ # Added 11/24/2005 EG
+@@ -302,6 +295,11 @@ cat > conftest.c << _EOF_
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <wchar.h>
++
++#ifndef __STDC_ISO_10646__
++#error "__STDC_ISO_10646__ not defined"
++#endif
++
+ int main()
+ {
+ size_t wsize;
+@@ -327,19 +325,8 @@ if [ $? -ne 0 ]; then
+ echo "-- no Unicode (wchar_t) support"
+ else
+ # have wide char support
+-# run it
+- ./conftest
+- r=$?
+- if [ $r -eq 0 ]; then
+- echo -- no Unicode wchar_t support - wchar_t allocation error
+- elif [ $r -eq 1 ]; then
+- echo -- no Unicode support - wchar_t encoding unspecified
+- elif [ $r -eq 2 ]; then
+- echo -- have wchar_t with known UCS encoding - enabling Unicode support!
+- CFLAGSR="${CFLAGSR} -DUNICODE_SUPPORT -DUNICODE_WCHAR"
+- else
+- echo "-- no Unicode (wchar_t) support - conftest returned $r"
+- fi
++ echo -- have wchar_t with known UCS encoding - enabling Unicode support!
++ CFLAGSR="${CFLAGSR} -DUNICODE_SUPPORT -DUNICODE_WCHAR"
+ fi
+
+ echo "Check for setlocale support (needed for UNICODE Native check)"
+@@ -418,8 +405,7 @@ temp_link="link_$$"
+ echo "int main() { lchmod(\"${temp_file}\", 0666); }" \
+ ) > conftest.c
+ ln -s "${temp_link}" "${temp_file}" && \
+- $CC $BFLAG $LDFLAGS -o conftest conftest.c >/dev/null 2>/dev/null && \
+- ./conftest
++ $CC -Werror=implicit-function-declaration $BFLAG $LDFLAGS -o conftest conftest.c >/dev/null
+ [ $? -ne 0 ] && CFLAGSR="${CFLAGSR} -DNO_LCHMOD"
+ rm -f "${temp_file}"
+
+--
+2.34.1
+
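The patch above replaces run-the-target-binary probes with compile-time checks. A minimal sketch of the same _Static_assert technique, using hypothetical assertions rather than unzip's actual conftest (note the patch deliberately inverts the condition, so that a compilation failure is what signals large-file support):

    /* conftest-style sketch: the build fails when off_t or st_size is
     * narrower than 64 bits, so a cross-compiling configure script only
     * needs the compiler's exit status and never runs target code. */
    #include <sys/types.h>
    #include <sys/stat.h>

    _Static_assert(sizeof(off_t) >= 8, "off_t is not 64-bit");
    _Static_assert(sizeof(((struct stat *)0)->st_size) >= 8,
                   "st_size is not 64-bit");

    int main(void)
    {
        return 0;
    }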
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch b/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch
new file mode 100644
index 0000000000..1c1e120deb
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch
@@ -0,0 +1,39 @@
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1010355
+
+CVE: CVE-2022-0529
+Upstream-Status: Inactive-Upstream [need a new release]
+
+diff --git a/process.c b/process.c
+index d2a846e..99b9c7b 100644
+--- a/process.c
++++ b/process.c
+@@ -2507,13 +2507,15 @@ char *wide_to_local_string(wide_string, escape_all)
+ char buf[9];
+ char *buffer = NULL;
+ char *local_string = NULL;
++ size_t buffer_size;
+
+ for (wsize = 0; wide_string[wsize]; wsize++) ;
+
+ if (max_bytes < MAX_ESCAPE_BYTES)
+ max_bytes = MAX_ESCAPE_BYTES;
+
+- if ((buffer = (char *)malloc(wsize * max_bytes + 1)) == NULL) {
++ buffer_size = wsize * max_bytes + 1;
++ if ((buffer = (char *)malloc(buffer_size)) == NULL) {
+ return NULL;
+ }
+
+@@ -2552,7 +2554,11 @@ char *wide_to_local_string(wide_string, escape_all)
+ /* no MB for this wide */
+ /* use escape for wide character */
+ char *escape_string = wide_to_escape_string(wide_string[i]);
+- strcat(buffer, escape_string);
++ size_t buffer_len = strlen(buffer);
++ size_t escape_string_len = strlen(escape_string);
++ if (buffer_len + escape_string_len + 1 > buffer_size)
++ escape_string_len = buffer_size - buffer_len - 1;
++ strncat(buffer, escape_string, escape_string_len);
+ free(escape_string);
+ }
+ }
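The CVE-2022-0529 change above records the allocation size, checks the remaining room, and switches strcat to a bounded strncat. A self-contained sketch of that bounded-append pattern, with a hypothetical helper name rather than unzip's wide_to_local_string:

    #include <string.h>
    #include <stdio.h>

    /* Append src to dst without writing past buffer_size bytes
     * (including the terminating NUL); excess input is truncated. */
    static void bounded_append(char *dst, size_t buffer_size, const char *src)
    {
        size_t dst_len = strlen(dst);
        size_t src_len = strlen(src);

        if (dst_len + 1 >= buffer_size)
            return;                              /* no room left */
        if (dst_len + src_len + 1 > buffer_size)
            src_len = buffer_size - dst_len - 1; /* truncate */
        strncat(dst, src, src_len);
    }

    int main(void)
    {
        char buf[8] = "abc";
        bounded_append(buf, sizeof(buf), "defghij");
        printf("%s\n", buf);                     /* prints "abcdefg" */
        return 0;
    }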
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch b/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch
new file mode 100644
index 0000000000..363dafddc9
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch
@@ -0,0 +1,33 @@
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1010355
+
+CVE: CVE-2022-0530
+Upstream-Status: Inactive-Upstream [need a new release]
+
+diff --git a/fileio.c b/fileio.c
+index 6290824..77e4b5f 100644
+--- a/fileio.c
++++ b/fileio.c
+@@ -2361,6 +2361,9 @@ int do_string(__G__ length, option) /* return PK-type error code */
+ /* convert UTF-8 to local character set */
+ fn = utf8_to_local_string(G.unipath_filename,
+ G.unicode_escape_all);
++ if (fn == NULL)
++ return PK_ERR;
++
+ /* make sure filename is short enough */
+ if (strlen(fn) >= FILNAMSIZ) {
+ fn[FILNAMSIZ - 1] = '\0';
+diff --git a/process.c b/process.c
+index d2a846e..715bc0f 100644
+--- a/process.c
++++ b/process.c
+@@ -2605,6 +2605,8 @@ char *utf8_to_local_string(utf8_string, escape_all)
+ int escape_all;
+ {
+ zwchar *wide = utf8_to_wide_string(utf8_string);
++ if (wide == NULL)
++ return NULL;
+ char *loc = wide_to_local_string(wide, escape_all);
+ free(wide);
+ return loc;
+
diff --git a/meta/recipes-extended/unzip/unzip_6.0.bb b/meta/recipes-extended/unzip/unzip_6.0.bb
index c222a684b4..e3fffa30ab 100644
--- a/meta/recipes-extended/unzip/unzip_6.0.bb
+++ b/meta/recipes-extended/unzip/unzip_6.0.bb
@@ -29,6 +29,9 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/infozip/UnZip%206.x%20%28latest%29/UnZip%206.0/
file://unzip_optimization.patch \
file://0001-configure-Pass-LDFLAGS-to-tests-doing-link-step.patch \
file://CVE-2021-4217.patch \
+ file://CVE-2022-0529.patch \
+ file://CVE-2022-0530.patch \
+ file://0001-unix-configure-fix-detection-for-cross-compilation.patch \
"
UPSTREAM_VERSION_UNKNOWN = "1"
diff --git a/meta/recipes-extended/watchdog/watchdog/0001-shutdown-Do-not-guard-sys-quota.h-sys-swap.h-and-sys.patch b/meta/recipes-extended/watchdog/watchdog/0001-shutdown-Do-not-guard-sys-quota.h-sys-swap.h-and-sys.patch
new file mode 100644
index 0000000000..8c419e1d11
--- /dev/null
+++ b/meta/recipes-extended/watchdog/watchdog/0001-shutdown-Do-not-guard-sys-quota.h-sys-swap.h-and-sys.patch
@@ -0,0 +1,37 @@
+From ca1d379fa13c4055d42d2ff3a647b4397768efcd Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Tue, 23 Aug 2022 19:23:26 -0700
+Subject: [PATCH] shutdown: Do not guard sys/quota.h sys/swap.h and
+ sys/reboot.h with __GLIBC__
+
+These headers are provided by uclibc, musl, glibc and bionic, so they
+do not need to be guarded as glibc-specific includes. This also
+ensures that we get a proper declaration of the reboot() API.
+
+Upstream-Status: Submitted [https://sourceforge.net/p/watchdog/patches/12/]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ src/shutdown.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/src/shutdown.c b/src/shutdown.c
+index 1d9a857..6aea0d0 100644
+--- a/src/shutdown.c
++++ b/src/shutdown.c
+@@ -29,13 +29,9 @@
+ #include "extern.h"
+ #include "ext2_mnt.h"
+
+-#if defined __GLIBC__
+ #include <sys/quota.h>
+ #include <sys/swap.h>
+ #include <sys/reboot.h>
+-#else /* __GLIBC__ */
+-#include <linux/quota.h>
+-#endif /* __GLIBC__ */
+
+ #include <unistd.h>
+
+--
+2.37.2
+
diff --git a/meta/recipes-extended/watchdog/watchdog_5.16.bb b/meta/recipes-extended/watchdog/watchdog_5.16.bb
index 1163846ed8..26fcc10487 100644
--- a/meta/recipes-extended/watchdog/watchdog_5.16.bb
+++ b/meta/recipes-extended/watchdog/watchdog_5.16.bb
@@ -13,6 +13,7 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/watchdog/watchdog-${PV}.tar.gz \
file://watchdog.init \
file://wd_keepalive.init \
file://0001-wd_keepalive.service-use-run-instead-of-var-run.patch \
+ file://0001-shutdown-Do-not-guard-sys-quota.h-sys-swap.h-and-sys.patch \
"
SRC_URI[md5sum] = "1b4f51cabc64d1bee2fce7cdd626831f"
diff --git a/meta/recipes-extended/wget/wget.inc b/meta/recipes-extended/wget/wget.inc
index 58cb5ca73d..30abaff7b7 100644
--- a/meta/recipes-extended/wget/wget.inc
+++ b/meta/recipes-extended/wget/wget.inc
@@ -7,7 +7,7 @@ FTP sites"
HOMEPAGE = "https://www.gnu.org/software/wget/"
SECTION = "console/network"
LICENSE = "GPL-3.0-only"
-LIC_FILES_CHKSUM = "file://COPYING;md5=c678957b0c8e964aa6c70fd77641a71e"
+LIC_FILES_CHKSUM = "file://COPYING;md5=6f65012d1daf98cb09b386cfb68df26b"
inherit autotools gettext texinfo update-alternatives pkgconfig
diff --git a/meta/recipes-extended/wget/wget_1.21.3.bb b/meta/recipes-extended/wget/wget_1.21.4.bb
index f176a1546c..1d31b0116d 100644
--- a/meta/recipes-extended/wget/wget_1.21.3.bb
+++ b/meta/recipes-extended/wget/wget_1.21.4.bb
@@ -2,6 +2,6 @@ SRC_URI = "${GNU_MIRROR}/wget/wget-${PV}.tar.gz \
file://0002-improve-reproducibility.patch \
"
-SRC_URI[sha256sum] = "5726bb8bc5ca0f6dc7110f6416e4bb7019e2d2ff5bf93d1ca2ffcc6656f220e5"
+SRC_URI[sha256sum] = "81542f5cefb8faacc39bbbc6c82ded80e3e4a88505ae72ea51df27525bcde04c"
require wget.inc
diff --git a/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch b/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch
new file mode 100644
index 0000000000..383634ad53
--- /dev/null
+++ b/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch
@@ -0,0 +1,165 @@
+From f67c4d1f8bd2e3cbcb9eb49f5e897075e7426780 Mon Sep 17 00:00:00 2001
+From: Gabriel Corona <gabriel.corona@enst-bretagne.fr>
+Date: Thu, 25 Aug 2022 23:51:45 +0200
+Subject: [PATCH] Disable special support for Thunderbird in xdg-email (fixes
+ CVE-2020-27748, CVE-2022-4055)
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xdg/xdg-utils/-/commit/f67c4d1f8bd2e3cbcb9eb49f5e897075e7426780]
+CVE: CVE-2022-4055
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ scripts/xdg-email.in | 108 -------------------------------------------
+ 1 file changed, 108 deletions(-)
+
+diff --git a/scripts/xdg-email.in b/scripts/xdg-email.in
+index 13ba2d5..b700679 100644
+--- a/scripts/xdg-email.in
++++ b/scripts/xdg-email.in
+@@ -30,76 +30,8 @@ _USAGE
+
+ #@xdg-utils-common@
+
+-run_thunderbird()
+-{
+- local THUNDERBIRD MAILTO NEWMAILTO TO CC BCC SUBJECT BODY
+- THUNDERBIRD="$1"
+- MAILTO=$(echo "$2" | sed 's/^mailto://')
+- echo "$MAILTO" | grep -qs "^?"
+- if [ "$?" = "0" ] ; then
+- MAILTO=$(echo "$MAILTO" | sed 's/^?//')
+- else
+- MAILTO=$(echo "$MAILTO" | sed 's/^/to=/' | sed 's/?/\&/')
+- fi
+-
+- MAILTO=$(echo "$MAILTO" | sed 's/&/\n/g')
+- TO=$(/bin/echo -e $(echo "$MAILTO" | grep '^to=' | sed 's/^to=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- CC=$(/bin/echo -e $(echo "$MAILTO" | grep '^cc=' | sed 's/^cc=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- BCC=$(/bin/echo -e $(echo "$MAILTO" | grep '^bcc=' | sed 's/^bcc=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- SUBJECT=$(echo "$MAILTO" | grep '^subject=' | tail -n 1)
+- BODY=$(echo "$MAILTO" | grep '^body=' | tail -n 1)
+-
+- if [ -z "$TO" ] ; then
+- NEWMAILTO=
+- else
+- NEWMAILTO="to='$TO'"
+- fi
+- if [ -n "$CC" ] ; then
+- NEWMAILTO="${NEWMAILTO},cc='$CC'"
+- fi
+- if [ -n "$BCC" ] ; then
+- NEWMAILTO="${NEWMAILTO},bcc='$BCC'"
+- fi
+- if [ -n "$SUBJECT" ] ; then
+- NEWMAILTO="${NEWMAILTO},$SUBJECT"
+- fi
+- if [ -n "$BODY" ] ; then
+- NEWMAILTO="${NEWMAILTO},$BODY"
+- fi
+-
+- NEWMAILTO=$(echo "$NEWMAILTO" | sed 's/^,//')
+- DEBUG 1 "Running $THUNDERBIRD -compose \"$NEWMAILTO\""
+- "$THUNDERBIRD" -compose "$NEWMAILTO"
+- if [ $? -eq 0 ]; then
+- exit_success
+- else
+- exit_failure_operation_failed
+- fi
+-}
+-
+ open_kde()
+ {
+- if [ -n "$KDE_SESSION_VERSION" ] && [ "$KDE_SESSION_VERSION" -ge 5 ]; then
+- local kreadconfig=kreadconfig$KDE_SESSION_VERSION
+- else
+- local kreadconfig=kreadconfig
+- fi
+-
+- if which $kreadconfig >/dev/null 2>&1; then
+- local profile=$($kreadconfig --file emaildefaults \
+- --group Defaults --key Profile)
+- if [ -n "$profile" ]; then
+- local client=$($kreadconfig --file emaildefaults \
+- --group "PROFILE_$profile" \
+- --key EmailClient \
+- | cut -d ' ' -f 1)
+-
+- if echo "$client" | grep -Eq 'thunderbird|icedove'; then
+- run_thunderbird "$client" "$1"
+- fi
+- fi
+- fi
+-
+ local command
+ case "$KDE_SESSION_VERSION" in
+ '') command=kmailservice ;;
+@@ -130,15 +62,6 @@ open_kde()
+
+ open_gnome3()
+ {
+- local client
+- local desktop
+- desktop=`xdg-mime query default "x-scheme-handler/mailto"`
+- client=`desktop_file_to_binary "$desktop"`
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ if gio help open 2>/dev/null 1>&2; then
+ DEBUG 1 "Running gio open \"$1\""
+ gio open "$1"
+@@ -159,13 +82,6 @@ open_gnome3()
+
+ open_gnome()
+ {
+- local client
+- client=`gconftool-2 --get /desktop/gnome/url-handlers/mailto/command | cut -d ' ' -f 1` || ""
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ if gio help open 2>/dev/null 1>&2; then
+ DEBUG 1 "Running gio open \"$1\""
+ gio open "$1"
+@@ -231,15 +147,6 @@ open_flatpak()
+
+ open_generic()
+ {
+- local client
+- local desktop
+- desktop=`xdg-mime query default "x-scheme-handler/mailto"`
+- client=`desktop_file_to_binary "$desktop"`
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ xdg-open "$1"
+ local ret=$?
+
+@@ -364,21 +271,6 @@ while [ $# -gt 0 ] ; do
+ shift
+ ;;
+
+- --attach)
+- if [ -z "$1" ] ; then
+- exit_failure_syntax "file argument missing for --attach option"
+- fi
+- check_input_file "$1"
+- file=`readlink -f "$1"` # Normalize path
+- if [ -z "$file" ] || [ ! -f "$file" ] ; then
+- exit_failure_file_missing "file '$1' does not exist"
+- fi
+-
+- url_encode "$file"
+- options="${options}attach=${result}&"
+- shift
+- ;;
+-
+ -*)
+ exit_failure_syntax "unexpected option '$parm'"
+ ;;
+--
+2.25.1
+
diff --git a/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb b/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
index 73acf6b744..4d93180535 100644
--- a/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
+++ b/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
@@ -21,6 +21,7 @@ SRC_URI = "https://portland.freedesktop.org/download/${BPN}-${PV}.tar.gz \
file://0001-Reinstate-xdg-terminal.patch \
file://0001-Don-t-build-the-in-script-manual.patch \
file://1f199813e0eb0246f63b54e9e154970e609575af.patch \
+ file://CVE-2022-4055.patch \
"
SRC_URI[md5sum] = "902042508b626027a3709d105f0b63ff"
diff --git a/meta/recipes-extended/xinetd/xinetd_2.3.15.4.bb b/meta/recipes-extended/xinetd/xinetd_2.3.15.4.bb
index 62ee70d244..897417314d 100644
--- a/meta/recipes-extended/xinetd/xinetd_2.3.15.4.bb
+++ b/meta/recipes-extended/xinetd/xinetd_2.3.15.4.bb
@@ -30,6 +30,8 @@ INITSCRIPT_PARAMS = "defaults"
PACKAGECONFIG ??= "tcp-wrappers"
PACKAGECONFIG[tcp-wrappers] = "--with-libwrap,,tcp-wrappers"
+CFLAGS += "-D_GNU_SOURCE"
+
CONFFILES:${PN} = "${sysconfdir}/xinetd.conf"
do_install:append() {
diff --git a/meta/recipes-extended/xz/xz/CVE-2022-1271.patch b/meta/recipes-extended/xz/xz/CVE-2022-1271.patch
deleted file mode 100644
index e43e73cf12..0000000000
--- a/meta/recipes-extended/xz/xz/CVE-2022-1271.patch
+++ /dev/null
@@ -1,96 +0,0 @@
-From dc932a1e9c0d9f1db71be11a9b82496e3a72f112 Mon Sep 17 00:00:00 2001
-From: Lasse Collin <lasse.collin@tukaani.org>
-Date: Tue, 29 Mar 2022 19:19:12 +0300
-Subject: [PATCH] xzgrep: Fix escaping of malicious filenames (ZDI-CAN-16587).
-
-Malicious filenames can make xzgrep to write to arbitrary files
-or (with a GNU sed extension) lead to arbitrary code execution.
-
-xzgrep from XZ Utils versions up to and including 5.2.5 are
-affected. 5.3.1alpha and 5.3.2alpha are affected as well.
-This patch works for all of them.
-
-This bug was inherited from gzip's zgrep. gzip 1.12 includes
-a fix for zgrep.
-
-The issue with the old sed script is that with multiple newlines,
-the N-command will read the second line of input, then the
-s-commands will be skipped because it's not the end of the
-file yet, then a new sed cycle starts and the pattern space
-is printed and emptied. So only the last line or two get escaped.
-
-One way to fix this would be to read all lines into the pattern
-space first. However, the included fix is even simpler: All lines
-except the last line get a backslash appended at the end. To ensure
-that shell command substitution doesn't eat a possible trailing
-newline, a colon is appended to the filename before escaping.
-The colon is later used to separate the filename from the grep
-output so it is fine to add it here instead of a few lines later.
-
-The old code also wasn't POSIX compliant as it used \n in the
-replacement section of the s-command. Using \<newline> is the
-POSIX compatible method.
-
-LC_ALL=C was added to the two critical sed commands. POSIX sed
-manual recommends it when using sed to manipulate pathnames
-because in other locales invalid multibyte sequences might
-cause issues with some sed implementations. In case of GNU sed,
-these particular sed scripts wouldn't have such problems but some
-other scripts could have, see:
-
- info '(sed)Locale Considerations'
-
-This vulnerability was discovered by:
-cleemy desu wayo working with Trend Micro Zero Day Initiative
-
-Thanks to Jim Meyering and Paul Eggert discussing the different
-ways to fix this and for coordinating the patch release schedule
-with gzip.
-
-Upstream-Status: Backport [https://tukaani.org/xz/xzgrep-ZDI-CAN-16587.patch]
-CVE: CVE-2022-1271
-
-Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
----
- src/scripts/xzgrep.in | 20 ++++++++++++--------
- 1 file changed, 12 insertions(+), 8 deletions(-)
-
-diff --git a/src/scripts/xzgrep.in b/src/scripts/xzgrep.in
-index 9db5c3a..f64dddb 100644
---- a/src/scripts/xzgrep.in
-+++ b/src/scripts/xzgrep.in
-@@ -179,22 +179,26 @@ for i; do
- { test $# -eq 1 || test $no_filename -eq 1; }; then
- eval "$grep"
- else
-+ # Append a colon so that the last character will never be a newline
-+ # which would otherwise get lost in shell command substitution.
-+ i="$i:"
-+
-+ # Escape & \ | and newlines only if such characters are present
-+ # (speed optimization).
- case $i in
- (*'
- '* | *'&'* | *'\'* | *'|'*)
-- i=$(printf '%s\n' "$i" |
-- sed '
-- $!N
-- $s/[&\|]/\\&/g
-- $s/\n/\\n/g
-- ');;
-+ i=$(printf '%s\n' "$i" | LC_ALL=C sed 's/[&\|]/\\&/g; $!s/$/\\/');;
- esac
-- sed_script="s|^|$i:|"
-+
-+ # $i already ends with a colon so don't add it here.
-+ sed_script="s|^|$i|"
-
- # Fail if grep or sed fails.
- r=$(
- exec 4>&1
-- (eval "$grep" 4>&-; echo $? >&4) 3>&- | sed "$sed_script" >&3 4>&-
-+ (eval "$grep" 4>&-; echo $? >&4) 3>&- |
-+ LC_ALL=C sed "$sed_script" >&3 4>&-
- ) || r=2
- exit $r
- fi >&3 5>&-
diff --git a/meta/recipes-extended/xz/xz_5.2.5.bb b/meta/recipes-extended/xz/xz_5.2.6.bb
index 720e070f4a..3482622471 100644
--- a/meta/recipes-extended/xz/xz_5.2.5.bb
+++ b/meta/recipes-extended/xz/xz_5.2.6.bb
@@ -24,11 +24,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=97d554a32881fee0aa283d96e47cb24a \
file://lib/getopt.c;endline=23;md5=2069b0ee710572c03bb3114e4532cd84 \
"
-SRC_URI = "https://tukaani.org/xz/xz-${PV}.tar.gz \
- file://CVE-2022-1271.patch \
- "
-SRC_URI[md5sum] = "0d270c997aff29708c74d53f599ef717"
-SRC_URI[sha256sum] = "f6f4910fd033078738bd82bfba4f49219d03b17eb0794eb91efbae419f4aba10"
+SRC_URI = "https://tukaani.org/xz/xz-${PV}.tar.gz"
+SRC_URI[sha256sum] = "a2105abee17bcd2ebd15ced31b4f5eda6e17efd6b10f921a01cda4a44c91b3a0"
UPSTREAM_CHECK_REGEX = "xz-(?P<pver>\d+(\.\d+)+)\.tar"
CACHED_CONFIGUREVARS += "gl_cv_posix_shell=/bin/sh"
diff --git a/meta/recipes-extended/zip/zip-3.0/0001-unix-configure-use-_Static_assert-to-do-correct-dete.patch b/meta/recipes-extended/zip/zip-3.0/0001-unix-configure-use-_Static_assert-to-do-correct-dete.patch
new file mode 100644
index 0000000000..106f246a7c
--- /dev/null
+++ b/meta/recipes-extended/zip/zip-3.0/0001-unix-configure-use-_Static_assert-to-do-correct-dete.patch
@@ -0,0 +1,96 @@
+From 9916fc6f1f93f3e092e3c6937c30dc8137c26d34 Mon Sep 17 00:00:00 2001
+From: Chen Qi <Qi.Chen@windriver.com>
+Date: Thu, 15 Jun 2023 18:31:26 +0800
+Subject: [PATCH] unix/configure: use _Static_assert to do correct detection
+
+We're doing cross compilation; running a cross-compiled program
+on the host to determine a feature is not correct. Use _Static_assert
+to do the detection correctly.
+
+Upstream-Status: Inactive-Upstream
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ unix/configure | 42 ++++++++++++------------------------------
+ 1 file changed, 12 insertions(+), 30 deletions(-)
+
+diff --git a/unix/configure b/unix/configure
+index f2b3d02..f917086 100644
+--- a/unix/configure
++++ b/unix/configure
+@@ -361,6 +361,10 @@ cat > conftest.c << _EOF_
+ #include <sys/stat.h>
+ #include <unistd.h>
+ #include <stdio.h>
++
++_Static_assert(sizeof((struct stat){0}.st_uid) == 2, "sizeof st_uid is not 16 bit");
++_Static_assert(sizeof((struct stat){0}.st_gid) == 2, "sizeof st_gid is not 16 bit");
++
+ int main()
+ {
+ struct stat s;
+@@ -385,21 +389,7 @@ if [ $? -ne 0 ]; then
+ echo -- UID/GID test failed on compile - disabling old 16-bit UID/GID support
+ CFLAGS="${CFLAGS} -DUIDGID_NOT_16BIT"
+ else
+-# run it
+- ./conftest
+- r=$?
+- if [ $r -eq 1 ]; then
+- echo -- UID not 2 bytes - disabling old 16-bit UID/GID support
+- CFLAGS="${CFLAGS} -DUIDGID_NOT_16BIT"
+- elif [ $r -eq 2 ]; then
+- echo -- GID not 2 bytes - disabling old 16-bit UID/GID support
+- CFLAGS="${CFLAGS} -DUIDGID_NOT_16BIT"
+- elif [ $r -eq 3 ]; then
+- echo -- 16-bit UIDs and GIDs - keeping old 16-bit UID/GID support
+- else
+- echo -- test failed - conftest returned $r - disabling old 16-bit UID/GID support
+- CFLAGS="${CFLAGS} -DUIDGID_NOT_16BIT"
+- fi
++ echo -- 16-bit UIDs and GIDs - keeping old 16-bit UID/GID support
+ fi
+
+
+@@ -417,6 +407,10 @@ cat > conftest.c << _EOF_
+ #include <sys/stat.h>
+ #include <unistd.h>
+ #include <stdio.h>
++
++_Static_assert(sizeof(off_t) < 8, "sizeof off_t < 8 failed");
++_Static_assert(sizeof((struct stat){0}.st_size) < 8, "sizeof st_size < 8 failed");
++
+ int main()
+ {
+ off_t offset;
+@@ -436,24 +430,12 @@ _EOF_
+ # compile it
+ $CC -o conftest conftest.c >/dev/null 2>/dev/null
+ if [ $? -ne 0 ]; then
+- echo -- no Large File Support
++ echo -- yes we have Large File Support!
++ CFLAGS="${CFLAGS} -DLARGE_FILE_SUPPORT"
+ else
+-# run it
+- ./conftest
+- r=$?
+- if [ $r -eq 1 ]; then
+- echo -- no Large File Support - no 64-bit off_t
+- elif [ $r -eq 2 ]; then
+- echo -- no Large File Support - no 64-bit stat
+- elif [ $r -eq 3 ]; then
+- echo -- yes we have Large File Support!
+- CFLAGS="${CFLAGS} -DLARGE_FILE_SUPPORT"
+- else
+- echo -- no Large File Support - conftest returned $r
+- fi
++ echo -- no Large File Support
+ fi
+
+-
+ # Check for wide char for Unicode support
+ # Added 11/24/2005 EG
+
+--
+2.34.1
+
diff --git a/meta/recipes-extended/zip/zip_3.0.bb b/meta/recipes-extended/zip/zip_3.0.bb
index 07a67b9634..83e1e52e97 100644
--- a/meta/recipes-extended/zip/zip_3.0.bb
+++ b/meta/recipes-extended/zip/zip_3.0.bb
@@ -17,6 +17,7 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/infozip/Zip%203.x%20%28latest%29/3.0/zip30.tar.
file://0001-configure-use-correct-CPP.patch \
file://0002-configure-support-PIC-code-build.patch \
file://0001-configure-Use-CFLAGS-and-LDFLAGS-when-doing-link-tes.patch \
+ file://0001-unix-configure-use-_Static_assert-to-do-correct-dete.patch \
"
UPSTREAM_VERSION_UNKNOWN = "1"
diff --git a/meta/recipes-gnome/epiphany/epiphany_42.2.bb b/meta/recipes-gnome/epiphany/epiphany_42.4.bb
index dc1b34ac92..98923a3bdc 100644
--- a/meta/recipes-gnome/epiphany/epiphany_42.2.bb
+++ b/meta/recipes-gnome/epiphany/epiphany_42.4.bb
@@ -27,8 +27,9 @@ SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@oe.utils.trim_version("${PV}", 1)}/${GN
file://0002-help-meson.build-disable-the-use-of-yelp.patch \
file://migrator.patch \
file://distributor.patch \
+ file://CVE-2023-26081.patch \
"
-SRC_URI[archive.sha256sum] = "92c02cf886d10d2ccff5de658e1a420eab31d20bb50e746d430e9535b485192d"
+SRC_URI[archive.sha256sum] = "370938ad2920eeb28bc2435944776b7ba55a0e2ede65836f79818cfb7e8f0860"
PACKAGECONFIG_SOUP ?= "soup2"
PACKAGECONFIG ??= "${PACKAGECONFIG_SOUP}"
diff --git a/meta/recipes-gnome/epiphany/files/CVE-2023-26081.patch b/meta/recipes-gnome/epiphany/files/CVE-2023-26081.patch
new file mode 100644
index 0000000000..af1e20bd8f
--- /dev/null
+++ b/meta/recipes-gnome/epiphany/files/CVE-2023-26081.patch
@@ -0,0 +1,90 @@
+From 53363c3c8178bf9193dad9fa3516f4e10cff0ffd Mon Sep 17 00:00:00 2001
+From: Michael Catanzaro <mcatanzaro@redhat.com>
+Date: Fri, 3 Feb 2023 13:07:15 -0600
+Subject: [PATCH] Don't autofill passwords in sandboxed contexts
+
+If the sandbox CSP directive or iframe attribute is used, the web
+content is not supposed to be trusted by the main resource origin.
+Therefore, we'd better disable the password manager entirely so the
+untrusted web content cannot exfiltrate passwords.
+
+https://github.com/google/security-research/security/advisories/GHSA-mhhf-w9xw-pp9x
+
+Part-of: <https://gitlab.gnome.org/GNOME/epiphany/-/merge_requests/1275>
+
+Upstream-Status: Backport
+[https://gitlab.gnome.org/GNOME/epiphany/-/commit/53363c3c8178bf9193dad9fa3516f4e10cff0ffd]
+CVE: CVE-2023-26081
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ .../resources/js/ephy.js | 26 +++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+diff --git a/embed/web-process-extension/resources/js/ephy.js b/embed/web-process-extension/resources/js/ephy.js
+index 38b806f..44d1792 100644
+--- a/embed/web-process-extension/resources/js/ephy.js
++++ b/embed/web-process-extension/resources/js/ephy.js
+@@ -352,6 +352,12 @@ Ephy.hasModifiedForms = function()
+ }
+ };
+
++Ephy.isSandboxedWebContent = function()
++{
++ // https://github.com/google/security-research/security/advisories/GHSA-mhhf-w9xw-pp9x
++ return self.origin === null || self.origin === 'null';
++};
++
+ Ephy.PasswordManager = class PasswordManager
+ {
+ constructor(pageID, frameID)
+@@ -385,6 +391,11 @@ Ephy.PasswordManager = class PasswordManager
+
+ query(origin, targetOrigin, username, usernameField, passwordField)
+ {
++ if (Ephy.isSandboxedWebContent()) {
++ Ephy.log(`Not querying passwords for origin=${origin} because web content is sandboxed`);
++ return Promise.resolve(null);
++ }
++
+ Ephy.log(`Querying passwords for origin=${origin}, targetOrigin=${targetOrigin}, username=${username}, usernameField=${usernameField}, passwordField=${passwordField}`);
+
+ return new Promise((resolver, reject) => {
+@@ -396,6 +407,11 @@ Ephy.PasswordManager = class PasswordManager
+
+ save(origin, targetOrigin, username, password, usernameField, passwordField, isNew)
+ {
++ if (Ephy.isSandboxedWebContent()) {
++ Ephy.log(`Not saving password for origin=${origin} because web content is sandboxed`);
++ return;
++ }
++
+ Ephy.log(`Saving password for origin=${origin}, targetOrigin=${targetOrigin}, username=${username}, usernameField=${usernameField}, passwordField=${passwordField}, isNew=${isNew}`);
+
+ window.webkit.messageHandlers.passwordManagerSave.postMessage({
+@@ -407,6 +423,11 @@ Ephy.PasswordManager = class PasswordManager
+ // FIXME: Why is pageID a parameter here?
+ requestSave(origin, targetOrigin, username, password, usernameField, passwordField, isNew, pageID)
+ {
++ if (Ephy.isSandboxedWebContent()) {
++ Ephy.log(`Not requesting to save password for origin=${origin} because web content is sandboxed`);
++ return;
++ }
++
+ Ephy.log(`Requesting to save password for origin=${origin}, targetOrigin=${targetOrigin}, username=${username}, usernameField=${usernameField}, passwordField=${passwordField}, isNew=${isNew}`);
+
+ window.webkit.messageHandlers.passwordManagerRequestSave.postMessage({
+@@ -426,6 +447,11 @@ Ephy.PasswordManager = class PasswordManager
+
+ queryUsernames(origin)
+ {
++ if (Ephy.isSandboxedWebContent()) {
++ Ephy.log(`Not querying usernames for origin=${origin} because web content is sandboxed`);
++ return Promise.resolve(null);
++ }
++
+ Ephy.log(`Requesting usernames for origin=${origin}`);
+
+ return new Promise((resolver, reject) => {
+--
+2.35.5
+
diff --git a/meta/recipes-gnome/gcr/gcr_3.40.0.bb b/meta/recipes-gnome/gcr/gcr_3.40.0.bb
index 717c31c325..8719884f25 100644
--- a/meta/recipes-gnome/gcr/gcr_3.40.0.bb
+++ b/meta/recipes-gnome/gcr/gcr_3.40.0.bb
@@ -13,6 +13,8 @@ DEPENDS = "p11-kit glib-2.0 libgcrypt gnupg-native \
CACHED_CONFIGUREVARS += "ac_cv_path_GPG='gpg2'"
+CFLAGS += "-D_GNU_SOURCE"
+
GNOMEBASEBUILDCLASS = "meson"
GTKDOC_MESON_OPTION = "gtk_doc"
inherit gnomebase gtk-icon-cache gtk-doc features_check upstream-version-is-even vala gobject-introspection gettext mime mime-xdg
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-Add-use_prebuilt_tools-option.patch b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-Add-use_prebuilt_tools-option.patch
deleted file mode 100644
index a8206a4507..0000000000
--- a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-Add-use_prebuilt_tools-option.patch
+++ /dev/null
@@ -1,171 +0,0 @@
-From ba73bb0f3d2023839bc3b681c49b7ec1192cceb4 Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex.kanavin@gmail.com>
-Date: Sat, 8 May 2021 21:58:54 +0200
-Subject: [PATCH] Add use_prebuilt_tools option
-
-This allows using the gdk-pixbuf tools from the host to
-build and install tests in a cross-compile scenarion.
-
-Upstream-Status: Submitted [https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/merge_requests/119]
-Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
-
----
- gdk-pixbuf/meson.build | 11 +++++++++--
- meson.build | 6 +++---
- meson_options.txt | 4 ++++
- tests/meson.build | 16 ++++++++--------
- thumbnailer/meson.build | 24 ++++++++++++++++++------
- 5 files changed, 42 insertions(+), 19 deletions(-)
-
-diff --git a/gdk-pixbuf/meson.build b/gdk-pixbuf/meson.build
-index 8b0590b..7331491 100644
---- a/gdk-pixbuf/meson.build
-+++ b/gdk-pixbuf/meson.build
-@@ -342,13 +342,20 @@ foreach bin: gdkpixbuf_bin
- include_directories: [ root_inc, gdk_pixbuf_inc ],
- c_args: common_cflags + gdk_pixbuf_cflags,
- install: true)
-- meson.override_find_program(bin_name, bin)
-+ if not get_option('use_prebuilt_tools')
-+ meson.override_find_program(bin_name, bin)
-+ endif
-
- # Used in tests
- set_variable(bin_name.underscorify(), bin)
- endforeach
-
--if not meson.is_cross_build()
-+if get_option('use_prebuilt_tools')
-+ gdk_pixbuf_query_loaders = find_program('gdk-pixbuf-query-loaders', required: true)
-+ gdk_pixbuf_pixdata = find_program('gdk-pixbuf-pixdata', required: true)
-+endif
-+
-+if not meson.is_cross_build() or get_option('use_prebuilt_tools')
- # The 'loaders.cache' used for testing, so we don't accidentally
- # load the installed cache; we always build it by default
- loaders_cache = custom_target('loaders.cache',
-diff --git a/meson.build b/meson.build
-index 7a1409b..0bc73eb 100644
---- a/meson.build
-+++ b/meson.build
-@@ -403,16 +403,16 @@ subdir('gdk-pixbuf')
- # i18n
- subdir('po')
-
--if not meson.is_cross_build()
-+if not meson.is_cross_build() or get_option('use_prebuilt_tools')
- subdir('tests')
-- subdir('thumbnailer')
- endif
-+subdir('thumbnailer')
-
- # Documentation
- build_docs = get_option('gtk_doc') or get_option('docs')
- subdir('docs')
-
--if not meson.is_cross_build()
-+if not meson.is_cross_build() or get_option('use_prebuilt_tools')
- meson.add_install_script('build-aux/post-install.py',
- gdk_pixbuf_bindir,
- gdk_pixbuf_libdir,
-diff --git a/meson_options.txt b/meson_options.txt
-index 0ee6718..cc29855 100644
---- a/meson_options.txt
-+++ b/meson_options.txt
-@@ -49,4 +49,8 @@ option('gio_sniffing',
- description: 'Perform file type detection using GIO (Unused on MacOS and Windows)',
- type: 'boolean',
- value: true)
-+option('use_prebuilt_tools',
-+ description: 'Use prebuilt gdk-pixbuf tools from the host for cross-compilation',
-+ type: 'boolean',
-+ value: false)
-
-diff --git a/tests/meson.build b/tests/meson.build
-index 7c6cb11..1029e6a 100644
---- a/tests/meson.build
-+++ b/tests/meson.build
-@@ -5,6 +5,12 @@
- # $PATH. Ideally we should use gnome.compile_resources() and let Meson deal with
- # this problem: See https://github.com/mesonbuild/meson/issues/8266.
- if enabled_loaders.contains('png') and host_system != 'windows'
-+
-+ resources_deps = [loaders_cache,]
-+ if not get_option('use_prebuilt_tools')
-+ resources_deps += [gdk_pixbuf_pixdata,]
-+ endif
-+
- # Resources; we cannot use gnome.compile_resources() here, because we need to
- # override the environment in order to use the utilities we just built instead
- # of the system ones
-@@ -21,10 +27,7 @@ if enabled_loaders.contains('png') and host_system != 'windows'
- '@INPUT@',
- '@OUTPUT@',
- ],
-- depends: [
-- gdk_pixbuf_pixdata,
-- loaders_cache,
-- ],
-+ depends: resources_deps,
- )
-
- resources_h = custom_target('resources.h',
-@@ -40,10 +43,7 @@ if enabled_loaders.contains('png') and host_system != 'windows'
- '@INPUT@',
- '@OUTPUT@',
- ],
-- depends: [
-- gdk_pixbuf_pixdata,
-- loaders_cache,
-- ],
-+ depends: resources_deps,
- )
- no_resources = false
- else
-diff --git a/thumbnailer/meson.build b/thumbnailer/meson.build
-index b6a206d..9336c21 100644
---- a/thumbnailer/meson.build
-+++ b/thumbnailer/meson.build
-@@ -6,13 +6,29 @@ bin = executable('gdk-pixbuf-thumbnailer',
- ],
- dependencies: gdk_pixbuf_deps + [ gdkpixbuf_dep ],
- install: true)
--meson.override_find_program('gdk-pixbuf-thumbnailer', bin)
-+if not get_option('use_prebuilt_tools')
-+ meson.override_find_program('gdk-pixbuf-thumbnailer', bin)
-+endif
-
- gdk_pixbuf_print_mime_types = executable('gdk-pixbuf-print-mime-types',
- 'gdk-pixbuf-print-mime-types.c',
-+ install: true,
- c_args: common_cflags,
- dependencies: gdk_pixbuf_deps + [ gdkpixbuf_dep ])
-
-+if get_option('use_prebuilt_tools')
-+ gdk_pixbuf_print_mime_types = find_program('gdk-pixbuf-print-mime-types', required: true)
-+endif
-+
-+thumbnailer_deps = [loaders_cache,]
-+
-+if not get_option('use_prebuilt_tools')
-+ thumbnailer_deps += [
-+ gdk_pixbuf_print_mime_types,
-+ gdk_pixbuf_pixdata,
-+ ]
-+endif
-+
- custom_target('thumbnailer',
- input: 'gdk-pixbuf-thumbnailer.thumbnailer.in',
- output: 'gdk-pixbuf-thumbnailer.thumbnailer',
-@@ -25,10 +41,6 @@ custom_target('thumbnailer',
- '@INPUT@',
- '@OUTPUT@',
- ],
-- depends: [
-- gdk_pixbuf_print_mime_types,
-- gdk_pixbuf_pixdata,
-- loaders_cache,
-- ],
-+ depends: thumbnailer_deps,
- install: true,
- install_dir: join_paths(gdk_pixbuf_datadir, 'thumbnailers'))
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-meson.build-allow-a-subset-of-tests-in-cross-compile.patch b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-meson.build-allow-a-subset-of-tests-in-cross-compile.patch
new file mode 100644
index 0000000000..7250fa3f62
--- /dev/null
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/0001-meson.build-allow-a-subset-of-tests-in-cross-compile.patch
@@ -0,0 +1,66 @@
+From 9d3b374e75692da3d1d05344a1693c85a3098f47 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex@linutronix.de>
+Date: Thu, 26 Jan 2023 20:29:46 +0100
+Subject: [PATCH] meson.build: allow (a subset of) tests in cross compile
+ settings
+
+There is no need to completely disable tests: most of them
+do not require running target executables at build time,
+and so can be built and installed.
+
+This requires inserting a couple of specific guards around
+items that do require running target executables.
+
+Upstream-Status: Submitted [https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/merge_requests/150]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+---
+ meson.build | 6 +++---
+ tests/meson.build | 10 ++++++----
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/meson.build b/meson.build
+index 8a16c8f..7c8b20f 100644
+--- a/meson.build
++++ b/meson.build
+@@ -369,10 +369,10 @@ subdir('gdk-pixbuf')
+ # i18n
+ subdir('po')
+
++if get_option('tests')
++ subdir('tests')
++endif
+ if not meson.is_cross_build()
+- if get_option('tests')
+- subdir('tests')
+- endif
+ subdir('thumbnailer')
+ endif
+
+diff --git a/tests/meson.build b/tests/meson.build
+index 28c2525..c45e765 100644
+--- a/tests/meson.build
++++ b/tests/meson.build
+@@ -4,7 +4,7 @@
+ # gdk-pixbuf-pixdata from build directory because it needs all DLL locations in
+ # $PATH. Ideally we should use gnome.compile_resources() and let Meson deal with
+ # this problem: See https://github.com/mesonbuild/meson/issues/8266.
+-if enabled_loaders.contains('png') and host_system != 'windows'
++if enabled_loaders.contains('png') and host_system != 'windows' and not meson.is_cross_build()
+ # Resources; we cannot use gnome.compile_resources() here, because we need to
+ # override the environment in order to use the utilities we just built instead
+ # of the system ones
+@@ -166,9 +166,11 @@ endif
+ test_deps = gdk_pixbuf_deps + [ gdkpixbuf_dep, ]
+ test_args = [ '-k' ]
+ test_env = environment()
+-test_env.set('G_TEST_SRCDIR', meson.current_source_dir())
+-test_env.set('G_TEST_BUILDDIR', meson.current_build_dir())
+-test_env.set('GDK_PIXBUF_MODULE_FILE', loaders_cache.full_path())
++if not meson.is_cross_build()
++ test_env.set('G_TEST_SRCDIR', meson.current_source_dir())
++ test_env.set('G_TEST_BUILDDIR', meson.current_build_dir())
++ test_env.set('GDK_PIXBUF_MODULE_FILE', loaders_cache.full_path())
++endif
+
+ foreach test_name, test_data: installed_tests
+ test_sources = [ test_name + '.c', 'test-common.c' ]
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/fatal-loader.patch b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/fatal-loader.patch
index 25410b11ea..dd580f8162 100644
--- a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/fatal-loader.patch
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/fatal-loader.patch
@@ -1,4 +1,4 @@
-From f00603d58d844422363b896ea7d07aaf48ddaa66 Mon Sep 17 00:00:00 2001
+From b511bd1efb43ffc49c753e309717a242ec686ef1 Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Tue, 1 Apr 2014 17:23:36 +0100
Subject: [PATCH] gdk-pixbuf: add an option so that loader errors are fatal
@@ -14,10 +14,10 @@ Signed-off-by: Ross Burton <ross.burton@intel.com>
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/gdk-pixbuf/queryloaders.c b/gdk-pixbuf/queryloaders.c
-index 312aa78..b813d99 100644
+index 1d39b44..2b00815 100644
--- a/gdk-pixbuf/queryloaders.c
+++ b/gdk-pixbuf/queryloaders.c
-@@ -212,7 +212,7 @@ write_loader_info (GString *contents, const char *path, GdkPixbufFormat *info)
+@@ -216,7 +216,7 @@ write_loader_info (GString *contents, const char *path, GdkPixbufFormat *info)
g_string_append_c (contents, '\n');
}
@@ -26,7 +26,7 @@ index 312aa78..b813d99 100644
query_module (GString *contents, const char *dir, const char *file)
{
char *path;
-@@ -221,6 +221,7 @@ query_module (GString *contents, const char *dir, const char *file)
+@@ -225,6 +225,7 @@ query_module (GString *contents, const char *dir, const char *file)
void (*fill_vtable) (GdkPixbufModule *module);
gpointer fill_info_ptr;
gpointer fill_vtable_ptr;
@@ -34,7 +34,7 @@ index 312aa78..b813d99 100644
if (g_path_is_absolute (file))
path = g_strdup (file);
-@@ -270,10 +271,13 @@ query_module (GString *contents, const char *dir, const char *file)
+@@ -274,10 +275,13 @@ query_module (GString *contents, const char *dir, const char *file)
g_module_error());
else
g_fprintf (stderr, "Cannot load loader %s\n", path);
@@ -47,8 +47,8 @@ index 312aa78..b813d99 100644
+ return ret;
}
- #ifdef G_OS_WIN32
-@@ -314,6 +318,7 @@ int main (int argc, char **argv)
+ #if defined(G_OS_WIN32) && defined(GDK_PIXBUF_RELOCATABLE)
+@@ -318,6 +322,7 @@ int main (int argc, char **argv)
gint first_file = 1;
GFile *pixbuf_libdir_file;
gchar *pixbuf_libdir;
@@ -56,7 +56,7 @@ index 312aa78..b813d99 100644
#ifdef G_OS_WIN32
gchar *libdir;
-@@ -452,7 +457,9 @@ int main (int argc, char **argv)
+@@ -456,7 +461,9 @@ int main (int argc, char **argv)
}
modules = g_list_sort (modules, (GCompareFunc)strcmp);
for (l = modules; l != NULL; l = l->next)
@@ -67,7 +67,7 @@ index 312aa78..b813d99 100644
g_list_free_full (modules, g_free);
g_free (moduledir);
#else
-@@ -468,7 +475,8 @@ int main (int argc, char **argv)
+@@ -472,7 +479,8 @@ int main (int argc, char **argv)
infilename = g_locale_to_utf8 (infilename,
-1, NULL, NULL, NULL);
#endif
@@ -77,7 +77,7 @@ index 312aa78..b813d99 100644
}
g_free (cwd);
}
-@@ -486,5 +494,8 @@ int main (int argc, char **argv)
+@@ -490,5 +498,8 @@ int main (int argc, char **argv)
g_free (pixbuf_libdir);
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.42.6.bb b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.42.10.bb
index 55c16e4d66..cca89a9059 100644
--- a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.42.6.bb
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.42.10.bb
@@ -12,18 +12,17 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c \
SECTION = "libs"
-DEPENDS = "glib-2.0 gdk-pixbuf-native shared-mime-info"
-DEPENDS:remove:class-native = "gdk-pixbuf-native"
+DEPENDS = "glib-2.0 shared-mime-info"
MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}"
SRC_URI = "${GNOME_MIRROR}/${BPN}/${MAJ_VER}/${BPN}-${PV}.tar.xz \
file://run-ptest \
file://fatal-loader.patch \
- file://0001-Add-use_prebuilt_tools-option.patch \
+ file://0001-meson.build-allow-a-subset-of-tests-in-cross-compile.patch \
"
-SRC_URI[sha256sum] = "c4a6b75b7ed8f58ca48da830b9fa00ed96d668d3ab4b1f723dcf902f78bde77f"
+SRC_URI[sha256sum] = "ee9b6c75d13ba096907a2e3c6b27b61bcd17f5c7ebeab5a5b439d2f2e39fe44b"
inherit meson pkgconfig gettext pixbufcache ptest-gnome upstream-version-is-even gobject-introspection gi-docgen lib_package
@@ -39,18 +38,12 @@ PACKAGECONFIG = "${GDK_PIXBUF_LOADERS} \
${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)}"
PACKAGECONFIG:class-native = "${GDK_PIXBUF_LOADERS}"
-PACKAGECONFIG[png] = "-Dpng=true,-Dpng=false,libpng"
-PACKAGECONFIG[jpeg] = "-Djpeg=true,-Djpeg=false,jpeg"
-PACKAGECONFIG[tiff] = "-Dtiff=true,-Dtiff=false,tiff"
+PACKAGECONFIG[png] = "-Dpng=enabled,-Dpng=disabled,libpng"
+PACKAGECONFIG[jpeg] = "-Djpeg=enabled,-Djpeg=disabled,jpeg"
+PACKAGECONFIG[tiff] = "-Dtiff=enabled,-Dtiff=disabled,tiff"
PACKAGECONFIG[tests] = "-Dinstalled_tests=true,-Dinstalled_tests=false"
-EXTRA_OEMESON:class-target = " \
- -Duse_prebuilt_tools=true \
-"
-
-EXTRA_OEMESON:class-nativesdk = " \
- -Duse_prebuilt_tools=true \
-"
+EXTRA_OEMESON = "-Dman=false"
PACKAGES =+ "${PN}-xlib"
@@ -95,9 +88,11 @@ do_install:append() {
}
-# Remove a bad fuzzing attempt that sporadically fails without a way to reproduce
do_install_ptest() {
+ # Remove a bad fuzzing attempt that sporadically fails without a way to reproduce
rm ${D}/${datadir}/installed-tests/gdk-pixbuf/pixbuf-randomly-modified.test
+ # https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/issues/215
+ rm ${D}/${datadir}/installed-tests/gdk-pixbuf/pixbuf-jpeg.test
}
do_install:append:class-native() {
@@ -111,10 +106,6 @@ do_install:append:class-native() {
XDG_DATA_DIRS=${STAGING_DATADIR} \
GDK_PIXBUF_MODULE_FILE=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/${LIBV}/loaders.cache
- create_wrapper ${D}/${bindir}/gdk-pixbuf-print-mime-types \
- XDG_DATA_DIRS=${STAGING_DATADIR} \
- GDK_PIXBUF_MODULE_FILE=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/${LIBV}/loaders.cache
-
create_wrapper ${D}/${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders \
XDG_DATA_DIRS=${STAGING_DATADIR} \
GDK_PIXBUF_MODULE_FILE=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/${LIBV}/loaders.cache \
diff --git a/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.72.0.bb b/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.72.0.bb
index 355e77d107..9a47e908b7 100644
--- a/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.72.0.bb
+++ b/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.72.0.bb
@@ -113,9 +113,6 @@ EOF
}
do_compile:prepend() {
- # This prevents g-ir-scanner from writing cache data to $HOME
- export GI_SCANNER_DISABLE_CACHE=1
-
# Needed to run g-ir unit tests, which won't be able to find the built libraries otherwise
export GIR_EXTRA_LIBS_PATH=$B/.libs
}
diff --git a/meta/recipes-gnome/gtk-doc/gtk-doc_1.33.2.bb b/meta/recipes-gnome/gtk-doc/gtk-doc_1.33.2.bb
index 392913fcc6..150eca9274 100644
--- a/meta/recipes-gnome/gtk-doc/gtk-doc_1.33.2.bb
+++ b/meta/recipes-gnome/gtk-doc/gtk-doc_1.33.2.bb
@@ -18,6 +18,8 @@ PACKAGECONFIG ??= "${@bb.utils.contains("DISTRO_FEATURES", "api-documentation",
PACKAGECONFIG[working-scripts] = ",,libxslt-native xmlto-native python3-six python3-pygments"
PACKAGECONFIG[tests] = "--enable-tests,--disable-tests,glib-2.0"
+CACHED_CONFIGUREVARS += "ac_cv_path_XSLTPROC=xsltproc"
+
SRC_URI[archive.sha256sum] = "cc1b709a20eb030a278a1f9842a362e00402b7f834ae1df4c1998a723152bf43"
SRC_URI += "file://0001-Do-not-hardocode-paths-to-perl-python-in-scripts.patch \
file://0001-Do-not-error-out-if-xsltproc-is-not-found.patch \
diff --git a/meta/recipes-gnome/librsvg/librsvg_2.52.7.bb b/meta/recipes-gnome/librsvg/librsvg_2.52.10.bb
index 78eb93c635..21f502444b 100644
--- a/meta/recipes-gnome/librsvg/librsvg_2.52.7.bb
+++ b/meta/recipes-gnome/librsvg/librsvg_2.52.10.bb
@@ -20,7 +20,7 @@ SRC_URI += "file://0001-Makefile.am-pass-rust-target-to-cargo-also-when-not-.pat
file://0001-system-deps-src-lib.rs-do-not-probe-into-harcoded-li.patch \
"
-SRC_URI[archive.sha256sum] = "057c1eeeaf85c84e254bdb707459207f5840da5b4d52b4711c03140ed09e6887"
+SRC_URI[archive.sha256sum] = "6292dfcd6a8e1ce1784e0188914546af1633081d1fae9e22f7cb017e7e84ba8f"
# librsvg is still autotools-based, but is calling cargo from its automake-driven makefiles
# so we cannot use cargo class directly, but still need bits and pieces from it
@@ -73,3 +73,5 @@ FILES:librsvg-gtk = "${libdir}/gdk-pixbuf-2.0/*/*/*.so \
RRECOMMENDS:librsvg-gtk = "gdk-pixbuf-bin"
PIXBUF_PACKAGES = "librsvg-gtk"
+
+TARGET_CC_ARCH += "${LDFLAGS}"
diff --git a/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch b/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
index 5232cf70c6..a2dba6cb20 100644
--- a/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
+++ b/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
@@ -1,19 +1,20 @@
-There is a potential infinite-loop in function _arc_error_normalized().
+There is an assertion in function _cairo_arc_in_direction().
CVE: CVE-2019-6461
Upstream-Status: Pending
Signed-off-by: Ross Burton <ross.burton@intel.com>
diff --git a/src/cairo-arc.c b/src/cairo-arc.c
-index 390397bae..f9249dbeb 100644
+index 390397bae..1bde774a4 100644
--- a/src/cairo-arc.c
+++ b/src/cairo-arc.c
-@@ -99,7 +99,7 @@ _arc_max_angle_for_tolerance_normalized (double tolerance)
- do {
- angle = M_PI / i++;
- error = _arc_error_normalized (angle);
-- } while (error > tolerance);
-+ } while (error > tolerance && error > __DBL_EPSILON__);
+@@ -186,7 +186,8 @@ _cairo_arc_in_direction (cairo_t *cr,
+ if (cairo_status (cr))
+ return;
- return angle;
- }
+- assert (angle_max >= angle_min);
++ if (angle_max < angle_min)
++ return;
+
+ if (angle_max - angle_min > 2 * M_PI * MAX_FULL_CIRCLES) {
+ angle_max = fmod (angle_max - angle_min, 2 * M_PI);
diff --git a/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch b/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
index 4e4598c5b5..7c3209291b 100644
--- a/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
+++ b/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
@@ -1,20 +1,40 @@
-There is an assertion in function _cairo_arc_in_direction().
-
CVE: CVE-2019-6462
-Upstream-Status: Pending
-Signed-off-by: Ross Burton <ross.burton@intel.com>
+Upstream-Status: Backport
+Signed-off-by: Quentin Schulz <quentin.schulz@theobroma-systems.com>
+
+From ab2c5ee21e5f3d3ee4b3f67cfcd5811a4f99c3a0 Mon Sep 17 00:00:00 2001
+From: Heiko Lewin <hlewin@gmx.de>
+Date: Sun, 1 Aug 2021 11:16:03 +0000
+Subject: [PATCH] _arc_max_angle_for_tolerance_normalized: fix infinite loop
+
+---
+ src/cairo-arc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/cairo-arc.c b/src/cairo-arc.c
-index 390397bae..1bde774a4 100644
+index 390397bae..1c891d1a0 100644
--- a/src/cairo-arc.c
+++ b/src/cairo-arc.c
-@@ -186,7 +186,8 @@ _cairo_arc_in_direction (cairo_t *cr,
- if (cairo_status (cr))
- return;
+@@ -90,16 +90,18 @@ _arc_max_angle_for_tolerance_normalized (double tolerance)
+ { M_PI / 11.0, 9.81410988043554039085e-09 },
+ };
+ int table_size = ARRAY_LENGTH (table);
++ const int max_segments = 1000; /* this value is chosen arbitrarily. this gives an error of about 1.74909e-20 */
-- assert (angle_max >= angle_min);
-+ if (angle_max < angle_min)
-+ return;
+ for (i = 0; i < table_size; i++)
+ if (table[i].error < tolerance)
+ return table[i].angle;
- if (angle_max - angle_min > 2 * M_PI * MAX_FULL_CIRCLES) {
- angle_max = fmod (angle_max - angle_min, 2 * M_PI);
+ ++i;
++
+ do {
+ angle = M_PI / i++;
+ error = _arc_error_normalized (angle);
+- } while (error > tolerance);
++ } while (error > tolerance && i < max_segments);
+
+ return angle;
+ }
+--
+2.38.1
+
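The replacement CVE-2019-6462 backport above bounds the angle-refinement loop with max_segments so a tolerance that never converges cannot spin forever. A small sketch of that bounded-convergence pattern, using a toy error model that is not cairo's:

    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    /* Shrink the step until the estimated error is under the tolerance,
     * but never iterate more than max_steps times, so a tolerance of 0
     * (or NaN) cannot hang the caller. */
    static double pick_angle(double tolerance)
    {
        const int max_steps = 1000;               /* arbitrary safety bound */
        int i = 1;
        double angle, error;

        do {
            angle = M_PI / i++;
            error = angle * angle * angle / 24.0; /* toy error estimate */
        } while (error > tolerance && i < max_steps);

        return angle;
    }

    int main(void)
    {
        printf("%g\n", pick_angle(0.0));          /* terminates anyway */
        return 0;
    }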
diff --git a/meta/recipes-graphics/cairo/cairo_1.16.0.bb b/meta/recipes-graphics/cairo/cairo_1.16.0.bb
index 67081bb8cb..ffb813d290 100644
--- a/meta/recipes-graphics/cairo/cairo_1.16.0.bb
+++ b/meta/recipes-graphics/cairo/cairo_1.16.0.bb
@@ -17,9 +17,13 @@ LICENSE:${PN}-doc = "MPL-1.1 | LGPL-2.1-only"
LICENSE:${PN}-gobject = "MPL-1.1 | LGPL-2.1-only"
LICENSE:${PN}-script-interpreter = "MPL-1.1 | LGPL-2.1-only"
LICENSE:${PN}-perf-utils = "GPL-3.0-or-later"
+# Adapt the licenses for cairo-dbg and cairo-src depending on whether
+# cairo-trace is being built.
+LICENSE:${PN}-dbg = "(MPL-1.1 | LGPL-2.1-only)${@bb.utils.contains('PACKAGECONFIG', 'trace', ' & GPL-3.0-or-later', '', d)}"
+LICENSE:${PN}-src = "(MPL-1.1 | LGPL-2.1-only)${@bb.utils.contains('PACKAGECONFIG', 'trace', ' & GPL-3.0-or-later', '', d)}"
LIC_FILES_CHKSUM = "file://COPYING;md5=e73e999e0c72b5ac9012424fa157ad77 \
- file://util/cairo-trace/COPYING-GPL-3;md5=d32239bcb673463ab874e80d47fae504"
+ ${@bb.utils.contains('PACKAGECONFIG', 'trace', 'file://util/cairo-trace/COPYING-GPL-3;md5=d32239bcb673463ab874e80d47fae504', '', d)}"
DEPENDS = "fontconfig glib-2.0 libpng pixman zlib"
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch b/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch
new file mode 100644
index 0000000000..f600309d3e
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch
@@ -0,0 +1,41 @@
+From e6fda039ad638866b7a6a5d046f03278ba1b7611 Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Mon, 14 Nov 2022 19:18:19 +0100
+Subject: [PATCH] * src/truetype/ttgxvar.c (tt_hvadvance_adjust): Integer
+ overflow.
+
+Reported as
+
+ https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=50462
+
+Upstream-Status: Backport [https://github.com/freetype/freetype/commit/e6fda039ad638866b7a6a5d046f03278ba1b7611]
+CVE: CVE-2023-2004
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/truetype/ttgxvar.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/truetype/ttgxvar.c b/src/truetype/ttgxvar.c
+index 7f2db0c..8968111 100644
+--- a/src/truetype/ttgxvar.c
++++ b/src/truetype/ttgxvar.c
+@@ -42,6 +42,7 @@
+ #include <ft2build.h>
+ #include <freetype/internal/ftdebug.h>
+ #include FT_CONFIG_CONFIG_H
++#include <freetype/internal/ftcalc.h>
+ #include <freetype/internal/ftstream.h>
+ #include <freetype/internal/sfnt.h>
+ #include <freetype/tttags.h>
+@@ -1147,7 +1148,7 @@
+ delta == 1 ? "" : "s",
+ vertical ? "VVAR" : "HVAR" ));
+
+- *avalue += delta;
++ *avalue = ADD_INT( *avalue, delta );
+
+ Exit:
+ return error;
+--
+2.25.1
+
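The freetype fix above replaces a bare "*avalue += delta" with ADD_INT(), the guarded addition from ftcalc.h. A rough sketch of what an overflow-checked signed addition of that kind can look like (an assumption about the idea, not FreeType's implementation, which may clamp or report differently):

    #include <limits.h>
    #include <stdio.h>

    /* Add two ints without relying on undefined signed overflow:
     * the result saturates at INT_MAX / INT_MIN instead of wrapping. */
    static int add_int_saturated(int a, int b)
    {
        if (b > 0 && a > INT_MAX - b)
            return INT_MAX;
        if (b < 0 && a < INT_MIN - b)
            return INT_MIN;
        return a + b;
    }

    int main(void)
    {
        printf("%d\n", add_int_saturated(INT_MAX, 5));  /* prints INT_MAX */
        return 0;
    }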
diff --git a/meta/recipes-graphics/freetype/freetype_2.11.1.bb b/meta/recipes-graphics/freetype/freetype_2.11.1.bb
index 5b464d3d70..29f4d8dfb7 100644
--- a/meta/recipes-graphics/freetype/freetype_2.11.1.bb
+++ b/meta/recipes-graphics/freetype/freetype_2.11.1.bb
@@ -12,10 +12,11 @@ LIC_FILES_CHKSUM = "file://LICENSE.TXT;md5=a5927784d823d443c6cae55701d01553 \
file://docs/FTL.TXT;md5=9f37b4e6afa3fef9dba8932b16bd3f97 \
file://docs/GPLv2.TXT;md5=8ef380476f642c20ebf40fecb0add2ec"
-SRC_URI = "${SAVANNAH_NONGNU_MIRROR}/${BPN}/${BP}.tar.xz \
+SRC_URI = "${SAVANNAH_GNU_MIRROR}/${BPN}/${BP}.tar.xz \
file://CVE-2022-27404.patch \
file://CVE-2022-27405.patch \
file://CVE-2022-27406.patch \
+ file://CVE-2023-2004.patch \
"
SRC_URI[sha256sum] = "3333ae7cfda88429c97a7ae63b7d01ab398076c3b67182e960e5684050f2c5c8"
diff --git a/meta/recipes-graphics/glslang/glslang_1.3.204.1.bb b/meta/recipes-graphics/glslang/glslang_1.3.204.1.bb
index 2af406212f..ff08f251cd 100644
--- a/meta/recipes-graphics/glslang/glslang_1.3.204.1.bb
+++ b/meta/recipes-graphics/glslang/glslang_1.3.204.1.bb
@@ -9,7 +9,7 @@ LICENSE = "BSD-3-Clause & BSD-2-Clause & MIT & Apache-2.0 & GPL-3-with-bison-exc
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=2a2b5acd7bc4844964cfda45fe807dc3"
SRCREV = "2742e959347ae2fac58acd0d022c92a0ff1f24bf"
-SRC_URI = "git://github.com/KhronosGroup/glslang.git;protocol=https;branch=master \
+SRC_URI = "git://github.com/KhronosGroup/glslang.git;protocol=https;branch=main \
file://0001-generate-glslang-pkg-config.patch"
PE = "1"
UPSTREAM_CHECK_GITTAGREGEX = "sdk-(?P<pver>\d+(\.\d+)+)"
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/0001-Fix-conditional.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/0001-Fix-conditional.patch
new file mode 100644
index 0000000000..0f9b86973b
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/0001-Fix-conditional.patch
@@ -0,0 +1,25 @@
+From e421613e8f825508afa9a0b54d33085557c37441 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Wed, 1 Jun 2022 09:07:57 -0600
+Subject: [PATCH] [sbix] Fix conditional
+
+Signed-off-by: Pavel Zhukov <pavel.zhukov@huawei.com>
+Upstream-Status: Backport [e421613e8f825508afa9a0b54d33085557c37441]
+
+---
+ src/hb-ot-color-sbix-table.hh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/hb-ot-color-sbix-table.hh b/src/hb-ot-color-sbix-table.hh
+index 6efae43cda..d0e2235fb2 100644
+--- a/src/hb-ot-color-sbix-table.hh
++++ b/src/hb-ot-color-sbix-table.hh
+@@ -298,7 +298,7 @@ struct sbix
+
+ const PNGHeader &png = *blob->as<PNGHeader>();
+
+- if (png.IHDR.height >= 65536 | png.IHDR.width >= 65536)
++ if (png.IHDR.height >= 65536 || png.IHDR.width >= 65536)
+ {
+ hb_blob_destroy (blob);
+ return false;
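The one-character change above turns a bitwise "|" into a logical "||". With two 0/1 comparison results the value happens to be the same, but "|" never short-circuits and obscures intent; the general C pitfall it guards against looks like this (illustrative code, not harfbuzz's):

    #include <stdio.h>

    static int is_empty(const char *s)
    {
        /* '||' short-circuits: when s is NULL, *s is never evaluated. */
        return s == NULL || *s == '\0';
    }

    static int is_empty_buggy(const char *s)
    {
        /* With '|' both operands are always evaluated, so a NULL s
         * dereferences a null pointer before the OR happens. */
        return (s == NULL) | (*s == '\0');
    }

    int main(void)
    {
        printf("%d\n", is_empty(NULL));   /* safe, prints 1 */
        (void)is_empty_buggy;             /* calling it with NULL would crash */
        return 0;
    }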
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2022-33068.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2022-33068.patch
new file mode 100644
index 0000000000..931b9abe1e
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2022-33068.patch
@@ -0,0 +1,35 @@
+From 62e803b36173fd096d7ad460dd1d1db9be542593 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Wed, 1 Jun 2022 07:38:21 -0600
+Subject: [PATCH] [sbix] Limit glyph extents
+
+Fixes https://github.com/harfbuzz/harfbuzz/issues/3557
+
+Upstream-Status: Backport [https://github.com/harfbuzz/harfbuzz/commit/62e803b36173fd096d7ad460dd1d1db9be542593]
+CVE: CVE-2022-33068
+Signed-off-by: Wentao Zhang <Wentao.Zhang@windriver.com>
+
+---
+ src/hb-ot-color-sbix-table.hh | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/hb-ot-color-sbix-table.hh b/src/hb-ot-color-sbix-table.hh
+index 9741ebd45..6efae43cd 100644
+--- a/src/hb-ot-color-sbix-table.hh
++++ b/src/hb-ot-color-sbix-table.hh
+@@ -298,6 +298,12 @@ struct sbix
+
+ const PNGHeader &png = *blob->as<PNGHeader>();
+
++ if (png.IHDR.height >= 65536 | png.IHDR.width >= 65536)
++ {
++ hb_blob_destroy (blob);
++ return false;
++ }
++
+ extents->x_bearing = x_offset;
+ extents->y_bearing = png.IHDR.height + y_offset;
+ extents->width = png.IHDR.width;
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch
new file mode 100644
index 0000000000..6721b1bd70
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch
@@ -0,0 +1,135 @@
+From b29fbd16fa82b82bdf0dcb2f13a63f7dc23cf324 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Mon, 6 Feb 2023 13:08:52 -0700
+Subject: [PATCH] [gsubgpos] Refactor skippy_iter.match()
+
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/b29fbd16fa82b82bdf0dcb2f13a63f7dc23cf324]
+Comment1: To backport the fix for CVE-2023-25193, add definitions for MATCH, NOT_MATCH and SKIP.
+Signed-off-by: Siddharth <sdoshi@mvista.com>
+---
+ src/hb-ot-layout-gsubgpos.hh | 94 +++++++++++++++++++++---------------
+ 1 file changed, 54 insertions(+), 40 deletions(-)
+
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index d9a068c..d17a4da 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -522,33 +522,52 @@ struct hb_ot_apply_context_t :
+ may_skip (const hb_glyph_info_t &info) const
+ { return matcher.may_skip (c, info); }
+
++ enum match_t {
++ MATCH,
++ NOT_MATCH,
++ SKIP
++ };
++
++ match_t match (hb_glyph_info_t &info)
++ {
++ matcher_t::may_skip_t skip = matcher.may_skip (c, info);
++ if (unlikely (skip == matcher_t::SKIP_YES))
++ return SKIP;
++
++ matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
++ if (match == matcher_t::MATCH_YES ||
++ (match == matcher_t::MATCH_MAYBE &&
++ skip == matcher_t::SKIP_NO))
++ return MATCH;
++
++ if (skip == matcher_t::SKIP_NO)
++ return NOT_MATCH;
++
++ return SKIP;
++ }
++
+ bool next (unsigned *unsafe_to = nullptr)
+ {
+ assert (num_items > 0);
+ while (idx + num_items < end)
+ {
+ idx++;
+- const hb_glyph_info_t &info = c->buffer->info[idx];
+-
+- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
+- if (unlikely (skip == matcher_t::SKIP_YES))
+- continue;
+-
+- matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
+- if (match == matcher_t::MATCH_YES ||
+- (match == matcher_t::MATCH_MAYBE &&
+- skip == matcher_t::SKIP_NO))
+- {
+- num_items--;
+- if (match_glyph_data) match_glyph_data++;
+- return true;
+- }
+-
+- if (skip == matcher_t::SKIP_NO)
++ switch (match (c->buffer->info[idx]))
+ {
+- if (unsafe_to)
+- *unsafe_to = idx + 1;
+- return false;
++ case MATCH:
++ {
++ num_items--;
++ if (match_glyph_data) match_glyph_data++;
++ return true;
++ }
++ case NOT_MATCH:
++ {
++ if (unsafe_to)
++ *unsafe_to = idx + 1;
++ return false;
++ }
++ case SKIP:
++ continue;
+ }
+ }
+ if (unsafe_to)
+@@ -561,27 +580,22 @@ struct hb_ot_apply_context_t :
+ while (idx > num_items - 1)
+ {
+ idx--;
+- const hb_glyph_info_t &info = c->buffer->out_info[idx];
+-
+- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
+- if (unlikely (skip == matcher_t::SKIP_YES))
+- continue;
+-
+- matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
+- if (match == matcher_t::MATCH_YES ||
+- (match == matcher_t::MATCH_MAYBE &&
+- skip == matcher_t::SKIP_NO))
+- {
+- num_items--;
+- if (match_glyph_data) match_glyph_data++;
+- return true;
+- }
+-
+- if (skip == matcher_t::SKIP_NO)
++ switch (match (c->buffer->out_info[idx]))
+ {
+- if (unsafe_from)
+- *unsafe_from = hb_max (1u, idx) - 1u;
+- return false;
++ case MATCH:
++ {
++ num_items--;
++ if (match_glyph_data) match_glyph_data++;
++ return true;
++ }
++ case NOT_MATCH:
++ {
++ if (unsafe_from)
++ *unsafe_from = hb_max (1u, idx) - 1u;
++ return false;
++ }
++ case SKIP:
++ continue;
+ }
+ }
+ if (unsafe_from)
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch
new file mode 100644
index 0000000000..a1ec1422cc
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch
@@ -0,0 +1,185 @@
+From 8708b9e081192786c027bb7f5f23d76dbe5c19e8 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Mon, 6 Feb 2023 14:51:25 -0700
+Subject: [PATCH] [GPOS] Avoid O(n^2) behavior in mark-attachment
+
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/8708b9e081192786c027bb7f5f23d76dbe5c19e8]
+Comment1: The original patch [https://github.com/harfbuzz/harfbuzz/commit/85be877925ddbf34f74a1229f3ca1716bb6170dc] caused a regression and was reverted. This patch completes the fix.
+Comment2: The Patch contained files MarkBasePosFormat1.hh and MarkLigPosFormat1.hh which were moved from hb-ot-layout-gpos-table.hh as per https://github.com/harfbuzz/harfbuzz/commit/197d9a5c994eb41c8c89b7b958b26b1eacfeeb00
+CVE: CVE-2023-25193
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/hb-ot-layout-gpos-table.hh | 98 ++++++++++++++++++++++------------
+ src/hb-ot-layout-gsubgpos.hh | 5 +-
+ 2 files changed, 68 insertions(+), 35 deletions(-)
+
+diff --git a/src/hb-ot-layout-gpos-table.hh b/src/hb-ot-layout-gpos-table.hh
+index 2f9186a..46b09d0 100644
+--- a/src/hb-ot-layout-gpos-table.hh
++++ b/src/hb-ot-layout-gpos-table.hh
+@@ -2150,6 +2150,25 @@ struct MarkBasePosFormat1
+
+ const Coverage &get_coverage () const { return this+markCoverage; }
+
++ static inline bool accept (hb_buffer_t *buffer, unsigned idx)
++ {
++ /* We only want to attach to the first of a MultipleSubst sequence.
++ * https://github.com/harfbuzz/harfbuzz/issues/740
++ * Reject others...
++ * ...but stop if we find a mark in the MultipleSubst sequence:
++ * https://github.com/harfbuzz/harfbuzz/issues/1020 */
++ return !_hb_glyph_info_multiplied (&buffer->info[idx]) ||
++ 0 == _hb_glyph_info_get_lig_comp (&buffer->info[idx]) ||
++ (idx == 0 ||
++ _hb_glyph_info_is_mark (&buffer->info[idx - 1]) ||
++ !_hb_glyph_info_multiplied (&buffer->info[idx - 1]) ||
++ _hb_glyph_info_get_lig_id (&buffer->info[idx]) !=
++ _hb_glyph_info_get_lig_id (&buffer->info[idx - 1]) ||
++ _hb_glyph_info_get_lig_comp (&buffer->info[idx]) !=
++ _hb_glyph_info_get_lig_comp (&buffer->info[idx - 1]) + 1
++ );
++ }
++
+ bool apply (hb_ot_apply_context_t *c) const
+ {
+ TRACE_APPLY (this);
+@@ -2157,47 +2176,46 @@ struct MarkBasePosFormat1
+ unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint);
+ if (likely (mark_index == NOT_COVERED)) return_trace (false);
+
+- /* Now we search backwards for a non-mark glyph */
++ /* Now we search backwards for a non-mark glyph.
++ * We don't use skippy_iter.prev() to avoid O(n^2) behavior. */
++
+ hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+- skippy_iter.reset (buffer->idx, 1);
+ skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
+- do {
+- unsigned unsafe_from;
+- if (!skippy_iter.prev (&unsafe_from))
++ unsigned j;
++ for (j = buffer->idx; j > c->last_base_until; j--)
++ {
++ auto match = skippy_iter.match (buffer->info[j - 1]);
++ if (match == skippy_iter.MATCH)
+ {
+- buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
+- return_trace (false);
++ if (!accept (buffer, j - 1))
++ match = skippy_iter.SKIP;
+ }
++ if (match == skippy_iter.MATCH)
++ {
++ c->last_base = (signed) j - 1;
++ break;
++ }
++ }
++ c->last_base_until = buffer->idx;
++ if (c->last_base == -1)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
++ return_trace (false);
++ }
+
+- /* We only want to attach to the first of a MultipleSubst sequence.
+- * https://github.com/harfbuzz/harfbuzz/issues/740
+- * Reject others...
+- * ...but stop if we find a mark in the MultipleSubst sequence:
+- * https://github.com/harfbuzz/harfbuzz/issues/1020 */
+- if (!_hb_glyph_info_multiplied (&buffer->info[skippy_iter.idx]) ||
+- 0 == _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) ||
+- (skippy_iter.idx == 0 ||
+- _hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx - 1]) ||
+- _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx]) !=
+- _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx - 1]) ||
+- _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) !=
+- _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx - 1]) + 1
+- ))
+- break;
+- skippy_iter.reject ();
+- } while (true);
++ unsigned idx = (unsigned) c->last_base;
+
+ /* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */
+- //if (!_hb_glyph_info_is_base_glyph (&buffer->info[skippy_iter.idx])) { return_trace (false); }
++ //if (!_hb_glyph_info_is_base_glyph (&buffer->info[idx])) { return_trace (false); }
+
+- unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[skippy_iter.idx].codepoint);
++ unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[idx].codepoint);
+ if (base_index == NOT_COVERED)
+ {
+- buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
++ buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
+ return_trace (false);
+ }
+
+- return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, skippy_iter.idx));
++ return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, idx));
+ }
+
+ bool subset (hb_subset_context_t *c) const
+@@ -2423,20 +2441,32 @@ struct MarkLigPosFormat1
+ if (likely (mark_index == NOT_COVERED)) return_trace (false);
+
+ /* Now we search backwards for a non-mark glyph */
++
+ hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+- skippy_iter.reset (buffer->idx, 1);
+ skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
+- unsigned unsafe_from;
+- if (!skippy_iter.prev (&unsafe_from))
++
++ unsigned j;
++ for (j = buffer->idx; j > c->last_base_until; j--)
+ {
+- buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
++ auto match = skippy_iter.match (buffer->info[j - 1]);
++ if (match == skippy_iter.MATCH)
++ {
++ c->last_base = (signed) j - 1;
++ break;
++ }
++ }
++ c->last_base_until = buffer->idx;
++ if (c->last_base == -1)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
+ return_trace (false);
+ }
+
++ j = (unsigned) c->last_base;
++
+ /* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */
+- //if (!_hb_glyph_info_is_ligature (&buffer->info[skippy_iter.idx])) { return_trace (false); }
++ //if (!_hb_glyph_info_is_ligature (&buffer->info[j])) { return_trace (false); }
+
+- unsigned int j = skippy_iter.idx;
+ unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[j].codepoint);
+ if (lig_index == NOT_COVERED)
+ {
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index 65de131..d9a068c 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -641,6 +641,9 @@ struct hb_ot_apply_context_t :
+ uint32_t random_state;
+
+
++ signed last_base = -1; // GPOS uses
++ unsigned last_base_until = 0; // GPOS uses
++
+ hb_ot_apply_context_t (unsigned int table_index_,
+ hb_font_t *font_,
+ hb_buffer_t *buffer_) :
+@@ -673,7 +676,7 @@ struct hb_ot_apply_context_t :
+ iter_context.init (this, true);
+ }
+
+- void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; init_iters (); }
++ void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; last_base = -1; last_base_until = 0; init_iters (); }
+ void set_auto_zwj (bool auto_zwj_) { auto_zwj = auto_zwj_; init_iters (); }
+ void set_auto_zwnj (bool auto_zwnj_) { auto_zwnj = auto_zwnj_; init_iters (); }
+ void set_random (bool random_) { random = random_; }
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz_4.0.1.bb b/meta/recipes-graphics/harfbuzz/harfbuzz_4.0.1.bb
index bf77a5e56c..f7dc61ebd5 100644
--- a/meta/recipes-graphics/harfbuzz/harfbuzz_4.0.1.bb
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz_4.0.1.bb
@@ -11,7 +11,11 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=6ee0f16281694fb6aa689cca1e0fb3da \
UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases"
UPSTREAM_CHECK_REGEX = "harfbuzz-(?P<pver>\d+(\.\d+)+).tar"
-SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BPN}-${PV}.tar.xz"
+SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BPN}-${PV}.tar.xz \
+ file://CVE-2022-33068.patch \
+ file://0001-Fix-conditional.patch \
+ file://CVE-2023-25193-pre1.patch \
+ file://CVE-2023-25193.patch"
SRC_URI[sha256sum] = "98f68777272db6cd7a3d5152bac75083cd52a26176d87bc04c8b3929d33bce49"
inherit meson pkgconfig lib_package gtk-doc gobject-introspection
@@ -33,9 +37,9 @@ PACKAGES =+ "${PN}-icu ${PN}-icu-dev ${PN}-subset"
LEAD_SONAME = "libharfbuzz.so"
do_install:append() {
- # If no tools are installed due to PACKAGECONFIG then this directory is
- #still installed, so remove it to stop packaging wanings.
- rmdir --ignore-fail-on-non-empty ${D}${bindir}
+ # If no tools are installed due to PACKAGECONFIG then this directory might
+ # still be installed, so remove it to stop packaging warnings.
+ [ ! -d ${D}${bindir} ] || rmdir --ignore-fail-on-non-empty ${D}${bindir}
}
FILES:${PN}-icu = "${libdir}/libharfbuzz-icu.so.*"
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch
new file mode 100644
index 0000000000..fd8a66bca7
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch
@@ -0,0 +1,103 @@
+From 42ce199c9cfe129e5e21afd48dfe757a6acf87c4 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Tue, 4 Apr 2023 19:06:20 -0500
+Subject: [PATCH] Decomp: Don't enable 2-pass color quant w/ RGB565
+
+The 2-pass color quantization algorithm assumes 3-sample pixels. RGB565
+is the only 3-component colorspace that doesn't have 3-sample pixels, so
+we need to treat it as a special case when determining whether to enable
+2-pass color quantization. Otherwise, attempting to initialize 2-pass
+color quantization with an RGB565 output buffer could cause
+prescan_quantize() to read from uninitialized memory and subsequently
+underflow/overflow the histogram array.
+
+djpeg is supposed to fail gracefully if both -rgb565 and -colors are
+specified, because none of its destination managers (image writers)
+support color quantization with RGB565. However, prescan_quantize() was
+called before that could occur. It is possible but very unlikely that
+these issues could have been reproduced in applications other than
+djpeg. The issues involve the use of two features (12-bit precision and
+RGB565) that are incompatible, and they also involve the use of two
+rarely-used legacy features (RGB565 and color quantization) that don't
+make much sense when combined.
+
+Fixes #668
+Fixes #671
+Fixes #680
+
+CVE: CVE-2023-2804
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/42ce199c9cfe129e5e21afd48dfe757a6acf87c4]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ ChangeLog.md | 12 ++++++++++++
+ jdmaster.c | 5 +++--
+ jquant2.c | 5 +++--
+ 3 files changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 1c1e6538a..f1bfb3d87 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -1,3 +1,15 @@
++2.1.6
++=====
++
++### Significant changes relative to 2.1.5.1:
++
++1. Fixed an oversight in 1.4 beta1[8] that caused various segfaults and buffer
++overruns when attempting to decompress various specially-crafted malformed
++12-bit-per-component JPEG images using a 12-bit-per-component build of djpeg
++(`-DWITH_12BIT=1`) with both color quantization and RGB565 color conversion
++enabled.
++
++
+ 2.1.5.1
+ =======
+
+diff --git a/jdmaster.c b/jdmaster.c
+index a3690bf56..a9446adfd 100644
+--- a/jdmaster.c
++++ b/jdmaster.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1991-1997, Thomas G. Lane.
+ * Modified 2002-2009 by Guido Vollbeding.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2009-2011, 2016, 2019, 2022, D. R. Commander.
++ * Copyright (C) 2009-2011, 2016, 2019, 2022-2023, D. R. Commander.
+ * Copyright (C) 2013, Linaro Limited.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+@@ -480,7 +480,8 @@ master_selection(j_decompress_ptr cinfo)
+ if (cinfo->raw_data_out)
+ ERREXIT(cinfo, JERR_NOTIMPL);
+ /* 2-pass quantizer only works in 3-component color space. */
+- if (cinfo->out_color_components != 3) {
++ if (cinfo->out_color_components != 3 ||
++ cinfo->out_color_space == JCS_RGB565) {
+ cinfo->enable_1pass_quant = TRUE;
+ cinfo->enable_external_quant = FALSE;
+ cinfo->enable_2pass_quant = FALSE;
+diff --git a/jquant2.c b/jquant2.c
+index 44efb18ca..1c14ef763 100644
+--- a/jquant2.c
++++ b/jquant2.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1991-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2009, 2014-2015, 2020, D. R. Commander.
++ * Copyright (C) 2009, 2014-2015, 2020, 2023, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -1230,7 +1230,8 @@ jinit_2pass_quantizer(j_decompress_ptr cinfo)
+ cquantize->error_limiter = NULL;
+
+ /* Make sure jdmaster didn't give me a case I can't handle */
+- if (cinfo->out_color_components != 3)
++ if (cinfo->out_color_components != 3 ||
++ cinfo->out_color_space == JCS_RGB565)
+ ERREXIT(cinfo, JERR_NOTIMPL);
+
+ /* Allocate the histogram/inverse colormap storage */
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch
new file mode 100644
index 0000000000..af955a72f6
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch
@@ -0,0 +1,75 @@
+From 2e1b8a462f7f9f9bf6cd25a8516caa8203cc4593 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Thu, 6 Apr 2023 18:33:41 -0500
+Subject: [PATCH] jpeg_crop_scanline: Fix calc w/sclg + 2x4,4x2 samp
+
+When computing the downsampled width for a particular component,
+jpeg_crop_scanline() needs to take into account the fact that the
+libjpeg code uses a combination of IDCT scaling and upsampling to
+implement 4x2 and 2x4 upsampling with certain decompression scaling
+factors. Failing to account for that led to incomplete upsampling of
+4x2- or 2x4-subsampled components, which caused the color converter to
+read from uninitialized memory. With 12-bit data precision, this caused
+a buffer overrun or underrun and subsequent segfault if the
+uninitialized memory contained a value that was outside of the valid
+sample range (because the color converter uses the value as an array
+index.)
+
+Fixes #669
+
+CVE: CVE-2023-2804
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/2e1b8a462f7f9f9bf6cd25a8516caa8203cc4593]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ ChangeLog.md | 8 ++++++++
+ jdapistd.c | 10 ++++++----
+ 2 files changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index f1bfb3d87..0a075c3c5 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -9,6 +9,14 @@ overruns when attempting to decompress various specially-crafted malformed
+ (`-DWITH_12BIT=1`) with both color quantization and RGB565 color conversion
+ enabled.
+
++2. Fixed an issue whereby `jpeg_crop_scanline()` sometimes miscalculated the
++downsampled width for components with 4x2 or 2x4 subsampling factors if
++decompression scaling was enabled. This caused the components to be upsampled
++incompletely, which caused the color converter to read from uninitialized
++memory. With 12-bit data precision, this caused a buffer overrun or underrun
++and subsequent segfault if the sample value read from uninitialized memory was
++outside of the valid sample range.
++
+
+ 2.1.5.1
+ =======
+diff --git a/jdapistd.c b/jdapistd.c
+index 02cd0cb93..96cded112 100644
+--- a/jdapistd.c
++++ b/jdapistd.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2010, 2015-2020, 2022, D. R. Commander.
++ * Copyright (C) 2010, 2015-2020, 2022-2023, D. R. Commander.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+@@ -236,9 +236,11 @@ jpeg_crop_scanline(j_decompress_ptr cinfo, JDIMENSION *xoffset,
+ /* Set downsampled_width to the new output width. */
+ orig_downsampled_width = compptr->downsampled_width;
+ compptr->downsampled_width =
+- (JDIMENSION)jdiv_round_up((long)(cinfo->output_width *
+- compptr->h_samp_factor),
+- (long)cinfo->max_h_samp_factor);
++ (JDIMENSION)jdiv_round_up((long)cinfo->output_width *
++ (long)(compptr->h_samp_factor *
++ compptr->_DCT_scaled_size),
++ (long)(cinfo->max_h_samp_factor *
++ cinfo->_min_DCT_scaled_size));
+ if (compptr->downsampled_width < 2 && orig_downsampled_width >= 2)
+ reinit_upsampler = TRUE;
+
diff --git a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.1.3.bb b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.1.5.1.bb
index fdc035d5f7..86bf471eea 100644
--- a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.1.3.bb
+++ b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.1.5.1.bb
@@ -12,9 +12,11 @@ DEPENDS:append:x86:class-target = " nasm-native"
SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}-${PV}.tar.gz \
file://0001-libjpeg-turbo-fix-package_qa-error.patch \
+ file://CVE-2023-2804-1.patch \
+ file://CVE-2023-2804-2.patch \
"
-SRC_URI[sha256sum] = "467b310903832b033fe56cd37720d1b73a6a3bd0171dbf6ff0b620385f4f76d0"
+SRC_URI[sha256sum] = "2fdc3feb6e9deb17adec9bafa3321419aa19f8f4e5dea7bf8486844ca22207bf"
UPSTREAM_CHECK_URI = "http://sourceforge.net/projects/libjpeg-turbo/files/"
UPSTREAM_CHECK_REGEX = "/libjpeg-turbo/files/(?P<pver>(\d+[\.\-_]*)+)/"
diff --git a/meta/recipes-graphics/kmscube/kmscube_git.bb b/meta/recipes-graphics/kmscube/kmscube_git.bb
index 58ce26a3d5..98f110527e 100644
--- a/meta/recipes-graphics/kmscube/kmscube_git.bb
+++ b/meta/recipes-graphics/kmscube/kmscube_git.bb
@@ -6,7 +6,7 @@ OpenGL or OpenGL ES."
HOMEPAGE = "https://cgit.freedesktop.org/mesa/kmscube/"
LICENSE = "MIT"
SECTION = "graphics"
-DEPENDS = "virtual/libgles3 virtual/libgles2 virtual/egl libdrm"
+DEPENDS = "virtual/libgles3 virtual/libgles2 virtual/egl libdrm virtual/libgbm"
LIC_FILES_CHKSUM = "file://kmscube.c;beginline=1;endline=23;md5=8b309d4ee67b7315ff7381270dd631fb"
@@ -20,7 +20,6 @@ S = "${WORKDIR}/git"
inherit meson pkgconfig features_check
REQUIRED_DISTRO_FEATURES = "opengl"
-DEPENDS = "virtual/libgbm"
PACKAGECONFIG ??= ""
PACKAGECONFIG[gstreamer] = "-Dgstreamer=enabled,-Dgstreamer=disabled,gstreamer1.0 gstreamer1.0-plugins-base"
diff --git a/meta/recipes-graphics/libepoxy/files/0001-dispatch_common.h-define-also-EGL_NO_X11.patch b/meta/recipes-graphics/libepoxy/files/0001-dispatch_common.h-define-also-EGL_NO_X11.patch
deleted file mode 100644
index 971a3f54e0..0000000000
--- a/meta/recipes-graphics/libepoxy/files/0001-dispatch_common.h-define-also-EGL_NO_X11.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 7211120d1e2f059d900f3379b9790484dbcf7761 Mon Sep 17 00:00:00 2001
-From: Martin Jansa <Martin.Jansa@gmail.com>
-Date: Fri, 25 Oct 2019 11:09:34 +0000
-Subject: [PATCH] dispatch_common.h: define also EGL_NO_X11
-
-MESA_EGL_NO_X11_HEADERS was renamed to EGL_NO_X11 in:
-https://github.com/mesa3d/mesa/commit/6202a13b71e18dc31ba7e2f4ea915b67eacc1ddb
-
-Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
-Upstream-Status: Pending
-
----
- src/dispatch_common.h | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/src/dispatch_common.h b/src/dispatch_common.h
-index a136943..448c9b1 100644
---- a/src/dispatch_common.h
-+++ b/src/dispatch_common.h
-@@ -55,6 +55,7 @@
- * as EGL_NO_X11
- */
- # define MESA_EGL_NO_X11_HEADERS 1
-+# define EGL_NO_X11 1
- # endif
- #include "epoxy/egl.h"
- #endif
diff --git a/meta/recipes-graphics/libepoxy/libepoxy_1.5.9.bb b/meta/recipes-graphics/libepoxy/libepoxy_1.5.10.bb
index 487fc00360..3e29935640 100644
--- a/meta/recipes-graphics/libepoxy/libepoxy_1.5.9.bb
+++ b/meta/recipes-graphics/libepoxy/libepoxy_1.5.10.bb
@@ -9,10 +9,9 @@ SECTION = "libs"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=58ef4c80d401e07bd9ee8b6b58cf464b"
-SRC_URI = "https://github.com/anholt/${BPN}/releases/download/${PV}/${BP}.tar.xz \
- file://0001-dispatch_common.h-define-also-EGL_NO_X11.patch \
- "
-SRC_URI[sha256sum] = "d168a19a6edfdd9977fef1308ccf516079856a4275cf876de688fb7927e365e4"
+SRC_URI = "git://github.com/anholt/libepoxy;branch=master;protocol=https"
+SRCREV = "c84bc9459357a40e46e2fec0408d04fbdde2c973"
+S = "${WORKDIR}/git"
UPSTREAM_CHECK_URI = "https://github.com/anholt/libepoxy/releases"
inherit meson pkgconfig features_check
diff --git a/meta/recipes-graphics/libsdl2/libsdl2/0001-Fix-potential-memory-leak-in-GLES_CreateTextur.patch b/meta/recipes-graphics/libsdl2/libsdl2/0001-Fix-potential-memory-leak-in-GLES_CreateTextur.patch
new file mode 100644
index 0000000000..31bda54dd3
--- /dev/null
+++ b/meta/recipes-graphics/libsdl2/libsdl2/0001-Fix-potential-memory-leak-in-GLES_CreateTextur.patch
@@ -0,0 +1,40 @@
+From 3cf2048b647484cc3a6abd0d78be60cead47b42d Mon Sep 17 00:00:00 2001
+From: Changqing Li <changqing.li@windriver.com>
+Date: Fri, 24 Feb 2023 16:59:19 +0800
+Subject: [PATCH] Fix potential memory leak in GLES_CreateTexture
+
+CVE: CVE-2022-4743
+Upstream-Status: Backport [https://github.com/libsdl-org/SDL/commit/00b67f55727bc0944c3266e2b875440da132ce4b]
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ src/render/opengles/SDL_render_gles.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/render/opengles/SDL_render_gles.c b/src/render/opengles/SDL_render_gles.c
+index a6b58f2..237b1d6 100644
+--- a/src/render/opengles/SDL_render_gles.c
++++ b/src/render/opengles/SDL_render_gles.c
+@@ -368,6 +368,9 @@ GLES_CreateTexture(SDL_Renderer * renderer, SDL_Texture * texture)
+ renderdata->glGenTextures(1, &data->texture);
+ result = renderdata->glGetError();
+ if (result != GL_NO_ERROR) {
++ if (texture->access == SDL_TEXTUREACCESS_STREAMING) {
++ SDL_free(data->pixels);
++ }
+ SDL_free(data);
+ return GLES_SetError("glGenTextures()", result);
+ }
+@@ -396,6 +399,9 @@ GLES_CreateTexture(SDL_Renderer * renderer, SDL_Texture * texture)
+
+ result = renderdata->glGetError();
+ if (result != GL_NO_ERROR) {
++ if (texture->access == SDL_TEXTUREACCESS_STREAMING) {
++ SDL_free(data->pixels);
++ }
+ SDL_free(data);
+ return GLES_SetError("glTexImage2D()", result);
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/libsdl2/libsdl2_2.0.20.bb b/meta/recipes-graphics/libsdl2/libsdl2_2.0.20.bb
index c1c827af79..abcf232e25 100644
--- a/meta/recipes-graphics/libsdl2/libsdl2_2.0.20.bb
+++ b/meta/recipes-graphics/libsdl2/libsdl2_2.0.20.bb
@@ -24,6 +24,7 @@ PROVIDES = "virtual/libsdl2"
SRC_URI = "http://www.libsdl.org/release/SDL2-${PV}.tar.gz \
file://optional-libunwind-generic.patch \
file://0001-sdlchecks.cmake-pass-cflags-to-the-appropriate-cmake.patch \
+ file://0001-Fix-potential-memory-leak-in-GLES_CreateTextur.patch \
"
SRC_URI:append:class-native = " file://0001-Disable-libunwind-in-native-OE-builds-by-not-looking.patch"
diff --git a/meta/recipes-graphics/piglit/piglit/0001-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch b/meta/recipes-graphics/piglit/piglit/0002-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch
index 5d6ec368ba..5d6ec368ba 100644
--- a/meta/recipes-graphics/piglit/piglit/0001-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch
+++ b/meta/recipes-graphics/piglit/piglit/0002-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch
diff --git a/meta/recipes-graphics/piglit/piglit/0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch b/meta/recipes-graphics/piglit/piglit/0003-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch
index 16c7c5c803..16c7c5c803 100644
--- a/meta/recipes-graphics/piglit/piglit/0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch
+++ b/meta/recipes-graphics/piglit/piglit/0003-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch
diff --git a/meta/recipes-graphics/piglit/piglit/0005-cmake-Don-t-enable-GLX-if-tests-are-disabled.patch b/meta/recipes-graphics/piglit/piglit/0005-cmake-Don-t-enable-GLX-if-tests-are-disabled.patch
new file mode 100644
index 0000000000..ef6fda0f4e
--- /dev/null
+++ b/meta/recipes-graphics/piglit/piglit/0005-cmake-Don-t-enable-GLX-if-tests-are-disabled.patch
@@ -0,0 +1,32 @@
+From 13ff43fe760ac343b33d8e8c84b89886aac07116 Mon Sep 17 00:00:00 2001
+From: Tom Hochstein <tom.hochstein@nxp.com>
+Date: Fri, 3 Jun 2022 10:44:29 -0500
+Subject: [PATCH] cmake: Don't enable GLX if tests are disabled
+
+Allow building for systems that don't support GLX.
+
+Upstream-Status: Submitted [https://gitlab.freedesktop.org/mesa/piglit/-/merge_requests/720]
+Signed-off-by: Tom Hochstein <tom.hochstein@nxp.com>
+---
+ CMakeLists.txt | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index e1aeb5ddf..85e171aba 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -134,10 +134,7 @@ if(PIGLIT_BUILD_CL_TESTS)
+ endif(PIGLIT_BUILD_CL_TESTS)
+
+ IF(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+- if(X11_FOUND AND OPENGL_gl_LIBRARY)
+- # Assume the system has GLX. In the future, systems may exist
+- # with libGL and libX11 but no GLX, but that world hasn't
+- # arrived yet.
++ if(X11_FOUND AND OPENGL_gl_LIBRARY AND PIGLIT_BUILD_GLX_TESTS)
+ set(PIGLIT_HAS_GLX True)
+ add_definitions(-DPIGLIT_HAS_GLX)
+ endif()
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/piglit/piglit_git.bb b/meta/recipes-graphics/piglit/piglit_git.bb
index 3ae7a14e46..78a5d6248a 100644
--- a/meta/recipes-graphics/piglit/piglit_git.bb
+++ b/meta/recipes-graphics/piglit/piglit_git.bb
@@ -8,10 +8,11 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=b2beded7103a3d8a442a2a0391d607b0"
SRC_URI = "git://gitlab.freedesktop.org/mesa/piglit.git;protocol=https;branch=main \
file://0001-cmake-install-bash-completions-in-the-right-place.patch \
- file://0001-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch \
file://0001-Add-a-missing-include-for-htobe32-definition.patch \
- file://0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch \
- "
+ file://0002-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch \
+ file://0003-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch \
+ file://0005-cmake-Don-t-enable-GLX-if-tests-are-disabled.patch"
+
UPSTREAM_CHECK_COMMITS = "1"
SRCREV = "2f80c7cc9c02d37574dc8ba3140b7dd8eb3cbf82"
@@ -36,10 +37,12 @@ REQUIRED_DISTRO_FEATURES += "opengl"
export TEMP = "${B}/temp/"
do_compile[dirs] =+ "${B}/temp/"
-PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)}"
+PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11 glx', '', d)}"
PACKAGECONFIG[freeglut] = "-DPIGLIT_USE_GLUT=1,-DPIGLIT_USE_GLUT=0,freeglut,"
+PACKAGECONFIG[glx] = "-DPIGLIT_BUILD_GLX_TESTS=ON,-DPIGLIT_BUILD_GLX_TESTS=OFF"
+PACKAGECONFIG[opencl] = "-DPIGLIT_BUILD_CL_TESTS=ON,-DPIGLIT_BUILD_CL_TESTS=OFF,virtual/opencl-icd"
PACKAGECONFIG[x11] = "-DPIGLIT_BUILD_GL_TESTS=ON,-DPIGLIT_BUILD_GL_TESTS=OFF,${X11_DEPS}, ${X11_RDEPS}"
-PACKAGECONFIG[vulkan] = "-DPIGLIT_BUILD_VK_TESTS=ON,-DPIGLIT_BUILD_VK_TESTS=OFF,vulkan-loader"
+PACKAGECONFIG[vulkan] = "-DPIGLIT_BUILD_VK_TESTS=ON,-DPIGLIT_BUILD_VK_TESTS=OFF,glslang-native vulkan-loader,glslang"
export PIGLIT_BUILD_DIR = "../../../../git"
diff --git a/meta/recipes-graphics/spir/spirv-headers_1.3.204.1.bb b/meta/recipes-graphics/spir/spirv-headers_1.3.204.1.bb
index 72416b441f..9e4a695325 100644
--- a/meta/recipes-graphics/spir/spirv-headers_1.3.204.1.bb
+++ b/meta/recipes-graphics/spir/spirv-headers_1.3.204.1.bb
@@ -8,7 +8,7 @@ LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENSE;md5=c938b85bceb8fb26c1a807f28a52ae2d"
SRCREV = "b42ba6d92faf6b4938e6f22ddd186dbdacc98d78"
-SRC_URI = "git://github.com/KhronosGroup/SPIRV-Headers;protocol=https;branch=master"
+SRC_URI = "git://github.com/KhronosGroup/SPIRV-Headers;protocol=https;branch=main"
PE = "1"
UPSTREAM_CHECK_GITTAGREGEX = "sdk-(?P<pver>\d+(\.\d+)+)"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/vulkan/vulkan-samples_git.bb b/meta/recipes-graphics/vulkan/vulkan-samples_git.bb
index 53c7254ce7..ffb8d88ee6 100644
--- a/meta/recipes-graphics/vulkan/vulkan-samples_git.bb
+++ b/meta/recipes-graphics/vulkan/vulkan-samples_git.bb
@@ -5,7 +5,7 @@ LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE;md5=48aa35cefb768436223a6e7f18dc2a2a"
-SRC_URI = "gitsm://github.com/KhronosGroup/Vulkan-Samples.git;branch=master;protocol=https \
+SRC_URI = "gitsm://github.com/KhronosGroup/Vulkan-Samples.git;branch=main;protocol=https;lfs=0 \
file://0001-CMakeLists.txt-do-not-hardcode-lib-as-installation-t.patch \
file://debugfix.patch \
"
diff --git a/meta/recipes-graphics/waffle/waffle/0001-meson.build-request-native-wayland-scanner.patch b/meta/recipes-graphics/waffle/waffle/0001-meson.build-request-native-wayland-scanner.patch
new file mode 100644
index 0000000000..4b3a0e7c4a
--- /dev/null
+++ b/meta/recipes-graphics/waffle/waffle/0001-meson.build-request-native-wayland-scanner.patch
@@ -0,0 +1,28 @@
+From 0961787d2bf0d359a3ead89e9cec642818b32dea Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex@linutronix.de>
+Date: Tue, 5 Jul 2022 11:51:39 +0200
+Subject: [PATCH] meson.build: request native wayland-scanner
+
+This matters in cross compilation, as otherwise meson will
+try to use a cross-binary, and fail.
+
+Upstream-Status: Submitted [https://gitlab.freedesktop.org/mesa/waffle/-/merge_requests/110]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+
+---
+ meson.build | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/meson.build b/meson.build
+index ca6a212..3177bde 100644
+--- a/meson.build
++++ b/meson.build
+@@ -110,7 +110,7 @@ else
+ 'wayland-egl', version : '>= 9.1', required : get_option('wayland'),
+ )
+ dep_wayland_scanner = dependency(
+- 'wayland-scanner', version : '>= 1.15', required : get_option('wayland'),
++ 'wayland-scanner', version : '>= 1.15', required : get_option('wayland'), native: true,
+ )
+ if dep_wayland_scanner.found()
+ prog_wayland_scanner = find_program(dep_wayland_scanner.get_variable(pkgconfig: 'wayland_scanner'))
diff --git a/meta/recipes-graphics/waffle/waffle/0001-waffle-do-not-make-core-protocol-into-the-library.patch b/meta/recipes-graphics/waffle/waffle/0001-waffle-do-not-make-core-protocol-into-the-library.patch
index 24b2de5d9c..60e6318f7a 100644
--- a/meta/recipes-graphics/waffle/waffle/0001-waffle-do-not-make-core-protocol-into-the-library.patch
+++ b/meta/recipes-graphics/waffle/waffle/0001-waffle-do-not-make-core-protocol-into-the-library.patch
@@ -1,4 +1,4 @@
-From 7610ec4b572d3a54d30fca6798f0c406f3fd8a46 Mon Sep 17 00:00:00 2001
+From 71f9399d6cea1e2e885a98b98d82eb628832a86e Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex@linutronix.de>
Date: Tue, 26 Oct 2021 08:52:17 +0200
Subject: [PATCH] waffle: do not make core protocol into the library
@@ -9,28 +9,13 @@ wayland.xml from the host.
Upstream-Status: Inappropriate [oe-core specific]
Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+
---
- meson.build | 4 ----
src/waffle/meson.build | 7 -------
- 2 files changed, 11 deletions(-)
+ 1 file changed, 7 deletions(-)
-diff --git a/meson.build b/meson.build
-index ffc02ff..0bb6128 100644
---- a/meson.build
-+++ b/meson.build
-@@ -104,10 +104,6 @@ else
- dep_wayland_client = dependency(
- 'wayland-client', version : '>= 1.10', required : get_option('wayland'),
- )
-- if dep_wayland_client.found()
-- wayland_core_xml = join_paths(dep_wayland_client.get_pkgconfig_variable('pkgdatadir'),
-- 'wayland.xml')
-- endif
- dep_wayland_egl = dependency(
- 'wayland-egl', version : '>= 9.1', required : get_option('wayland'),
- )
diff --git a/src/waffle/meson.build b/src/waffle/meson.build
-index 01898c8..6245868 100644
+index e2636c7..3ff5762 100644
--- a/src/waffle/meson.build
+++ b/src/waffle/meson.build
@@ -88,12 +88,6 @@ if build_surfaceless
diff --git a/meta/recipes-graphics/waffle/waffle_1.7.0.bb b/meta/recipes-graphics/waffle/waffle_1.7.2.bb
index f1fd9e7630..cb917d8894 100644
--- a/meta/recipes-graphics/waffle/waffle_1.7.0.bb
+++ b/meta/recipes-graphics/waffle/waffle_1.7.2.bb
@@ -9,15 +9,16 @@ LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=4c5154407c2490750dd461c50ad94797 \
file://include/waffle-1/waffle.h;endline=24;md5=61dbf8697f61c78645e75a93c585b1bf"
-SRC_URI = "git://gitlab.freedesktop.org/mesa/waffle.git;protocol=https;branch=master \
+SRC_URI = "git://gitlab.freedesktop.org/mesa/waffle.git;protocol=https;branch=maint-1.7 \
file://0001-waffle-do-not-make-core-protocol-into-the-library.patch \
+ file://0001-meson.build-request-native-wayland-scanner.patch \
"
-SRCREV = "905c6c10f2483adf0cbfa024e2d3c2ed541fb300"
+SRCREV = "f3b42a7216105498842bc6ba77d8481b90d6f5f9"
S = "${WORKDIR}/git"
inherit meson features_check lib_package bash-completion pkgconfig
-DEPENDS:append = " python3 cmake-native"
+DEPENDS:append = " python3"
# This should be overridden per-machine to reflect the capabilities of the GL
# stack.
@@ -46,5 +47,5 @@ PACKAGECONFIG[surfaceless-egl] = "-Dsurfaceless_egl=enabled,-Dsurfaceless_egl=di
# TODO: optionally build manpages and examples
do_install:append() {
- sed -i -e "s,${WORKDIR},,g" ${D}/${libdir}/cmake/Waffle/WaffleConfig.cmake
+ rm -rf ${D}${datadir}/zsh
}
diff --git a/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch b/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch
new file mode 100644
index 0000000000..df204508e9
--- /dev/null
+++ b/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch
@@ -0,0 +1,111 @@
+From 5eed6609619cc2e4eaa8618d11c15d442abf54be Mon Sep 17 00:00:00 2001
+From: Derek Foreman <derek.foreman@collabora.com>
+Date: Fri, 28 Jan 2022 13:18:37 -0600
+Subject: [PATCH] util: Limit size of wl_map
+
+Since server IDs are basically indistinguishable from really big client
+IDs at many points in the source, it's theoretically possible to overflow
+a map and either overflow server IDs into the client ID space, or grow
+client IDs into the server ID space. This would currently take a massive
+amount of RAM, but the definition of massive changes yearly.
+
+Prevent this by placing a ridiculous but arbitrary upper bound on the
+number of items we can put in a map: 0xF00000, somewhere over 15 million.
+This should satisfy pathological clients without restriction, but stays
+well clear of the 0xFF000000 transition point between server and client
+IDs. It will still take an improbable amount of RAM to hit this, and a
+client could still exhaust all RAM in this way, but our goal is to prevent
+overflow and undefined behaviour.
+
+Fixes #224
+
+Signed-off-by: Derek Foreman <derek.foreman@collabora.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3782
+
+Reference to upstream patch:
+https://gitlab.freedesktop.org/wayland/wayland/-/commit/b19488c7154b902354cb26a27f11415d7799b0b2
+
+[DP: adjust context for wayland version 1.20.0]
+Signed-off-by: Dragos-Marian Panait <dragos.panait@windriver.com>
+---
+ src/wayland-private.h | 1 +
+ src/wayland-util.c | 25 +++++++++++++++++++++++--
+ 2 files changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/src/wayland-private.h b/src/wayland-private.h
+index 9bf8cb7..35dc40e 100644
+--- a/src/wayland-private.h
++++ b/src/wayland-private.h
+@@ -45,6 +45,7 @@
+ #define WL_MAP_SERVER_SIDE 0
+ #define WL_MAP_CLIENT_SIDE 1
+ #define WL_SERVER_ID_START 0xff000000
++#define WL_MAP_MAX_OBJECTS 0x00f00000
+ #define WL_CLOSURE_MAX_ARGS 20
+
+ struct wl_object {
+diff --git a/src/wayland-util.c b/src/wayland-util.c
+index d5973bf..3e45d19 100644
+--- a/src/wayland-util.c
++++ b/src/wayland-util.c
+@@ -195,6 +195,7 @@ wl_map_insert_new(struct wl_map *map, uint32_t flags, void *data)
+ union map_entry *start, *entry;
+ struct wl_array *entries;
+ uint32_t base;
++ uint32_t count;
+
+ if (map->side == WL_MAP_CLIENT_SIDE) {
+ entries = &map->client_entries;
+@@ -215,10 +216,25 @@ wl_map_insert_new(struct wl_map *map, uint32_t flags, void *data)
+ start = entries->data;
+ }
+
++ /* wl_array only grows, so if we have too many objects at
++ * this point there's no way to clean up. We could be more
++ * pro-active about trying to avoid this allocation, but
++ * it doesn't really matter because at this point there is
++ * nothing to be done but disconnect the client and delete
++ * the whole array either way.
++ */
++ count = entry - start;
++ if (count > WL_MAP_MAX_OBJECTS) {
++ /* entry->data is freshly malloced garbage, so we'd
++ * better make it a NULL so wl_map_for_each doesn't
++ * dereference it later. */
++ entry->data = NULL;
++ return 0;
++ }
+ entry->data = data;
+ entry->next |= (flags & 0x1) << 1;
+
+- return (entry - start) + base;
++ return count + base;
+ }
+
+ int
+@@ -235,6 +251,9 @@ wl_map_insert_at(struct wl_map *map, uint32_t flags, uint32_t i, void *data)
+ i -= WL_SERVER_ID_START;
+ }
+
++ if (i > WL_MAP_MAX_OBJECTS)
++ return -1;
++
+ count = entries->size / sizeof *start;
+ if (count < i)
+ return -1;
+@@ -269,8 +288,10 @@ wl_map_reserve_new(struct wl_map *map, uint32_t i)
+ i -= WL_SERVER_ID_START;
+ }
+
+- count = entries->size / sizeof *start;
++ if (i > WL_MAP_MAX_OBJECTS)
++ return -1;
+
++ count = entries->size / sizeof *start;
+ if (count < i)
+ return -1;
+
+--
+2.37.3
diff --git a/meta/recipes-graphics/wayland/wayland_1.20.0.bb b/meta/recipes-graphics/wayland/wayland_1.20.0.bb
index bd437767b2..9351d2ed6a 100644
--- a/meta/recipes-graphics/wayland/wayland_1.20.0.bb
+++ b/meta/recipes-graphics/wayland/wayland_1.20.0.bb
@@ -16,7 +16,9 @@ SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \
file://run-ptest \
file://0002-Do-not-hardcode-the-path-to-wayland-scanner.patch \
file://0001-build-Fix-strndup-detection-on-MinGW.patch \
+ file://CVE-2021-3782.patch \
"
+
SRC_URI[sha256sum] = "b8a034154c7059772e0fdbd27dbfcda6c732df29cae56a82274f6ec5d7cd8725"
UPSTREAM_CHECK_URI = "https://wayland.freedesktop.org/releases.html"
diff --git a/meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch b/meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch
deleted file mode 100644
index 1ac0695222..0000000000
--- a/meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From ece4c3d261aeec230869c0304ed1011ff6837c16 Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Sat, 12 Sep 2020 14:04:04 -0700
-Subject: [PATCH] Fix atomic modesetting with musl
-
-atomic modesetting seems to fail with drm weston backend and this patch fixes
-it, below errors are seen before weston exits
-
-atomic: couldn't commit new state: Invalid argument
-
-Upstream-Status: Submitted [https://gitlab.freedesktop.org/wayland/weston/-/issues/158]
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-
----
- libweston/backend-drm/kms.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/libweston/backend-drm/kms.c b/libweston/backend-drm/kms.c
-index 780d007..9994da1 100644
---- a/libweston/backend-drm/kms.c
-+++ b/libweston/backend-drm/kms.c
-@@ -1142,8 +1142,8 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
- wl_list_for_each(plane, &b->plane_list, link) {
- drm_debug(b, "\t\t[atomic] starting with plane %lu disabled\n",
- (unsigned long) plane->plane_id);
-- plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
-- plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0);
-+ //plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
-+ //plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0);
- }
-
- flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
diff --git a/meta/recipes-graphics/wayland/weston_10.0.0.bb b/meta/recipes-graphics/wayland/weston_10.0.2.bb
index 93f7b59659..e09f94d9bb 100644
--- a/meta/recipes-graphics/wayland/weston_10.0.0.bb
+++ b/meta/recipes-graphics/wayland/weston_10.0.2.bb
@@ -6,16 +6,14 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=d79ee9e66bb0f95d3386a7acae780b70 \
file://libweston/compositor.c;endline=27;md5=eb6d5297798cabe2ddc65e2af519bcf0 \
"
-SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \
+SRC_URI = "https://gitlab.freedesktop.org/wayland/weston/-/releases/${PV}/downloads/${BPN}-${PV}.tar.xz \
file://weston.png \
file://weston.desktop \
file://xwayland.weston-start \
file://systemd-notify.weston-start \
"
-SRC_URI:append:libc-musl = " file://dont-use-plane-add-prop.patch "
-
-SRC_URI[sha256sum] = "5c23964112b90238bed39e5dd1e41cd71a79398813cdc3bbb15a9fdc94e547ae"
+SRC_URI[sha256sum] = "89646ca0d9f8d413c2767e5c3828eaa3fa149c2a105b3729a6894fa7cf1549e7"
UPSTREAM_CHECK_URI = "https://wayland.freedesktop.org/releases.html"
@@ -76,7 +74,7 @@ PACKAGECONFIG[webp] = "-Dimage-webp=true,-Dimage-webp=false,libwebp"
# Weston with systemd-login support
PACKAGECONFIG[systemd] = "-Dsystemd=true -Dlauncher-logind=true,-Dsystemd=false -Dlauncher-logind=false,systemd dbus"
# Weston with Xwayland support (requires X11 and Wayland)
-PACKAGECONFIG[xwayland] = "-Dxwayland=true,-Dxwayland=false"
+PACKAGECONFIG[xwayland] = "-Dxwayland=true,-Dxwayland=false,xwayland"
# colord CMS support
PACKAGECONFIG[colord] = "-Dcolor-management-colord=true,-Dcolor-management-colord=false,colord"
# Clients support
diff --git a/meta/recipes-graphics/xorg-app/mkfontscale_1.2.1.bb b/meta/recipes-graphics/xorg-app/mkfontscale_1.2.2.bb
index 2d0c51a423..cd658ab219 100644
--- a/meta/recipes-graphics/xorg-app/mkfontscale_1.2.1.bb
+++ b/meta/recipes-graphics/xorg-app/mkfontscale_1.2.2.bb
@@ -17,5 +17,5 @@ BBCLASSEXTEND = "native"
LIC_FILES_CHKSUM = "file://COPYING;md5=99b1e1269aba5179139b9e4380fc0934"
-SRC_URI[md5sum] = "215940de158b1a3d8b3f8b442c606e2f"
-SRC_URI[sha256sum] = "ca0495eb974a179dd742bfa6199d561bda1c8da4a0c5a667f21fd82aaab6bac7"
+SRC_URI_EXT = "xz"
+SRC_URI[sha256sum] = "8ae3fb5b1fe7436e1f565060acaa3e2918fe745b0e4979b5593968914fe2d5c4"
diff --git a/meta/recipes-graphics/xorg-app/xdpyinfo_1.3.2.bb b/meta/recipes-graphics/xorg-app/xdpyinfo_1.3.4.bb
index 2d10b7acca..aaa8aa8903 100644
--- a/meta/recipes-graphics/xorg-app/xdpyinfo_1.3.2.bb
+++ b/meta/recipes-graphics/xorg-app/xdpyinfo_1.3.4.bb
@@ -14,7 +14,7 @@ PE = "1"
SRC_URI += "file://disable-xkb.patch"
-SRC_URI[md5sum] = "8809037bd48599af55dad81c508b6b39"
-SRC_URI[sha256sum] = "30238ed915619e06ceb41721e5f747d67320555cc38d459e954839c189ccaf51"
+SRC_URI_EXT = "xz"
+SRC_URI[sha256sum] = "a8ada581dbd7266440d7c3794fa89edf6b99b8857fc2e8c31042684f3af4822b"
EXTRA_OECONF = "--disable-xkb"
diff --git a/meta/recipes-graphics/xorg-app/xev_1.2.4.bb b/meta/recipes-graphics/xorg-app/xev_1.2.5.bb
index 9407fa65f1..0e3def6eee 100644
--- a/meta/recipes-graphics/xorg-app/xev_1.2.4.bb
+++ b/meta/recipes-graphics/xorg-app/xev_1.2.5.bb
@@ -14,4 +14,6 @@ DEPENDS += "libxrandr xorgproto"
SRC_URI += "file://diet-x11.patch"
-SRC_URI[sha256sum] = "d700e08bfe751ed2dbf802baa204b056d0e49348b6eb3c6f9cb035d8ae4885e2"
+SRC_URI[sha256sum] = "c9461a4389714e0f33974f9e75934bdc38d836a0f059b8dc089c7cbf2ce36ec1"
+
+SRC_URI_EXT = "xz"
diff --git a/meta/recipes-graphics/xorg-app/xmodmap_1.0.10.bb b/meta/recipes-graphics/xorg-app/xmodmap_1.0.11.bb
index 7dedb03a2b..dc955aa977 100644
--- a/meta/recipes-graphics/xorg-app/xmodmap_1.0.10.bb
+++ b/meta/recipes-graphics/xorg-app/xmodmap_1.0.11.bb
@@ -12,5 +12,6 @@ LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=272c17e96370e1e74773fa22d9989621"
PE = "1"
-SRC_URI[md5sum] = "51f1d30a525e9903280ffeea2744b1f6"
-SRC_URI[sha256sum] = "473f0941d7439d501bb895ff358832b936ec34c749b9704c37a15e11c318487c"
+SRC_URI[sha256sum] = "9a2f8168f7b0bc382828847403902cb6bf175e17658b36189eac87edda877e81"
+
+SRC_URI_EXT = "xz"
diff --git a/meta/recipes-graphics/xorg-app/xorg-app-common.inc b/meta/recipes-graphics/xorg-app/xorg-app-common.inc
index 1c64e20aac..5dbe8abe86 100644
--- a/meta/recipes-graphics/xorg-app/xorg-app-common.inc
+++ b/meta/recipes-graphics/xorg-app/xorg-app-common.inc
@@ -8,7 +8,8 @@ DEPENDS = "util-macros-native virtual/libx11"
# depends on virtual/libx11
REQUIRED_DISTRO_FEATURES = "x11"
-SRC_URI = "${XORG_MIRROR}/individual/app/${BPN}-${PV}.tar.bz2"
+SRC_URI_EXT = "bz2"
+SRC_URI = "${XORG_MIRROR}/individual/app/${BPN}-${PV}.tar.${SRC_URI_EXT}"
inherit autotools pkgconfig features_check
diff --git a/meta/recipes-graphics/xorg-app/xrandr_1.5.1.bb b/meta/recipes-graphics/xorg-app/xrandr_1.5.1.bb
index 57b43ff28c..0e0347f768 100644
--- a/meta/recipes-graphics/xorg-app/xrandr_1.5.1.bb
+++ b/meta/recipes-graphics/xorg-app/xrandr_1.5.1.bb
@@ -11,8 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=fe1608bdb33cf8c62a4438f7d34679b3"
DEPENDS += "libxrandr libxrender"
PE = "1"
-SRC_URI = "${XORG_MIRROR}/individual/app/${BPN}-${PV}.tar.xz"
-
+SRC_URI_EXT = "xz"
SRC_URI[md5sum] = "fe40f7a4fd39dd3a02248d3e0b1972e4"
SRC_URI[sha256sum] = "7bc76daf9d72f8aff885efad04ce06b90488a1a169d118dea8a2b661832e8762"
diff --git a/meta/recipes-graphics/xorg-driver/xf86-input-synaptics/64bit_time_t_support.patch b/meta/recipes-graphics/xorg-driver/xf86-input-synaptics/64bit_time_t_support.patch
deleted file mode 100644
index 4bb7fb3e23..0000000000
--- a/meta/recipes-graphics/xorg-driver/xf86-input-synaptics/64bit_time_t_support.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-This patch avoids using time field of input_event structure which is not available
-on 32bit arches supporting 64bit time_t structs, Patch makes it compatible with new
-and keeps old input.h implementation functional as well.
-
-See https://sourceware.org/glibc/wiki/Y2038ProofnessDesign
-
-Upstream-Status: Pending
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-
---- a/src/eventcomm.c
-+++ b/src/eventcomm.c
-@@ -575,10 +575,12 @@ SynapticsReadEvent(InputInfoPtr pInfo, s
- ev->type = EV_SYN;
- ev->code = SYN_REPORT;
- ev->value = 0;
-- ev->time = last_event_time;
-- } else if (ev->type == EV_SYN)
-- last_event_time = ev->time;
--
-+ ev->input_event_sec = last_event_time.tv_sec;
-+ ev->input_event_usec = last_event_time.tv_usec;
-+ } else if (ev->type == EV_SYN) {
-+ last_event_time.tv_sec = ev->input_event_sec;
-+ last_event_time.tv_usec = ev->input_event_usec;
-+ }
- return TRUE;
- }
-
-@@ -725,7 +727,7 @@ EventReadHwState(InputInfoPtr pInfo,
- case SYN_REPORT:
- hw->numFingers = count_fingers(pInfo, comm);
- if (proto_data->have_monotonic_clock)
-- hw->millis = 1000 * ev.time.tv_sec + ev.time.tv_usec / 1000;
-+ hw->millis = 1000 * ev.input_event_sec + ev.input_event_usec / 1000;
- else
- hw->millis = GetTimeInMillis();
- SynapticsCopyHwState(hwRet, hw);
---- a/src/eventcomm.h
-+++ b/src/eventcomm.h
-@@ -34,6 +34,11 @@
- #include <xf86Xinput.h>
- #include "synproto.h"
-
-+#ifndef input_event_sec
-+#define input_event_sec time.tv_sec
-+#define input_event_usec time.tv_usec
-+#endif
-+
- /* for auto-dev: */
- #define DEV_INPUT_EVENT "/dev/input"
- #define EVENT_DEV_NAME "event"
diff --git a/meta/recipes-graphics/xorg-driver/xf86-input-synaptics_1.9.1.bb b/meta/recipes-graphics/xorg-driver/xf86-input-synaptics_1.9.2.bb
index 388350c96e..8e446290b2 100644
--- a/meta/recipes-graphics/xorg-driver/xf86-input-synaptics_1.9.1.bb
+++ b/meta/recipes-graphics/xorg-driver/xf86-input-synaptics_1.9.2.bb
@@ -10,9 +10,8 @@ advanced features of the touchpad to become available."
LIC_FILES_CHKSUM = "file://COPYING;md5=55aacd3535a741824955c5eb8f061398"
-SRC_URI += "file://64bit_time_t_support.patch"
-
-SRC_URI[md5sum] = "cfb79d3c975151f9bbf30b727c260cb9"
-SRC_URI[sha256sum] = "7af83526eff1c76e8b9e1553b34245c203d029028d8044dd9dcf71eef1001576"
+SRC_URI[sha256sum] = "b8fa4aab913fc63754bbd6439e020658c412743a055201ddf212760593962c38"
DEPENDS += "libxi mtdev libxtst libevdev"
+
+XORG_DRIVER_COMPRESSOR = ".tar.xz"
diff --git a/meta/recipes-graphics/xorg-font/encodings/nocompiler.patch b/meta/recipes-graphics/xorg-font/encodings/nocompiler.patch
index ec7c7d80c1..9ee7abe775 100644
--- a/meta/recipes-graphics/xorg-font/encodings/nocompiler.patch
+++ b/meta/recipes-graphics/xorg-font/encodings/nocompiler.patch
@@ -1,4 +1,4 @@
-From b08c43a0842076e0a94e88ad6456a9326cd7ffc9 Mon Sep 17 00:00:00 2001
+From 0c0790e90a68bf8290da5c0e57142bf7c620f039 Mon Sep 17 00:00:00 2001
From: Richard Purdie <richard.purdie@linuxfoundation.org>
Date: Tue, 17 May 2011 23:03:02 +0000
Subject: [PATCH] Improve handling of 'all' architecture recipes and their
@@ -21,12 +21,12 @@ RP 17/5/2011
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/configure.ac b/configure.ac
-index 622c27b..5ee84ed 100644
+index b80e3de..80208bb 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -3,12 +3,12 @@ AC_INIT([encodings], [1.0.5],
+@@ -3,12 +3,12 @@ AC_INIT([encodings], [1.0.6],
[https://gitlab.freedesktop.org/xorg/font/encodings/issues])
- AM_INIT_AUTOMAKE([foreign dist-bzip2])
+ AM_INIT_AUTOMAKE([foreign dist-xz])
-# Require xorg-macros: XORG_DEFAULT_OPTIONS
m4_ifndef([XORG_MACROS_VERSION],
diff --git a/meta/recipes-graphics/xorg-font/encodings_1.0.5.bb b/meta/recipes-graphics/xorg-font/encodings_1.0.6.bb
index 8ddbaf24dd..be82414eef 100644
--- a/meta/recipes-graphics/xorg-font/encodings_1.0.5.bb
+++ b/meta/recipes-graphics/xorg-font/encodings_1.0.6.bb
@@ -7,14 +7,14 @@ require xorg-font-common.inc
LICENSE = "PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=9da93f2daf2d5572faa2bfaf0dbd9e76"
PE = "1"
-PR = "r3"
DEPENDS = "mkfontscale-native mkfontdir-native font-util-native"
RDEPENDS:${PN} = ""
SRC_URI += "file://nocompiler.patch"
-SRC_URI[md5sum] = "bbae4f247b88ccde0e85ed6a403da22a"
-SRC_URI[sha256sum] = "bd96e16143a044b19e87f217cf6a3763a70c561d1076aad6f6d862ec41774a31"
+SRC_URI[sha256sum] = "77e301de661f35a622b18f60b555a7e7d8c4d5f43ed41410e830d5ac9084fc26"
+
+SRC_URI_EXT = "xz"
inherit allarch
diff --git a/meta/recipes-graphics/xorg-font/font-util_1.3.2.bb b/meta/recipes-graphics/xorg-font/font-util_1.3.3.bb
index b3e832756b..64c705770d 100644
--- a/meta/recipes-graphics/xorg-font/font-util_1.3.2.bb
+++ b/meta/recipes-graphics/xorg-font/font-util_1.3.3.bb
@@ -16,7 +16,8 @@ RDEPENDS:${PN}:class-native = ""
BBCLASSEXTEND = "native"
-SRC_URI[md5sum] = "3d6adb76fdd072db8c8fae41b40855e8"
-SRC_URI[sha256sum] = "3ad880444123ac06a7238546fa38a2a6ad7f7e0cc3614de7e103863616522282"
+SRC_URI[sha256sum] = "e791c890779c40056ab63aaed5e031bb6e2890a98418ca09c534e6261a2eebd2"
SYSROOT_DIRS_IGNORE:remove = "${datadir}/fonts"
+
+SRC_URI_EXT = "xz"
diff --git a/meta/recipes-graphics/xorg-font/xorg-font-common.inc b/meta/recipes-graphics/xorg-font/xorg-font-common.inc
index 2df23efed4..edf7cf7642 100644
--- a/meta/recipes-graphics/xorg-font/xorg-font-common.inc
+++ b/meta/recipes-graphics/xorg-font/xorg-font-common.inc
@@ -9,7 +9,8 @@ RDEPENDS:${PN} = "encodings font-util font-alias"
XORG_PN = "${BPN}"
-SRC_URI = "${XORG_MIRROR}/individual/font/${XORG_PN}-${PV}.tar.bz2"
+SRC_URI_EXT = "bz2"
+SRC_URI = "${XORG_MIRROR}/individual/font/${XORG_PN}-${PV}.tar.${SRC_URI_EXT}"
S = "${WORKDIR}/${XORG_PN}-${PV}"
inherit autotools pkgconfig features_check
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch
new file mode 100644
index 0000000000..973f328304
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch
@@ -0,0 +1,58 @@
+From 1d11822601fd24a396b354fa616b04ed3df8b4ef Mon Sep 17 00:00:00 2001
+From: "Thomas E. Dickey" <dickey@invisible-island.net>
+Date: Tue, 4 Oct 2022 18:26:17 -0400
+Subject: [PATCH] fix a memory leak in XRegisterIMInstantiateCallback
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/1d11822601fd24a396b354fa616b04ed3df8b4ef]
+CVE: CVE-2022-3554
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+fix a memory leak in XRegisterIMInstantiateCallback
+
+Analysis:
+
+ _XimRegisterIMInstantiateCallback() opens an XIM and closes it using
+ the internal function pointers, but the internal close function does
+ not free the pointer to the XIM (this would be done in XCloseIM()).
+
+Report/patch:
+
+ Date: Mon, 03 Oct 2022 18:47:32 +0800
+ From: Po Lu <luangruo@yahoo.com>
+ To: xorg-devel@lists.x.org
+ Subject: Re: Yet another leak in Xlib
+
+ For reference, here's how I'm calling XRegisterIMInstantiateCallback:
+
+ XSetLocaleModifiers ("");
+ XRegisterIMInstantiateCallback (compositor.display,
+ XrmGetDatabase (compositor.display),
+ (char *) compositor.resource_name,
+ (char *) compositor.app_name,
+ IMInstantiateCallback, NULL);
+ and XMODIFIERS is:
+
+ @im=ibus
+
+Signed-off-by: Thomas E. Dickey <dickey@invisible-island.net>
+---
+ modules/im/ximcp/imInsClbk.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/modules/im/ximcp/imInsClbk.c b/modules/im/ximcp/imInsClbk.c
+index 95b379c..c10e347 100644
+--- a/modules/im/ximcp/imInsClbk.c
++++ b/modules/im/ximcp/imInsClbk.c
+@@ -212,6 +212,9 @@ _XimRegisterIMInstantiateCallback(
+ if( xim ) {
+ lock = True;
+ xim->methods->close( (XIM)xim );
++ /* XIMs must be freed manually after being opened; close just
++ does the protocol to deinitialize the IM. */
++ XFree( xim );
+ lock = False;
+ icb->call = True;
+ callback( display, client_data, NULL );
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch
new file mode 100644
index 0000000000..919e7a00fb
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch
@@ -0,0 +1,40 @@
+From 8a368d808fec166b5fb3dfe6312aab22c7ee20af Mon Sep 17 00:00:00 2001
+From: Hodong <hodong@yozmos.com>
+Date: Thu, 20 Jan 2022 00:57:41 +0900
+Subject: [PATCH] Fix two memory leaks in _XFreeX11XCBStructure()
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/8a368d808fec166b5fb3dfe6312aab22c7ee20af]
+CVE: CVE-2022-3555
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+Fix two memory leaks in _XFreeX11XCBStructure()
+
+Even when XCloseDisplay() was called, some memory was leaked.
+
+XCloseDisplay() calls _XFreeDisplayStructure(), which calls
+_XFreeX11XCBStructure().
+
+However, _XFreeX11XCBStructure() did not destroy the condition variables,
+resulting in the leaking of some 40 bytes.
+
+Signed-off-by: Hodong <hodong@yozmos.com>
+---
+ src/xcb_disp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/src/xcb_disp.c b/src/xcb_disp.c
+index 70a602f..e9becee 100644
+--- a/src/xcb_disp.c
++++ b/src/xcb_disp.c
+@@ -102,6 +102,8 @@ void _XFreeX11XCBStructure(Display *dpy)
+ dpy->xcb->pending_requests = tmp->next;
+ free(tmp);
+ }
++ xcondition_clear(dpy->xcb->event_notify);
++ xcondition_clear(dpy->xcb->reply_notify);
+ xcondition_free(dpy->xcb->event_notify);
+ xcondition_free(dpy->xcb->reply_notify);
+ Xfree(dpy->xcb);
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch
new file mode 100644
index 0000000000..c724cf8fdd
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch
@@ -0,0 +1,111 @@
+From 304a654a0d57bf0f00d8998185f0360332cfa36c Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Sat, 10 Jun 2023 16:30:07 -0700
+Subject: [PATCH] InitExt.c: Add bounds checks for extension request, event, &
+ error codes
+
+Fixes CVE-2023-3138: X servers could return values from XQueryExtension
+that would cause Xlib to write entries out-of-bounds of the arrays to
+store them, though this would only overwrite other parts of the Display
+struct, not outside the bounds allocated for that structure.
+
+Reported-by: Gregory James DUCK <gjduck@gmail.com>
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+CVE: CVE-2023-3138
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/304a654a0d57bf0f00d8998185f0360332cfa36c.patch]
+Signed-off-by: Poonam Jadhav <poonam.jadhav@kpit.com>
+---
+ src/InitExt.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+diff --git a/src/InitExt.c b/src/InitExt.c
+index 4de46f15..afc00a6b 100644
+--- a/src/InitExt.c
++++ b/src/InitExt.c
+@@ -33,6 +33,18 @@ from The Open Group.
+ #include <X11/Xos.h>
+ #include <stdio.h>
+
++/* The X11 protocol spec reserves events 64 through 127 for extensions */
++#ifndef LastExtensionEvent
++#define LastExtensionEvent 127
++#endif
++
++/* The X11 protocol spec reserves requests 128 through 255 for extensions */
++#ifndef LastExtensionRequest
++#define FirstExtensionRequest 128
++#define LastExtensionRequest 255
++#endif
++
++
+ /*
+ * This routine is used to link a extension in so it will be called
+ * at appropriate times.
+@@ -242,6 +254,12 @@ WireToEventType XESetWireToEvent(
+ WireToEventType proc) /* routine to call when converting event */
+ {
+ register WireToEventType oldproc;
++ if (event_number < 0 ||
++ event_number > LastExtensionEvent) {
++ fprintf(stderr, "Xlib: ignoring invalid extension event %d\n",
++ event_number);
++ return (WireToEventType)_XUnknownWireEvent;
++ }
+ if (proc == NULL) proc = (WireToEventType)_XUnknownWireEvent;
+ LockDisplay (dpy);
+ oldproc = dpy->event_vec[event_number];
+@@ -263,6 +281,12 @@ WireToEventCookieType XESetWireToEventCookie(
+ )
+ {
+ WireToEventCookieType oldproc;
++ if (extension < FirstExtensionRequest ||
++ extension > LastExtensionRequest) {
++ fprintf(stderr, "Xlib: ignoring invalid extension opcode %d\n",
++ extension);
++ return (WireToEventCookieType)_XUnknownWireEventCookie;
++ }
+ if (proc == NULL) proc = (WireToEventCookieType)_XUnknownWireEventCookie;
+ LockDisplay (dpy);
+ oldproc = dpy->generic_event_vec[extension & 0x7F];
+@@ -284,6 +308,12 @@ CopyEventCookieType XESetCopyEventCookie(
+ )
+ {
+ CopyEventCookieType oldproc;
++ if (extension < FirstExtensionRequest ||
++ extension > LastExtensionRequest) {
++ fprintf(stderr, "Xlib: ignoring invalid extension opcode %d\n",
++ extension);
++ return (CopyEventCookieType)_XUnknownCopyEventCookie;
++ }
+ if (proc == NULL) proc = (CopyEventCookieType)_XUnknownCopyEventCookie;
+ LockDisplay (dpy);
+ oldproc = dpy->generic_event_copy_vec[extension & 0x7F];
+@@ -305,6 +335,12 @@ EventToWireType XESetEventToWire(
+ EventToWireType proc) /* routine to call when converting event */
+ {
+ register EventToWireType oldproc;
++ if (event_number < 0 ||
++ event_number > LastExtensionEvent) {
++ fprintf(stderr, "Xlib: ignoring invalid extension event %d\n",
++ event_number);
++ return (EventToWireType)_XUnknownNativeEvent;
++ }
+ if (proc == NULL) proc = (EventToWireType) _XUnknownNativeEvent;
+ LockDisplay (dpy);
+ oldproc = dpy->wire_vec[event_number];
+@@ -325,6 +361,12 @@ WireToErrorType XESetWireToError(
+ WireToErrorType proc) /* routine to call when converting error */
+ {
+ register WireToErrorType oldproc = NULL;
++ if (error_number < 0 ||
++ error_number > LastExtensionError) {
++ fprintf(stderr, "Xlib: ignoring invalid extension error %d\n",
++ error_number);
++ return (WireToErrorType)_XDefaultWireError;
++ }
+ if (proc == NULL) proc = (WireToErrorType)_XDefaultWireError;
+ LockDisplay (dpy);
+ if (!dpy->error_vec) {
+--
+GitLab
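All four hunks above apply the same guard: an externally supplied extension code is range-checked before it is used to index a fixed-size vector inside the Display structure. A small standalone sketch of that pattern, with a toy table in place of the real event vectors:

    #include <stdio.h>

    #define LAST_EXTENSION_EVENT 127   /* events 64..127 are reserved for extensions */

    static int event_vec[128];         /* fixed-size table indexed by event number */

    static int set_handler(int event_number, int handler) {
        if (event_number < 0 || event_number > LAST_EXTENSION_EVENT) {
            fprintf(stderr, "ignoring invalid extension event %d\n", event_number);
            return -1;                 /* reject instead of writing out of bounds */
        }
        event_vec[event_number] = handler;
        return 0;
    }

    int main(void) {
        set_handler(70, 1);     /* in range: stored */
        set_handler(300, 2);    /* out of range: refused, no out-of-bounds write */
        return 0;
    }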
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch
new file mode 100644
index 0000000000..64f8776cc9
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch
@@ -0,0 +1,62 @@
+From 6858d468d9ca55fb4c5fd70b223dbc78a3358a7f Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Sun, 17 Sep 2023 14:19:40 -0700
+Subject: [PATCH] CVE-2023-43785: out-of-bounds memory access in
+ _XkbReadKeySyms()
+
+Make sure we allocate enough memory in the first place, and
+also handle error returns from _XkbReadBufferCopyKeySyms() when
+it detects out-of-bounds issues.
+
+Reported-by: Gregory James DUCK <gjduck@gmail.com>
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport from [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/6858d468d9ca55fb4c5fd70b223dbc78a3358a7f]
+CVE: CVE-2023-43785
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/xkb/XKBGetMap.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/src/xkb/XKBGetMap.c b/src/xkb/XKBGetMap.c
+index 2891d21..31199e4 100644
+--- a/src/xkb/XKBGetMap.c
++++ b/src/xkb/XKBGetMap.c
+@@ -182,7 +182,8 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ if (offset + newMap->nSyms >= map->size_syms) {
+ register int sz;
+
+- sz = map->size_syms + 128;
++ sz = offset + newMap->nSyms;
++ sz = ((sz + (unsigned) 128) / 128) * 128;
+ _XkbResizeArray(map->syms, map->size_syms, sz, KeySym);
+ if (map->syms == NULL) {
+ map->size_syms = 0;
+@@ -191,8 +192,9 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ map->size_syms = sz;
+ }
+ if (newMap->nSyms > 0) {
+- _XkbReadBufferCopyKeySyms(buf, (KeySym *) &map->syms[offset],
+- newMap->nSyms);
++ if (_XkbReadBufferCopyKeySyms(buf, (KeySym *) &map->syms[offset],
++ newMap->nSyms) == 0)
++ return BadLength;
+ offset += newMap->nSyms;
+ }
+ else {
+@@ -222,8 +224,10 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ newSyms = XkbResizeKeySyms(xkb, i + rep->firstKeySym, tmp);
+ if (newSyms == NULL)
+ return BadAlloc;
+- if (newMap->nSyms > 0)
+- _XkbReadBufferCopyKeySyms(buf, newSyms, newMap->nSyms);
++ if (newMap->nSyms > 0) {
++ if (_XkbReadBufferCopyKeySyms(buf, newSyms, newMap->nSyms) == 0)
++ return BadLength;
++ }
+ else
+ newSyms[0] = NoSymbol;
+ oldMap->kt_index[0] = newMap->ktIndex[0];
+--
+2.35.7
+
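The first hunk changes the growth policy from "current size plus 128" to "at least as many entries as are actually needed, rounded to a 128-entry boundary". A standalone sketch of that sizing arithmetic with plain integers (the real code grows an array of KeySym):

    #include <stdio.h>

    /* a multiple of chunk that is at least sz (chunk > 0), as in the patch */
    static int round_up(int sz, int chunk) {
        return ((sz + chunk) / chunk) * chunk;
    }

    int main(void) {
        int offset = 200, nsyms = 60;
        int needed = offset + nsyms;           /* 260 KeySym slots really required */
        int new_size = round_up(needed, 128);  /* 384, never smaller than needed */
        printf("needed %d, allocating %d\n", needed, new_size);
        return 0;
    }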
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0001.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0001.patch
new file mode 100644
index 0000000000..db5b7067aa
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0001.patch
@@ -0,0 +1,41 @@
+From 204c3393c4c90a29ed6bef64e43849536e863a86 Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 15:54:30 -0700
+Subject: [PATCH] CVE-2023-43786: stack exhaustion from infinite recursion in
+ PutSubImage()
+
+When splitting a single line of pixels into chunks to send to the
+X server, be sure to take into account the number of bits per pixel,
+so we don't just loop forever trying to send more pixels than fit in
+the given request size and not breaking them down into a small enough
+chunk to fit.
+
+Fixes: "almost complete rewrite" (Dec. 12, 1987) from X11R2
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport from [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/204c3393c4c90a29ed6bef64e43849536e863a86]
+CVE: CVE-2023-43786
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/PutImage.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/src/PutImage.c b/src/PutImage.c
+index 857ee91..a6db7b4 100644
+--- a/src/PutImage.c
++++ b/src/PutImage.c
+@@ -914,8 +914,9 @@ PutSubImage (
+ req_width, req_height - SubImageHeight,
+ dest_bits_per_pixel, dest_scanline_pad);
+ } else {
+- int SubImageWidth = (((Available << 3) / dest_scanline_pad)
+- * dest_scanline_pad) - left_pad;
++ int SubImageWidth = ((((Available << 3) / dest_scanline_pad)
++ * dest_scanline_pad) - left_pad)
++ / dest_bits_per_pixel;
+
+ PutSubImage(dpy, d, gc, image, req_xoffset, req_yoffset, x, y,
+ (unsigned int) SubImageWidth, 1,
+--
+2.35.7
+
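The root cause was a chunk width computed in bits but treated as a pixel count, so the recursion never shrank the remaining image. A simplified sketch of the corrected arithmetic, using made-up numbers rather than the actual PutImage wire layout:

    #include <stdio.h>

    int main(void) {
        int available_bytes = 1024;    /* space left in the X request */
        int bits_per_pixel = 32;
        int scanline_pad = 32;

        /* bits that fit, aligned down to the scanline pad */
        int bits = ((available_bytes << 3) / scanline_pad) * scanline_pad;

        /* the old code treated "bits" as a pixel count; converting to pixels
         * guarantees the chunk really fits, so the recursion always shrinks */
        int pixels = bits / bits_per_pixel;

        printf("send a chunk of %d pixels (%d bits)\n", pixels, bits);
        return 0;
    }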
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0002.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0002.patch
new file mode 100644
index 0000000000..e46b3a2b24
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0002.patch
@@ -0,0 +1,45 @@
+From 73a37d5f2fcadd6540159b432a70d80f442ddf4a Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 15:55:04 -0700
+Subject: [PATCH] XPutImage: clip images to maximum height & width allowed by
+ protocol
+
+The PutImage request specifies height & width of the image as CARD16
+(unsigned 16-bit integer), same as the maximum dimensions of an X11
+Drawable, which the image is being copied to.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport from [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/73a37d5f2fcadd6540159b432a70d80f442ddf4a]
+CVE: CVE-2023-43786
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/PutImage.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/src/PutImage.c b/src/PutImage.c
+index a6db7b4..ba411e3 100644
+--- a/src/PutImage.c
++++ b/src/PutImage.c
+@@ -30,6 +30,7 @@ in this Software without prior written authorization from The Open Group.
+ #include "Xlibint.h"
+ #include "Xutil.h"
+ #include <stdio.h>
++#include <limits.h>
+ #include "Cr.h"
+ #include "ImUtil.h"
+ #include "reallocarray.h"
+@@ -962,6 +963,10 @@ XPutImage (
+ height = image->height - req_yoffset;
+ if ((width <= 0) || (height <= 0))
+ return 0;
++ if (width > USHRT_MAX)
++ width = USHRT_MAX;
++ if (height > USHRT_MAX)
++ height = USHRT_MAX;
+
+ if ((image->bits_per_pixel == 1) || (image->format != ZPixmap)) {
+ dest_bits_per_pixel = 1;
+--
+2.35.7
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0003.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0003.patch
new file mode 100644
index 0000000000..2f47fe0bf2
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-0003.patch
@@ -0,0 +1,51 @@
+From b4031fc023816aca07fbd592ed97010b9b48784b Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 16:12:27 -0700
+Subject: [PATCH] XCreatePixmap: trigger BadValue error for out-of-range
+ dimensions
+
+The CreatePixmap request specifies height & width of the image as CARD16
+(unsigned 16-bit integer), so if either is larger than that, set it to 0
+so the X server returns a BadValue error as the protocol requires.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport from [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/b4031fc023816aca07fbd592ed97010b9b48784b]
+CVE: CVE-2023-43786
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/CrPixmap.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/src/CrPixmap.c b/src/CrPixmap.c
+index cdf3120..3cb2ca6 100644
+--- a/src/CrPixmap.c
++++ b/src/CrPixmap.c
+@@ -28,6 +28,7 @@ in this Software without prior written authorization from The Open Group.
+ #include <config.h>
+ #endif
+ #include "Xlibint.h"
++#include <limits.h>
+
+ #ifdef USE_DYNAMIC_XCURSOR
+ void
+@@ -47,6 +48,16 @@ Pixmap XCreatePixmap (
+ Pixmap pid;
+ register xCreatePixmapReq *req;
+
++ /*
++ * Force a BadValue X Error if the requested dimensions are larger
++ * than the X11 protocol has room for, since that's how callers expect
++ * to get notified of errors.
++ */
++ if (width > USHRT_MAX)
++ width = 0;
++ if (height > USHRT_MAX)
++ height = 0;
++
+ LockDisplay(dpy);
+ GetReq(CreatePixmap, req);
+ req->drawable = d;
+--
+2.35.7
+
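This patch takes a different tack from the XPutImage change above: instead of clamping, an out-of-range dimension is forced to 0 so the X server itself raises the BadValue error the caller expects. A tiny sketch of that convention, assuming a hypothetical 16-bit wire field:

    #include <limits.h>
    #include <stdio.h>

    /* the wire format only has 16 bits per dimension */
    static unsigned short encode_dimension(unsigned long requested) {
        if (requested > USHRT_MAX)
            return 0;      /* 0 is invalid, so the server answers with BadValue */
        return (unsigned short) requested;
    }

    int main(void) {
        printf("%u\n", encode_dimension(70000));   /* out of range: forced to 0 */
        printf("%u\n", encode_dimension(640));     /* in range: passed through */
        return 0;
    }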
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787.patch
new file mode 100644
index 0000000000..4b5cd694ab
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787.patch
@@ -0,0 +1,63 @@
+From 7916869d16bdd115ac5be30a67c3749907aea6a0 Mon Sep 17 00:00:00 2001
+From: Yair Mizrahi <yairm@jfrog.com>
+Date: Thu, 7 Sep 2023 16:15:32 -0700
+Subject: [PATCH] CVE-2023-43787: Integer overflow in XCreateImage() leading to
+ a heap overflow
+
+When the format is `Pixmap` it calculates the size of the image data as:
+ ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
+There is no validation on the `width` of the image, and so this
+calculation exceeds the capacity of a 4-byte integer, causing an overflow.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport from [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/7916869d16bdd115ac5be30a67c3749907aea6a0]
+CVE: CVE-2023-43787
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/ImUtil.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/src/ImUtil.c b/src/ImUtil.c
+index 36f08a0..fbfad33 100644
+--- a/src/ImUtil.c
++++ b/src/ImUtil.c
+@@ -30,6 +30,7 @@ in this Software without prior written authorization from The Open Group.
+ #include <X11/Xlibint.h>
+ #include <X11/Xutil.h>
+ #include <stdio.h>
++#include <limits.h>
+ #include "ImUtil.h"
+
+ static int _XDestroyImage(XImage *);
+@@ -361,13 +362,22 @@ XImage *XCreateImage (
+ /*
+ * compute per line accelerator.
+ */
+- {
+- if (format == ZPixmap)
++ if (format == ZPixmap) {
++ if ((INT_MAX / bits_per_pixel) < width) {
++ Xfree(image);
++ return NULL;
++ }
++
+ min_bytes_per_line =
+- ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
+- else
++ ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
++ } else {
++ if ((INT_MAX - offset) < width) {
++ Xfree(image);
++ return NULL;
++ }
++
+ min_bytes_per_line =
+- ROUNDUP((width + offset), image->bitmap_pad);
++ ROUNDUP((width + offset), image->bitmap_pad);
+ }
+ if (image_bytes_per_line == 0) {
+ image->bytes_per_line = min_bytes_per_line;
+--
+2.35.7
+
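The guard added here is the standard pre-multiplication overflow check: confirm that a does not exceed INT_MAX / b before computing a * b. A self-contained sketch of that check with arbitrary example values:

    #include <limits.h>
    #include <stdio.h>

    /* multiply two positive ints, refusing instead of overflowing */
    static int checked_mul(int a, int b, int *out) {
        if (b > 0 && a > INT_MAX / b)
            return -1;                       /* a * b would not fit in an int */
        *out = a * b;
        return 0;
    }

    int main(void) {
        int bits;
        if (checked_mul(100000000, 32, &bits) < 0)
            printf("rejected: width too large for this depth\n");
        if (checked_mul(640, 32, &bits) == 0)
            printf("ok: %d bits per line before padding\n", bits);
        return 0;
    }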
diff --git a/meta/recipes-graphics/xorg-lib/libx11_1.7.3.1.bb b/meta/recipes-graphics/xorg-lib/libx11_1.7.3.1.bb
index 0c3abcd896..d783f60103 100644
--- a/meta/recipes-graphics/xorg-lib/libx11_1.7.3.1.bb
+++ b/meta/recipes-graphics/xorg-lib/libx11_1.7.3.1.bb
@@ -15,6 +15,14 @@ PE = "1"
SRC_URI = "${XORG_MIRROR}/individual/lib/${XORG_PN}-${PV}.tar.xz"
SRC_URI += "file://disable_tests.patch \
+ file://CVE-2022-3554.patch \
+ file://CVE-2022-3555.patch \
+ file://CVE-2023-3138.patch \
+ file://CVE-2023-43785.patch \
+ file://CVE-2023-43786-0001.patch \
+ file://CVE-2023-43786-0002.patch \
+ file://CVE-2023-43786-0003.patch \
+ file://CVE-2023-43787.patch \
"
SRC_URI[sha256sum] = "2ffd417266fb875028fdc0ef349694f63dbcd76d0b0cfacfb52e6151f4b60989"
diff --git a/meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb b/meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb
index 4f0a5d7ba0..7bc494a690 100644
--- a/meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb
+++ b/meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb
@@ -11,17 +11,18 @@ an extension of the monochrome XBM bitmap specified in the X \
protocol."
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://COPYING;md5=51f4270b012ecd4ab1a164f5f4ed6cf7"
+LIC_FILES_CHKSUM = "file://COPYING;md5=903942ebc9d807dfb68540f40bae5aff"
DEPENDS += "libxext libsm libxt gettext-native"
PE = "1"
XORG_PN = "libXpm"
+XORG_EXT = "tar.xz"
+EXTRA_OECONF += "--disable-open-zfile"
PACKAGES =+ "sxpm cxpm"
FILES:cxpm = "${bindir}/cxpm"
FILES:sxpm = "${bindir}/sxpm"
-SRC_URI[md5sum] = "6f0ecf8d103d528cfc803aa475137afa"
-SRC_URI[sha256sum] = "9cd1da57588b6cb71450eff2273ef6b657537a9ac4d02d0014228845b935ac25"
+SRC_URI[sha256sum] = "64b31f81019e7d388c822b0b28af8d51c4622b83f1f0cb6fa3fc95e271226e43"
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch b/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch
new file mode 100644
index 0000000000..d226766d49
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch
@@ -0,0 +1,33 @@
+CVE: CVE-2022-44638
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From a1f88e842e0216a5b4df1ab023caebe33c101395 Mon Sep 17 00:00:00 2001
+From: Matt Turner <mattst88@gmail.com>
+Date: Wed, 2 Nov 2022 12:07:32 -0400
+Subject: [PATCH] Avoid integer overflow leading to out-of-bounds write
+
+Thanks to Maddie Stone and Google's Project Zero for discovering this
+issue, providing a proof-of-concept, and a great analysis.
+
+Closes: https://gitlab.freedesktop.org/pixman/pixman/-/issues/63
+---
+ pixman/pixman-trap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pixman/pixman-trap.c b/pixman/pixman-trap.c
+index 91766fd..7560405 100644
+--- a/pixman/pixman-trap.c
++++ b/pixman/pixman-trap.c
+@@ -74,7 +74,7 @@ pixman_sample_floor_y (pixman_fixed_t y,
+
+ if (f < Y_FRAC_FIRST (n))
+ {
+- if (pixman_fixed_to_int (i) == 0x8000)
++ if (pixman_fixed_to_int (i) == 0xffff8000)
+ {
+ f = 0; /* saturate */
+ }
+--
+GitLab
+
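The fix accounts for sign extension: pixman_fixed_to_int() arithmetic-shifts a 16.16 fixed-point value, so the most negative integer part comes back as -32768 (bit pattern 0xffff8000), never as the positive 0x8000 the old saturation test compared against. A standalone sketch of that conversion, using a toy fixed_t and assuming the usual arithmetic right shift for negative values:

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t fixed_t;                     /* toy 16.16 fixed point */
    #define fixed_to_int(f) ((int) ((f) >> 16))  /* arithmetic shift, as in pixman */

    int main(void) {
        fixed_t most_negative = INT32_MIN;       /* integer part -32768 */
        int i = fixed_to_int(most_negative);

        /* sign extension makes i equal -32768 (pattern 0xffff8000), so a
         * comparison against the positive constant 0x8000 can never be true */
        printf("i=%d  == 0x8000? %d  == (int)0xffff8000? %d\n",
               i, i == 0x8000, i == (int) 0xffff8000);
        return 0;
    }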
diff --git a/meta/recipes-graphics/xorg-lib/pixman_0.40.0.bb b/meta/recipes-graphics/xorg-lib/pixman_0.40.0.bb
index ccfe277746..63fd6d2978 100644
--- a/meta/recipes-graphics/xorg-lib/pixman_0.40.0.bb
+++ b/meta/recipes-graphics/xorg-lib/pixman_0.40.0.bb
@@ -9,6 +9,7 @@ DEPENDS = "zlib"
SRC_URI = "https://www.cairographics.org/releases/${BP}.tar.gz \
file://0001-ARM-qemu-related-workarounds-in-cpu-features-detecti.patch \
+ file://CVE-2022-44638.patch \
"
SRC_URI[md5sum] = "73858c0862dd9896fb5f62ae267084a4"
SRC_URI[sha256sum] = "6d200dec3740d9ec4ec8d1180e25779c00bc749f94278c8b9021f5534db223fc"
@@ -18,7 +19,7 @@ UPSTREAM_CHECK_REGEX = "pixman-(?P<pver>\d+\.(\d*[02468])+(\.\d+)+)"
PE = "1"
-LICENSE = "MIT & MIT & PD"
+LICENSE = "MIT & PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=14096c769ae0cbb5fcb94ec468be11b3 \
file://pixman/pixman-matrix.c;endline=21;md5=4a018dff3e4e25302724c88ff95c2456 \
file://pixman/pixman-arm-neon-asm.h;endline=24;md5=9a9cc1e51abbf1da58f4d9528ec9d49b \
diff --git a/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc b/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
index 60bc8c76fa..68137c4147 100644
--- a/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
+++ b/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
@@ -6,8 +6,9 @@ LICENSE = "MIT"
DEPENDS = "util-macros"
XORG_PN = "${BPN}"
+XORG_EXT ?= "tar.bz2"
-SRC_URI = "${XORG_MIRROR}/individual/lib/${XORG_PN}-${PV}.tar.bz2"
+SRC_URI = "${XORG_MIRROR}/individual/lib/${XORG_PN}-${PV}.${XORG_EXT}"
S = "${WORKDIR}/${XORG_PN}-${PV}"
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc b/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
index 057a1ba6ad..ecb164ddf7 100644
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
@@ -28,6 +28,8 @@ CVE_CHECK_IGNORE += "CVE-2011-4613"
# impossible or difficult to exploit. There is currently no upstream patch
# available for this flaw.
CVE_CHECK_IGNORE += "CVE-2020-25697"
+# This is specific to XQuartz, which is the macOS X server port
+CVE_CHECK_IGNORE += "CVE-2022-3553"
S = "${WORKDIR}/${XORG_PN}-${PV}"
@@ -80,9 +82,9 @@ PACKAGES =+ "${PN}-sdl \
SUMMARY:xf86-video-modesetting = "X.Org X server -- modesetting display driver"
INSANE_SKIP:${MLPREFIX}xf86-video-modesetting = "xorg-driver-abi"
-XSERVER_RRECOMMENDS = "xkeyboard-config rgb xserver-xf86-config xkbcomp xf86-input-libinput"
-RRECOMMENDS:${PN} += "${XSERVER_RRECOMMENDS}"
-RRECOMMENDS:${PN}-xwayland += "${XSERVER_RRECOMMENDS}"
+XSERVER_RDEPENDS = "xkeyboard-config rgb xserver-xf86-config xkbcomp xf86-input-libinput"
+RDEPENDS:${PN} += "${XSERVER_RDEPENDS}"
+RDEPENDS:${PN}-xwayland += "${XSERVER_RDEPENDS}"
RDEPENDS:${PN}-xvfb += "xkeyboard-config"
RDEPENDS:${PN}-module-exa = "${PN} (= ${EXTENDPKGV})"
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-render-Fix-build-with-gcc-12.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-render-Fix-build-with-gcc-12.patch
deleted file mode 100644
index df9332fae7..0000000000
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-render-Fix-build-with-gcc-12.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From 12041ad0610f1345d6b9994c32943fd4dd01f65d Mon Sep 17 00:00:00 2001
-From: Olivier Fourdan <ofourdan@redhat.com>
-Date: Thu, 20 Jan 2022 10:20:38 +0100
-Subject: [PATCH] render: Fix build with gcc 12
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-The xserver fails to compile with the latest gcc 12:
-
- render/picture.c: In function ‘CreateSolidPicture’:
- render/picture.c:874:26: error: array subscript ‘union _SourcePict[0]’ is partly outside array bounds of ‘unsigned char[16]’ [-Werror=array-bounds]
- 874 | pPicture->pSourcePict->type = SourcePictTypeSolidFill;
- | ^~
- render/picture.c:868:45: note: object of size 16 allocated by ‘malloc’
- 868 | pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(PictSolidFill));
- | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- render/picture.c: In function ‘CreateLinearGradientPicture’:
- render/picture.c:906:26: error: array subscript ‘union _SourcePict[0]’ is partly outside array bounds of ‘unsigned char[32]’ [-Werror=array-bounds]
- 906 | pPicture->pSourcePict->linear.type = SourcePictTypeLinear;
- | ^~
- render/picture.c:899:45: note: object of size 32 allocated by ‘malloc’
- 899 | pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(PictLinearGradient));
- | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- render/picture.c: In function ‘CreateConicalGradientPicture’:
- render/picture.c:989:26: error: array subscript ‘union _SourcePict[0]’ is partly outside array bounds of ‘unsigned char[32]’ [-Werror=array-bounds]
- 989 | pPicture->pSourcePict->conical.type = SourcePictTypeConical;
- | ^~
- render/picture.c:982:45: note: object of size 32 allocated by ‘malloc’
- 982 | pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(PictConicalGradient));
- | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- cc1: some warnings being treated as errors
- ninja: build stopped: subcommand failed.
-
-This is because gcc 12 has become stricter and raises a warning now.
-
-Fix the warning/error by allocating enough memory to store the union
-struct.
-
-Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/c6b0dcb82d4db07a2f32c09a8c09c85a5f57248e]
-Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
-Acked-by: Michel Dänzer <mdaenzer@redhat.com>
-Closes: https://gitlab.freedesktop.org/xorg/xserver/-/issues/1256
----
- render/picture.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/render/picture.c b/render/picture.c
-index afa0d25..2be4b19 100644
---- a/render/picture.c
-+++ b/render/picture.c
-@@ -865,7 +865,7 @@ CreateSolidPicture(Picture pid, xRenderColor * color, int *error)
- }
-
- pPicture->id = pid;
-- pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(PictSolidFill));
-+ pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(SourcePict));
- if (!pPicture->pSourcePict) {
- *error = BadAlloc;
- free(pPicture);
-@@ -896,7 +896,7 @@ CreateLinearGradientPicture(Picture pid, xPointFixed * p1, xPointFixed * p2,
- }
-
- pPicture->id = pid;
-- pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(PictLinearGradient));
-+ pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(SourcePict));
- if (!pPicture->pSourcePict) {
- *error = BadAlloc;
- free(pPicture);
-@@ -936,7 +936,7 @@ CreateRadialGradientPicture(Picture pid, xPointFixed * inner,
- }
-
- pPicture->id = pid;
-- pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(PictRadialGradient));
-+ pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(SourcePict));
- if (!pPicture->pSourcePict) {
- *error = BadAlloc;
- free(pPicture);
-@@ -979,7 +979,7 @@ CreateConicalGradientPicture(Picture pid, xPointFixed * center, xFixed angle,
- }
-
- pPicture->id = pid;
-- pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(PictConicalGradient));
-+ pPicture->pSourcePict = (SourcePictPtr) malloc(sizeof(SourcePict));
- if (!pPicture->pSourcePict) {
- *error = BadAlloc;
- free(pPicture);
---
-2.35.1
-
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch
new file mode 100644
index 0000000000..508588481e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch
@@ -0,0 +1,84 @@
+From 541ab2ecd41d4d8689e71855d93e492bc554719a Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 3 Oct 2023 11:53:05 +1000
+Subject: [PATCH] Xi/randr: fix handling of PropModeAppend/Prepend
+
+The handling of appending/prepending properties was incorrect, with at
+least two bugs: the property length was set to the length of the new
+part only, i.e. appending or prepending N elements to a property with P
+existing elements always resulted in the property having N elements
+instead of N + P.
+
+Second, when pre-pending a value to a property, the offset for the old
+values was incorrect, leaving the new property with potentially
+uninitialized values and/or resulting in OOB memory writes.
+For example, prepending a 3 element value to a 5 element property would
+result in this 8 value array:
+ [N, N, N, ?, ?, P, P, P ] P, P
+ ^OOB write
+
+The XI2 code is a copy/paste of the RandR code, so the bug exists in
+both.
+
+CVE-2023-5367, ZDI-CAN-22153
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/541ab2ecd41d4d8689e71855d93e492bc554719a]
+CVE: CVE-2023-5367
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xiproperty.c | 4 ++--
+ randr/rrproperty.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiproperty.c b/Xi/xiproperty.c
+index 066ba21fba..d315f04d0e 100644
+--- a/Xi/xiproperty.c
++++ b/Xi/xiproperty.c
+@@ -730,7 +730,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ XIDestroyDeviceProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -747,7 +747,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index c2fb9585c6..25469f57b2 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -209,7 +209,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ RRDestroyOutputProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -226,7 +226,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+--
+GitLab
+
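Both hunks encode the same two rules for append/prepend: the stored length is the old element count plus the new one, and on prepend the old data is copied in at an offset of the number of new elements. A toy sketch with a plain integer array standing in for the property data:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        int old[] = {10, 20, 30, 40, 50};       /* P = 5 existing elements */
        int add[] = {1, 2, 3};                  /* N = 3 elements to prepend */
        size_t P = 5, N = 3, total = P + N;     /* total length, not just N */

        int *buf = malloc(total * sizeof *buf);
        if (!buf)
            return 1;
        memcpy(buf, add, N * sizeof *buf);      /* new values go first */
        memcpy(buf + N, old, P * sizeof *buf);  /* old values at offset N, not P */

        for (size_t i = 0; i < total; i++)
            printf("%d ", buf[i]);
        printf("\n");
        free(buf);
        return 0;
    }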
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch
new file mode 100644
index 0000000000..57e2a5abdf
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch
@@ -0,0 +1,102 @@
+From 564ccf2ce9616620456102727acb8b0256b7bbd7 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 5 Oct 2023 12:19:45 +1000
+Subject: [PATCH] mi: reset the PointerWindows reference on screen switch
+
+PointerWindows[] keeps a reference to the last window our sprite
+entered - changes are usually handled by CheckMotion().
+
+If we switch between screens via XWarpPointer our
+dev->spriteInfo->sprite->win is set to the new screen's root window.
+If there's another window at the cursor location CheckMotion() will
+trigger the right enter/leave events later. If there is not, it skips
+that process and we never trigger LeaveWindow() - PointerWindows[] for
+the device still refers to the previous window.
+
+If that window is destroyed we have a dangling reference that will
+eventually cause a use-after-free bug when checking the window hierarchy
+later.
+
+To trigger this, we require:
+- two protocol screens
+- XWarpPointer to the other screen's root window
+- XDestroyWindow before entering any other window
+
+This is a niche bug so we hack around it by making sure we reset the
+PointerWindows[] entry so we cannot have a dangling pointer. This
+doesn't handle Enter/Leave events correctly but the previous code didn't
+either.
+
+CVE-2023-5380, ZDI-CAN-21608
+
+This vulnerability was discovered by:
+Sri working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Reviewed-by: Adam Jackson <ajax@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/564ccf2ce9616620456102727acb8b0256b7bbd7]
+CVE: CVE-2023-5380
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.h | 2 --
+ include/eventstr.h | 3 +++
+ mi/mipointer.c | 17 +++++++++++++++--
+ 3 files changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/dix/enterleave.h b/dix/enterleave.h
+index 4b833d8a3b..e8af924c68 100644
+--- a/dix/enterleave.h
++++ b/dix/enterleave.h
+@@ -58,8 +58,6 @@ extern void DeviceFocusEvent(DeviceIntPtr dev,
+
+ extern void EnterWindow(DeviceIntPtr dev, WindowPtr win, int mode);
+
+-extern void LeaveWindow(DeviceIntPtr dev);
+-
+ extern void CoreFocusEvent(DeviceIntPtr kbd,
+ int type, int mode, int detail, WindowPtr pWin);
+
+diff --git a/include/eventstr.h b/include/eventstr.h
+index 93308f9b24..a9926eaeef 100644
+--- a/include/eventstr.h
++++ b/include/eventstr.h
+@@ -335,4 +335,7 @@ union _InternalEvent {
+ GestureEvent gesture_event;
+ };
+
++extern void
++LeaveWindow(DeviceIntPtr dev);
++
+ #endif
+diff --git a/mi/mipointer.c b/mi/mipointer.c
+index a638f25d4a..8cf0035140 100644
+--- a/mi/mipointer.c
++++ b/mi/mipointer.c
+@@ -397,8 +397,21 @@ miPointerWarpCursor(DeviceIntPtr pDev, ScreenPtr pScreen, int x, int y)
+ #ifdef PANORAMIX
+ && noPanoramiXExtension
+ #endif
+- )
+- UpdateSpriteForScreen(pDev, pScreen);
++ ) {
++ DeviceIntPtr master = GetMaster(pDev, MASTER_POINTER);
++ /* Hack for CVE-2023-5380: if we're moving
++ * screens PointerWindows[] keeps referring to the
++ * old window. If that gets destroyed we have a UAF
++ * bug later. Only happens when jumping from a window
++ * to the root window on the other screen.
++ * Enter/Leave events are incorrect for that case but
++ * too niche to fix.
++ */
++ LeaveWindow(pDev);
++ if (master)
++ LeaveWindow(master);
++ UpdateSpriteForScreen(pDev, pScreen);
++ }
+ }
+
+ /**
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch
new file mode 100644
index 0000000000..0abd5914fa
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch
@@ -0,0 +1,79 @@
+From 0c1a93d319558fe3ab2d94f51d174b4f93810afd Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 28 Nov 2023 15:19:04 +1000
+Subject: [PATCH] Xi: allocate enough XkbActions for our buttons
+
+button->xkb_acts is supposed to be an array sufficiently large for all
+our buttons, not just a single XkbActions struct. Allocating
+insufficient memory here means when we memcpy() later in
+XkbSetDeviceInfo we write into memory that wasn't ours to begin with,
+leading to the usual security ooopsiedaisies.
+
+CVE-2023-6377, ZDI-CAN-22412, ZDI-CAN-22413
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/0c1a93d319558fe3ab2d94f51d174b4f93810afd]
+CVE: CVE-2023-6377
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 12 ++++++------
+ dix/devices.c | 10 ++++++++++
+ 2 files changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index dcd4efb3bc..54ea11a938 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -611,13 +611,13 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ }
+
+ if (from->button->xkb_acts) {
+- if (!to->button->xkb_acts) {
+- to->button->xkb_acts = calloc(1, sizeof(XkbAction));
+- if (!to->button->xkb_acts)
+- FatalError("[Xi] not enough memory for xkb_acts.\n");
+- }
++ size_t maxbuttons = max(to->button->numButtons, from->button->numButtons);
++ to->button->xkb_acts = xnfreallocarray(to->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(to->button->xkb_acts, 0, maxbuttons * sizeof(XkbAction));
+ memcpy(to->button->xkb_acts, from->button->xkb_acts,
+- sizeof(XkbAction));
++ from->button->numButtons * sizeof(XkbAction));
+ }
+ else {
+ free(to->button->xkb_acts);
+diff --git a/dix/devices.c b/dix/devices.c
+index b063128df0..3f3224d626 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -2539,6 +2539,8 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+
+ if (master->button && master->button->numButtons != maxbuttons) {
+ int i;
++ int last_num_buttons = master->button->numButtons;
++
+ DeviceChangedEvent event = {
+ .header = ET_Internal,
+ .type = ET_DeviceChanged,
+@@ -2549,6 +2551,14 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+ };
+
+ master->button->numButtons = maxbuttons;
++ if (last_num_buttons < maxbuttons) {
++ master->button->xkb_acts = xnfreallocarray(master->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(&master->button->xkb_acts[last_num_buttons],
++ 0,
++ (maxbuttons - last_num_buttons) * sizeof(XkbAction));
++ }
+
+ memcpy(&event.buttons.names, master->button->labels, maxbuttons *
+ sizeof(Atom));
+--
+GitLab
+
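The corrected allocation sizes xkb_acts as one XkbAction per button, zeroes the whole array, and copies only as many entries as the source device actually has. A standalone sketch of that copy pattern with a made-up Action struct (plain realloc stands in for the server's xnfreallocarray):

    #include <stdlib.h>
    #include <string.h>

    typedef struct { int data[2]; } Action;

    /* grow dst to hold one Action per button, zero it, copy what src has */
    static int copy_actions(Action **dst, size_t dst_buttons,
                            const Action *src, size_t src_buttons) {
        size_t maxbuttons = dst_buttons > src_buttons ? dst_buttons : src_buttons;
        Action *tmp = realloc(*dst, maxbuttons * sizeof *tmp);
        if (!tmp)
            return -1;
        memset(tmp, 0, maxbuttons * sizeof *tmp);
        memcpy(tmp, src, src_buttons * sizeof *tmp);   /* only src's real entries */
        *dst = tmp;
        return 0;
    }

    int main(void) {
        Action src[7];
        Action *dst = NULL;
        memset(src, 0, sizeof src);
        int rc = copy_actions(&dst, 5, src, 7);        /* grows dst to 7, not 1 */
        free(dst);
        return rc == 0 ? 0 : 1;
    }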
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch
new file mode 100644
index 0000000000..6392eae3f8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch
@@ -0,0 +1,63 @@
+From 14f480010a93ff962fef66a16412fafff81ad632 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 27 Nov 2023 16:27:49 +1000
+Subject: [PATCH] randr: avoid integer truncation in length check of
+ ProcRRChange*Property
+
+Affected are ProcRRChangeProviderProperty and ProcRRChangeOutputProperty.
+See also xserver@8f454b79 where this same bug was fixed for the core
+protocol and XI.
+
+This fixes an OOB read and the resulting information disclosure.
+
+Length calculation for the request was clipped to a 32-bit integer. With
+the correct stuff->nUnits value the expected request size was
+truncated, passing the REQUEST_FIXED_SIZE check.
+
+The server then proceeded with reading at least stuff->num_items bytes
+(depending on stuff->format) from the request and stuffing whatever it
+finds into the property. In the process it would also allocate at least
+stuff->nUnits bytes, i.e. 4GB.
+
+CVE-2023-6478, ZDI-CAN-22561
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/14f480010a93ff962fef66a16412fafff81ad632]
+CVE: CVE-2023-6478
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ randr/rrproperty.c | 2 +-
+ randr/rrproviderproperty.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index 25469f57b2..c4fef8a1f6 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -530,7 +530,7 @@ ProcRRChangeOutputProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeOutputPropertyReq);
+diff --git a/randr/rrproviderproperty.c b/randr/rrproviderproperty.c
+index b79c17f9bf..90c5a9a933 100644
+--- a/randr/rrproviderproperty.c
++++ b/randr/rrproviderproperty.c
+@@ -498,7 +498,7 @@ ProcRRChangeProviderProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeProviderPropertyReq);
+--
+GitLab
+
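The type change matters because the request length is computed as a product of client-supplied values; doing the multiplication in 32 bits lets it wrap and slip past the fixed-size check. A small sketch of the truncated versus the widened computation, with an arbitrary attacker-style count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t num_items = 0x40000000u;   /* client-supplied element count */
        uint32_t size_in_bytes = 4;

        /* 32-bit product wraps to 0 and slips past an exact-length check */
        uint32_t truncated = num_items * size_in_bytes;

        /* widening before the multiply keeps the true value so it can be rejected */
        uint64_t total = (uint64_t) num_items * size_in_bytes;

        printf("truncated=%u real=%llu\n", truncated, (unsigned long long) total);
        return 0;
    }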
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch
new file mode 100644
index 0000000000..0bfff268e7
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch
@@ -0,0 +1,55 @@
+From 9e2ecb2af8302dedc49cb6a63ebe063c58a9e7e3 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 14 Dec 2023 11:29:49 +1000
+Subject: [PATCH] dix: allocate enough space for logical button maps
+
+Both DeviceFocusEvent and the XIQueryPointer reply contain a bit for
+each logical button currently down. Since buttons can be arbitrarily mapped
+to anything up to 255 make sure we have enough bits for the maximum mapping.
+
+CVE-2023-6816, ZDI-CAN-22664, ZDI-CAN-22665
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/9e2ecb2af8302dedc49cb6a63ebe063c58a9e7e3]
+CVE: CVE-2023-6816
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xiquerypointer.c | 3 +--
+ dix/enterleave.c | 5 +++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiquerypointer.c b/Xi/xiquerypointer.c
+index 5b77b1a444..2b05ac5f39 100644
+--- a/Xi/xiquerypointer.c
++++ b/Xi/xiquerypointer.c
+@@ -149,8 +149,7 @@ ProcXIQueryPointer(ClientPtr client)
+ if (pDev->button) {
+ int i;
+
+- rep.buttons_len =
+- bytes_to_int32(bits_to_bytes(pDev->button->numButtons));
++ rep.buttons_len = bytes_to_int32(bits_to_bytes(256)); /* button map up to 255 */
+ rep.length += rep.buttons_len;
+ buttons = calloc(rep.buttons_len, 4);
+ if (!buttons)
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index 867ec74363..ded8679d76 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -784,8 +784,9 @@ DeviceFocusEvent(DeviceIntPtr dev, int type, int mode, int detail,
+
+ mouse = IsFloating(dev) ? dev : GetMaster(dev, MASTER_POINTER);
+
+- /* XI 2 event */
+- btlen = (mouse->button) ? bits_to_bytes(mouse->button->numButtons) : 0;
++ /* XI 2 event contains the logical button map - maps are CARD8
++ * so we need 256 bits for the possibly maximum mapping */
++ btlen = (mouse->button) ? bits_to_bytes(256) : 0;
+ btlen = bytes_to_int32(btlen);
+ len = sizeof(xXIFocusInEvent) + btlen * 4;
+
+--
+GitLab
+
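The underlying rule: logical button numbers are CARD8 on the wire, so the button-down bitmask has to cover 256 possible buttons regardless of how many physical buttons the device reports. A short sketch of sizing such a mask, with bits_to_bytes redefined here only so the example builds standalone:

    #include <stdio.h>
    #include <string.h>

    #define bits_to_bytes(n) (((n) + 7) / 8)

    int main(void) {
        /* logical button numbers are CARD8, so a press can set bit 255 */
        unsigned char down[bits_to_bytes(256)];     /* 32 bytes, not numButtons/8 */
        memset(down, 0, sizeof down);

        int logical_button = 255;                   /* e.g. a remapped high button */
        down[logical_button >> 3] |= 1 << (logical_button & 7);

        printf("mask is %zu bytes\n", sizeof down);
        return 0;
    }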
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch
new file mode 100644
index 0000000000..80ebc64e59
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch
@@ -0,0 +1,87 @@
+From ece23be888a93b741aa1209d1dbf64636109d6a5 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 18 Dec 2023 14:27:50 +1000
+Subject: [PATCH] dix: Allocate sufficient xEvents for our DeviceStateNotify
+
+If a device has both a button class and a key class and numButtons is
+zero, we can get an OOB write due to event under-allocation.
+
+This function seems to assume a device has either keys or buttons, not
+both. It has two virtually identical code paths, both of which assume
+they're applying to the first event in the sequence.
+
+A device with both a key and button class triggered a logic bug - only
+one xEvent was allocated but the deviceStateNotify pointer was pushed on
+once per type. So effectively this logic code:
+
+ int count = 1;
+ if (button && nbuttons > 32) count++;
+ if (key && nbuttons > 0) count++;
+ if (key && nkeys > 32) count++; // this is basically always true
+ // count is at 2 for our keys + zero button device
+
+ ev = alloc(count * sizeof(xEvent));
+ FixDeviceStateNotify(ev);
+ if (button)
+ FixDeviceStateNotify(ev++);
+ if (key)
+ FixDeviceStateNotify(ev++); // santa drops into the wrong chimney here
+
+If the device has more than 3 valuators, the OOB is pushed back - we're
+off by one so it will happen when the last deviceValuator event is
+written instead.
+
+Fix this by allocating the maximum number of events we may allocate.
+Note that the current behavior is not protocol-correct anyway, this
+patch fixes only the allocation issue.
+
+Note that this issue does not trigger if the device has at least one
+button. While the server does not prevent a button class with zero
+buttons, it is very unlikely.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/ece23be888a93b741aa1209d1dbf64636109d6a5]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index ded8679d76..17964b00a4 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -675,7 +675,8 @@ static void
+ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ {
+ int evcount = 1;
+- deviceStateNotify *ev, *sev;
++ deviceStateNotify sev[6 + (MAX_VALUATORS + 2)/3];
++ deviceStateNotify *ev;
+ deviceKeyStateNotify *kev;
+ deviceButtonStateNotify *bev;
+
+@@ -714,7 +715,7 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ }
+ }
+
+- sev = ev = xallocarray(evcount, sizeof(xEvent));
++ ev = sev;
+ FixDeviceStateNotify(dev, ev, NULL, NULL, NULL, first);
+
+ if (b != NULL) {
+@@ -770,7 +771,6 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+
+ DeliverEventsToWindow(dev, win, (xEvent *) sev, evcount,
+ DeviceStateNotifyMask, NullGrab);
+- free(sev);
+ }
+
+ void
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch
new file mode 100644
index 0000000000..65df74376b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch
@@ -0,0 +1,221 @@
+From 219c54b8a3337456ce5270ded6a67bcde53553d5 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 18 Dec 2023 12:26:20 +1000
+Subject: [PATCH] dix: fix DeviceStateNotify event calculation
+
+The previous code only made sense if one considers buttons and keys to
+be mutually exclusive on a device. That is not necessarily true, causing
+a number of issues.
+
+This function allocates and fills in the number of xEvents we need to
+send the device state down the wire. This is split across multiple
+32-byte devices including one deviceStateNotify event and optional
+deviceKeyStateNotify, deviceButtonStateNotify and (possibly multiple)
+deviceValuator events.
+
+The previous behavior would instead compose a sequence
+of [state, buttonstate, state, keystate, valuator...]. This is not
+protocol correct, and on top of that made the code extremely convoluted.
+
+Fix this by streamlining: add both button and key into the deviceStateNotify
+and then append the key state and button state, followed by the
+valuators. Finally, the deviceValuator events contain up to 6 valuators
+per event but we only ever sent through 3 at a time. Let's double that
+throughput.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/219c54b8a3337456ce5270ded6a67bcde53553d5]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.c | 121 ++++++++++++++++++++---------------------------
+ 1 file changed, 52 insertions(+), 69 deletions(-)
+
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index 17964b00a4..7b7ba1098b 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -615,9 +615,15 @@ FixDeviceValuator(DeviceIntPtr dev, deviceValuator * ev, ValuatorClassPtr v,
+
+ ev->type = DeviceValuator;
+ ev->deviceid = dev->id;
+- ev->num_valuators = nval < 3 ? nval : 3;
++ ev->num_valuators = nval < 6 ? nval : 6;
+ ev->first_valuator = first;
+ switch (ev->num_valuators) {
++ case 6:
++ ev->valuator2 = v->axisVal[first + 5];
++ case 5:
++ ev->valuator2 = v->axisVal[first + 4];
++ case 4:
++ ev->valuator2 = v->axisVal[first + 3];
+ case 3:
+ ev->valuator2 = v->axisVal[first + 2];
+ case 2:
+@@ -626,7 +632,6 @@ FixDeviceValuator(DeviceIntPtr dev, deviceValuator * ev, ValuatorClassPtr v,
+ ev->valuator0 = v->axisVal[first];
+ break;
+ }
+- first += ev->num_valuators;
+ }
+
+ static void
+@@ -646,7 +651,7 @@ FixDeviceStateNotify(DeviceIntPtr dev, deviceStateNotify * ev, KeyClassPtr k,
+ ev->num_buttons = b->numButtons;
+ memcpy((char *) ev->buttons, (char *) b->down, 4);
+ }
+- else if (k) {
++ if (k) {
+ ev->classes_reported |= (1 << KeyClass);
+ ev->num_keys = k->xkbInfo->desc->max_key_code -
+ k->xkbInfo->desc->min_key_code;
+@@ -670,15 +675,26 @@ FixDeviceStateNotify(DeviceIntPtr dev, deviceStateNotify * ev, KeyClassPtr k,
+ }
+ }
+
+-
++/**
++ * The device state notify event is split across multiple 32-byte events.
++ * The first one contains the first 32 button state bits, the first 32
++ * key state bits, and the first 3 valuator values.
++ *
++ * If a device has more than that, the server sends out:
++ * - one deviceButtonStateNotify for buttons 32 and above
++ * - one deviceKeyStateNotify for keys 32 and above
++ * - one deviceValuator event per 6 valuators above valuator 4
++ *
++ * All events but the last one have the deviceid binary ORed with MORE_EVENTS,
++ */
+ static void
+ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ {
++ /* deviceStateNotify, deviceKeyStateNotify, deviceButtonStateNotify
++ * and one deviceValuator for each 6 valuators */
++ deviceStateNotify sev[3 + (MAX_VALUATORS + 6)/6];
+ int evcount = 1;
+- deviceStateNotify sev[6 + (MAX_VALUATORS + 2)/3];
+- deviceStateNotify *ev;
+- deviceKeyStateNotify *kev;
+- deviceButtonStateNotify *bev;
++ deviceStateNotify *ev = sev;
+
+ KeyClassPtr k;
+ ButtonClassPtr b;
+@@ -691,82 +707,49 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+
+ if ((b = dev->button) != NULL) {
+ nbuttons = b->numButtons;
+- if (nbuttons > 32)
++ if (nbuttons > 32) /* first 32 are encoded in deviceStateNotify */
+ evcount++;
+ }
+ if ((k = dev->key) != NULL) {
+ nkeys = k->xkbInfo->desc->max_key_code - k->xkbInfo->desc->min_key_code;
+- if (nkeys > 32)
++ if (nkeys > 32) /* first 32 are encoded in deviceStateNotify */
+ evcount++;
+- if (nbuttons > 0) {
+- evcount++;
+- }
+ }
+ if ((v = dev->valuator) != NULL) {
+ nval = v->numAxes;
+-
+- if (nval > 3)
+- evcount++;
+- if (nval > 6) {
+- if (!(k && b))
+- evcount++;
+- if (nval > 9)
+- evcount += ((nval - 7) / 3);
+- }
++ /* first three are encoded in deviceStateNotify, then
++ * it's 6 per deviceValuator event */
++ evcount += ((nval - 3) + 6)/6;
+ }
+
+- ev = sev;
+- FixDeviceStateNotify(dev, ev, NULL, NULL, NULL, first);
+-
+- if (b != NULL) {
+- FixDeviceStateNotify(dev, ev++, NULL, b, v, first);
+- first += 3;
+- nval -= 3;
+- if (nbuttons > 32) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- bev = (deviceButtonStateNotify *) ev++;
+- bev->type = DeviceButtonStateNotify;
+- bev->deviceid = dev->id;
+- memcpy((char *) &bev->buttons[4], (char *) &b->down[4],
+- DOWN_LENGTH - 4);
+- }
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ BUG_RETURN(evcount <= ARRAY_SIZE(sev));
++
++ FixDeviceStateNotify(dev, ev, k, b, v, first);
++
++ if (b != NULL && nbuttons > 32) {
++ deviceButtonStateNotify *bev = (deviceButtonStateNotify *) ++ev;
++ (ev - 1)->deviceid |= MORE_EVENTS;
++ bev->type = DeviceButtonStateNotify;
++ bev->deviceid = dev->id;
++ memcpy((char *) &bev->buttons[4], (char *) &b->down[4],
++ DOWN_LENGTH - 4);
+ }
+
+- if (k != NULL) {
+- FixDeviceStateNotify(dev, ev++, k, NULL, v, first);
+- first += 3;
+- nval -= 3;
+- if (nkeys > 32) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- kev = (deviceKeyStateNotify *) ev++;
+- kev->type = DeviceKeyStateNotify;
+- kev->deviceid = dev->id;
+- memmove((char *) &kev->keys[0], (char *) &k->down[4], 28);
+- }
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ if (k != NULL && nkeys > 32) {
++ deviceKeyStateNotify *kev = (deviceKeyStateNotify *) ++ev;
++ (ev - 1)->deviceid |= MORE_EVENTS;
++ kev->type = DeviceKeyStateNotify;
++ kev->deviceid = dev->id;
++ memmove((char *) &kev->keys[0], (char *) &k->down[4], 28);
+ }
+
++ first = 3;
++ nval -= 3;
+ while (nval > 0) {
+- FixDeviceStateNotify(dev, ev++, NULL, NULL, v, first);
+- first += 3;
+- nval -= 3;
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ ev->deviceid |= MORE_EVENTS;
++ FixDeviceValuator(dev, (deviceValuator *) ++ev, v, first);
++ first += 6;
++ nval -= 6;
+ }
+
+ DeliverEventsToWindow(dev, win, (xEvent *) sev, evcount,
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch
new file mode 100644
index 0000000000..742c122fa8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch
@@ -0,0 +1,41 @@
+From df3c65706eb169d5938df0052059f3e0d5981b74 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 21 Dec 2023 13:48:10 +1000
+Subject: [PATCH] Xi: when creating a new ButtonClass, set the number of
+ buttons
+
+There's a racy sequence where a master device may copy the button class
+from the slave, without ever initializing numButtons. This leads to a
+device with zero buttons but a button class which is invalid.
+
+Let's copy the numButtons value from the source - by definition if we
+don't have a button class yet we do not have any other slave devices
+with more than this number of buttons anyway.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/df3c65706eb169d5938df0052059f3e0d5981b74]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index 54ea11a938..e161714682 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -605,6 +605,7 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ to->button = calloc(1, sizeof(ButtonClassRec));
+ if (!to->button)
+ FatalError("[Xi] no memory for class shift.\n");
++ to->button->numButtons = from->button->numButtons;
+ }
+ else
+ classes->button = NULL;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch
new file mode 100644
index 0000000000..d1a6214793
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch
@@ -0,0 +1,45 @@
+From 37539cb0bfe4ed96d4499bf371e6b1a474a740fe Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 21 Dec 2023 14:10:11 +1000
+Subject: [PATCH] Xi: require a pointer and keyboard device for
+ XIAttachToMaster
+
+If we remove a master device and specify which other master devices
+attached slaves should be returned to, enforce that those two are
+indeed a pointer and a keyboard.
+
+Otherwise we can try to attach the keyboards to pointers and vice versa,
+leading to possible crashes later.
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/37539cb0bfe4ed96d4499bf371e6b1a474a740fe]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xichangehierarchy.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Xi/xichangehierarchy.c b/Xi/xichangehierarchy.c
+index 504defe566..d2d985848d 100644
+--- a/Xi/xichangehierarchy.c
++++ b/Xi/xichangehierarchy.c
+@@ -270,7 +270,7 @@ remove_master(ClientPtr client, xXIRemoveMasterInfo * r, int flags[MAXDEVICES])
+ if (rc != Success)
+ goto unwind;
+
+- if (!IsMaster(newptr)) {
++ if (!IsMaster(newptr) || !IsPointerDevice(newptr)) {
+ client->errorValue = r->return_pointer;
+ rc = BadDevice;
+ goto unwind;
+@@ -281,7 +281,7 @@ remove_master(ClientPtr client, xXIRemoveMasterInfo * r, int flags[MAXDEVICES])
+ if (rc != Success)
+ goto unwind;
+
+- if (!IsMaster(newkeybd)) {
++ if (!IsMaster(newkeybd) || !IsKeyboardDevice(newkeybd)) {
+ client->errorValue = r->return_keyboard;
+ rc = BadDevice;
+ goto unwind;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch
new file mode 100644
index 0000000000..c8f75d8a7e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch
@@ -0,0 +1,64 @@
+From e5e8586a12a3ec915673edffa10dc8fe5e15dac3 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 12:09:41 +0100
+Subject: [PATCH] glx: Call XACE hooks on the GLX buffer
+
+The XSELINUX code will label resources at creation by checking the
+access mode. When the access mode is DixCreateAccess, it will call the
+function to label the new resource SELinuxLabelResource().
+
+However, GLX buffers do not go through the XACE hooks when created,
+hence leaving the resource actually unlabeled.
+
+When, later, the client tries to create another resource using that
+drawable (like a GC for example), the XSELINUX code would try to use
+the security ID of that object which has never been labeled, get a NULL
+pointer and crash when checking whether the requested permissions are
+granted for subject security ID.
+
+To avoid the issue, make sure to call the XACE hooks when creating the
+GLX buffers.
+
+Credit goes to Donn Seeley <donn@xmission.com> for providing the patch.
+
+CVE-2024-0408
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Acked-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/e5e8586a12a3ec915673edffa10dc8fe5e15dac3]
+CVE: CVE-2024-0408
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ glx/glxcmds.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/glx/glxcmds.c b/glx/glxcmds.c
+index fc26a2e345..1e46d0c723 100644
+--- a/glx/glxcmds.c
++++ b/glx/glxcmds.c
+@@ -48,6 +48,7 @@
+ #include "indirect_util.h"
+ #include "protocol-versions.h"
+ #include "glxvndabi.h"
++#include "xace.h"
+
+ static char GLXServerVendorName[] = "SGI";
+
+@@ -1392,6 +1393,13 @@ DoCreatePbuffer(ClientPtr client, int screenNum, XID fbconfigId,
+ if (!pPixmap)
+ return BadAlloc;
+
++ err = XaceHook(XACE_RESOURCE_ACCESS, client, glxDrawableId, RT_PIXMAP,
++ pPixmap, RT_NONE, NULL, DixCreateAccess);
++ if (err != Success) {
++ (*pGlxScreen->pScreen->DestroyPixmap) (pPixmap);
++ return err;
++ }
++
+ /* Assign the pixmap the same id as the pbuffer and add it as a
+ * resource so it and the DRI2 drawable will be reclaimed when the
+ * pbuffer is destroyed. */
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch
new file mode 100644
index 0000000000..9763e0b562
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch
@@ -0,0 +1,46 @@
+From 2ef0f1116c65d5cb06d7b6d83f8a1aea702c94f7 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 11:51:56 +0100
+Subject: [PATCH] ephyr,xwayland: Use the proper private key for cursor
+
+The cursor in DIX is actually split in two parts, the cursor itself and
+the cursor bits, each with their own devPrivates.
+
+The cursor itself includes the cursor bits, meaning that the cursor bits
+devPrivates in within structure of the cursor.
+
+Both Xephyr and Xwayland were using the private key for the cursor bits
+to store the data for the cursor, and when using XSELINUX which comes
+with its own special devPrivates, the data stored in that cursor bits'
+devPrivates would interfere with the XSELINUX devPrivates data and the
+SELINUX security ID would point to some other unrelated data, causing a
+crash in the XSELINUX code when trying to (re)use the security ID.
+
+CVE-2024-0409
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Reviewed-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/2ef0f1116c65d5cb06d7b6d83f8a1aea702c94f7]
+CVE: CVE-2024-0409
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/kdrive/ephyr/ephyrcursor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/kdrive/ephyr/ephyrcursor.c b/hw/kdrive/ephyr/ephyrcursor.c
+index f991899..3f192d0 100644
+--- a/hw/kdrive/ephyr/ephyrcursor.c
++++ b/hw/kdrive/ephyr/ephyrcursor.c
+@@ -246,7 +246,7 @@ miPointerSpriteFuncRec EphyrPointerSpriteFuncs = {
+ Bool
+ ephyrCursorInit(ScreenPtr screen)
+ {
+- if (!dixRegisterPrivateKey(&ephyrCursorPrivateKey, PRIVATE_CURSOR_BITS,
++ if (!dixRegisterPrivateKey(&ephyrCursorPrivateKey, PRIVATE_CURSOR,
+ sizeof(ephyrCursorRec)))
+ return FALSE;
+
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch
new file mode 100644
index 0000000000..7c8fbcc3ec
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch
@@ -0,0 +1,113 @@
+From 4a5e9b1895627d40d26045bd0b7ef3dce503cbd1 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 4 Jan 2024 10:01:24 +1000
+Subject: [PATCH] Xi: flush hierarchy events after adding/removing master
+ devices
+
+The `XISendDeviceHierarchyEvent()` function allocates space to store up
+to `MAXDEVICES` (256) `xXIHierarchyInfo` structures in `info`.
+
+If a device with a given ID was removed and a new device with the same
+ID added both in the same operation, the single device ID will lead to
+two info structures being written to `info`.
+
+Since this case can occur for every device ID at once, a total of two
+times `MAXDEVICES` info structures might be written to the allocation.
+
+To avoid it, once one add/remove master is processed, send out the
+device hierarchy event for the current state and continue. That event
+thus only ever has exactly one of either added/removed in it (and
+optionally slave attached/detached).
+
+CVE-2024-21885, ZDI-CAN-22744
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/4a5e9b1895627d40d26045bd0b7ef3dce503cbd1]
+CVE: CVE-2024-21885
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xichangehierarchy.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/Xi/xichangehierarchy.c b/Xi/xichangehierarchy.c
+index d2d985848d..72d00451e3 100644
+--- a/Xi/xichangehierarchy.c
++++ b/Xi/xichangehierarchy.c
+@@ -416,6 +416,11 @@ ProcXIChangeHierarchy(ClientPtr client)
+ size_t len; /* length of data remaining in request */
+ int rc = Success;
+ int flags[MAXDEVICES] = { 0 };
++ enum {
++ NO_CHANGE,
++ FLUSH,
++ CHANGED,
++ } changes = NO_CHANGE;
+
+ REQUEST(xXIChangeHierarchyReq);
+ REQUEST_AT_LEAST_SIZE(xXIChangeHierarchyReq);
+@@ -465,8 +470,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = add_master(client, c, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = FLUSH;
+ break;
++ }
+ case XIRemoveMaster:
+ {
+ xXIRemoveMasterInfo *r = (xXIRemoveMasterInfo *) any;
+@@ -475,8 +481,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = remove_master(client, r, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = FLUSH;
+ break;
++ }
+ case XIDetachSlave:
+ {
+ xXIDetachSlaveInfo *c = (xXIDetachSlaveInfo *) any;
+@@ -485,8 +492,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = detach_slave(client, c, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = CHANGED;
+ break;
++ }
+ case XIAttachSlave:
+ {
+ xXIAttachSlaveInfo *c = (xXIAttachSlaveInfo *) any;
+@@ -495,16 +503,25 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = attach_slave(client, c, flags);
+ if (rc != Success)
+ goto unwind;
++ changes = CHANGED;
++ break;
+ }
++ default:
+ break;
+ }
+
++ if (changes == FLUSH) {
++ XISendDeviceHierarchyEvent(flags);
++ memset(flags, 0, sizeof(flags));
++ changes = NO_CHANGE;
++ }
++
+ len -= any->length * 4;
+ any = (xXIAnyHierarchyChangeInfo *) ((char *) any + any->length * 4);
+ }
+
+ unwind:
+-
+- XISendDeviceHierarchyEvent(flags);
++ if (changes != NO_CHANGE)
++ XISendDeviceHierarchyEvent(flags);
+ return rc;
+ }
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch
new file mode 100644
index 0000000000..1e1c782963
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch
@@ -0,0 +1,74 @@
+From bc1fdbe46559dd947674375946bbef54dd0ce36b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= <jexposit@redhat.com>
+Date: Fri, 22 Dec 2023 18:28:31 +0100
+Subject: [PATCH] Xi: do not keep linked list pointer during recursion
+
+The `DisableDevice()` function is called whenever an enabled device
+is disabled and it moves the device from the `inputInfo.devices` linked
+list to the `inputInfo.off_devices` linked list.
+
+However, its link/unlink operation has an issue during the recursive
+call to `DisableDevice()` due to the `prev` pointer pointing to a
+removed device.
+
+This issue leads to a length mismatch between the total number of
+devices and the number of devices in the list, leading to a heap
+overflow and, possibly, to local privilege escalation.
+
+Simplify the code that checked whether the device passed to
+`DisableDevice()` was in `inputInfo.devices` or not and find the
+previous device after the recursion.
+
+CVE-2024-21886, ZDI-CAN-22840
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/bc1fdbe46559dd947674375946bbef54dd0ce36b]
+CVE: CVE-2024-21886
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/devices.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/dix/devices.c b/dix/devices.c
+index dca98c8d1b..389d28a23c 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -453,14 +453,20 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ {
+ DeviceIntPtr *prev, other;
+ BOOL enabled;
++ BOOL dev_in_devices_list = FALSE;
+ int flags[MAXDEVICES] = { 0 };
+
+ if (!dev->enabled)
+ return TRUE;
+
+- for (prev = &inputInfo.devices;
+- *prev && (*prev != dev); prev = &(*prev)->next);
+- if (*prev != dev)
++ for (other = inputInfo.devices; other; other = other->next) {
++ if (other == dev) {
++ dev_in_devices_list = TRUE;
++ break;
++ }
++ }
++
++ if (!dev_in_devices_list)
+ return FALSE;
+
+ TouchEndPhysicallyActiveTouches(dev);
+@@ -511,6 +517,9 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ LeaveWindow(dev);
+ SetFocusOut(dev);
+
++ for (prev = &inputInfo.devices;
++ *prev && (*prev != dev); prev = &(*prev)->next);
++
+ *prev = dev->next;
+ dev->next = inputInfo.off_devices;
+ inputInfo.off_devices = dev;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch
new file mode 100644
index 0000000000..af607df4f0
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch
@@ -0,0 +1,57 @@
+From 26769aa71fcbe0a8403b7fb13b7c9010cc07c3a8 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Fri, 5 Jan 2024 09:40:27 +1000
+Subject: [PATCH] dix: when disabling a master, float disabled slaved devices
+ too
+
+Disabling a master device floats all slave devices but we didn't do this
+to already-disabled slave devices. As a result those devices kept their
+reference to the master device resulting in access to already freed
+memory if the master device was removed before the corresponding slave
+device.
+
+And to match this behavior, also forcibly reset that pointer during
+CloseDownDevices().
+
+Related to CVE-2024-21886, ZDI-CAN-22840
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/26769aa71fcbe0a8403b7fb13b7c9010cc07c3a8]
+CVE: CVE-2024-21886
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/devices.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/dix/devices.c b/dix/devices.c
+index 389d28a23c..84a6406d13 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -483,6 +483,13 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ flags[other->id] |= XISlaveDetached;
+ }
+ }
++
++ for (other = inputInfo.off_devices; other; other = other->next) {
++ if (!IsMaster(other) && GetMaster(other, MASTER_ATTACHED) == dev) {
++ AttachDevice(NULL, other, NULL);
++ flags[other->id] |= XISlaveDetached;
++ }
++ }
+ }
+ else {
+ for (other = inputInfo.devices; other; other = other->next) {
+@@ -1088,6 +1095,11 @@ CloseDownDevices(void)
+ dev->master = NULL;
+ }
+
++ for (dev = inputInfo.off_devices; dev; dev = dev->next) {
++ if (!IsMaster(dev) && !IsFloating(dev))
++ dev->master = NULL;
++ }
++
+ CloseDeviceList(&inputInfo.devices);
+ CloseDeviceList(&inputInfo.off_devices);
+
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch
new file mode 100644
index 0000000000..40296903cd
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch
@@ -0,0 +1,49 @@
+From 96798fc1967491c80a4d0c8d9e0a80586cb2152b Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Fri, 22 Mar 2024 18:51:45 -0700
+Subject: [PATCH] Xi: ProcXIGetSelectedEvents needs to use unswapped length to
+ send reply
+
+CVE-2024-31080
+
+Reported-by: https://debbugs.gnu.org/cgi/bugreport.cgi?bug=69762
+Fixes: 53e821ab4 ("Xi: add request processing for XIGetSelectedEvents.")
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+Part-of: <https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/1463>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/96798fc1967491c80a4d0c8d9e0a80586cb2152b]
+CVE: CVE-2024-31080
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xiselectev.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/xiselectev.c b/Xi/xiselectev.c
+index edcb8a0d36..ac14949871 100644
+--- a/Xi/xiselectev.c
++++ b/Xi/xiselectev.c
+@@ -349,6 +349,7 @@ ProcXIGetSelectedEvents(ClientPtr client)
+ InputClientsPtr others = NULL;
+ xXIEventMask *evmask = NULL;
+ DeviceIntPtr dev;
++ uint32_t length;
+
+ REQUEST(xXIGetSelectedEventsReq);
+ REQUEST_SIZE_MATCH(xXIGetSelectedEventsReq);
+@@ -418,10 +419,12 @@ ProcXIGetSelectedEvents(ClientPtr client)
+ }
+ }
+
++ /* save the value before SRepXIGetSelectedEvents swaps it */
++ length = reply.length;
+ WriteReplyToClient(client, sizeof(xXIGetSelectedEventsReply), &reply);
+
+ if (reply.num_masks)
+- WriteToClient(client, reply.length * 4, buffer);
++ WriteToClient(client, length * 4, buffer);
+
+ free(buffer);
+ return Success;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch
new file mode 100644
index 0000000000..4380004700
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch
@@ -0,0 +1,47 @@
+From 3e77295f888c67fc7645db5d0c00926a29ffecee Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Fri, 22 Mar 2024 18:56:27 -0700
+Subject: [PATCH] Xi: ProcXIPassiveGrabDevice needs to use unswapped length to
+ send reply
+
+CVE-2024-31081
+
+Fixes: d220d6907 ("Xi: add GrabButton and GrabKeysym code.")
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+Part-of: <https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/1463>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/3e77295f888c67fc7645db5d0c00926a29ffecee]
+CVE: CVE-2024-31081
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xipassivegrab.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/xipassivegrab.c b/Xi/xipassivegrab.c
+index c9ac2f8553..896233bec2 100644
+--- a/Xi/xipassivegrab.c
++++ b/Xi/xipassivegrab.c
+@@ -93,6 +93,7 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ GrabParameters param;
+ void *tmp;
+ int mask_len;
++ uint32_t length;
+
+ REQUEST(xXIPassiveGrabDeviceReq);
+ REQUEST_FIXED_SIZE(xXIPassiveGrabDeviceReq,
+@@ -247,9 +248,11 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ }
+ }
+
++ /* save the value before SRepXIPassiveGrabDevice swaps it */
++ length = rep.length;
+ WriteReplyToClient(client, sizeof(rep), &rep);
+ if (rep.num_modifiers)
+- WriteToClient(client, rep.length * 4, modifiers_failed);
++ WriteToClient(client, length * 4, modifiers_failed);
+
+ out:
+ free(modifiers_failed);
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg_21.1.3.bb b/meta/recipes-graphics/xorg-xserver/xserver-xorg_21.1.8.bb
index 1f53ab5177..b9eed92103 100644
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg_21.1.3.bb
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg_21.1.8.bb
@@ -1,10 +1,25 @@
require xserver-xorg.inc
SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.patch \
- file://0001-Avoid-duplicate-definitions-of-IOPortBase.patch \
- file://0001-render-Fix-build-with-gcc-12.patch \
- "
-SRC_URI[sha256sum] = "61d6aad5b6b47a116b960bd7f0cba4ee7e6da95d6bb0b127bde75d7d1acdebe5"
+ file://0001-Avoid-duplicate-definitions-of-IOPortBase.patch \
+ file://CVE-2023-5367.patch \
+ file://CVE-2023-5380.patch \
+ file://CVE-2023-6377.patch \
+ file://CVE-2023-6478.patch \
+ file://CVE-2023-6816.patch \
+ file://CVE-2024-0229-1.patch \
+ file://CVE-2024-0229-2.patch \
+ file://CVE-2024-0229-3.patch \
+ file://CVE-2024-0229-4.patch \
+ file://CVE-2024-21885.patch \
+ file://CVE-2024-21886-1.patch \
+ file://CVE-2024-21886-2.patch \
+ file://CVE-2024-0408.patch \
+ file://CVE-2024-0409.patch \
+ file://CVE-2024-31080.patch \
+ file://CVE-2024-31081.patch \
+ "
+SRC_URI[sha256sum] = "38aadb735650c8024ee25211c190bf8aad844c5f59632761ab1ef4c4d5aeb152"
# These extensions are now integrated into the server, so declare the migration
# path for in-place upgrades.
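The file://CVE-*.patch entries added above are resolved against the recipe's
file search path (here the xserver-xorg directory alongside the .bb file) and
applied by do_patch in the order listed. As a minimal, hedged sketch, a
downstream layer could carry an additional backport the same way through a
bbappend; the patch file name below is hypothetical:

    # xserver-xorg_%.bbappend -- illustrative sketch only
    FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
    SRC_URI += "file://CVE-2024-XXXX.patch"   # hypothetical backport carried by the layer

Something like `bitbake xserver-xorg -c patch` can then be used to confirm that
the added patches still apply against the upgraded 21.1.8 sources.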
diff --git a/meta/recipes-graphics/xwayland/xwayland/CVE-2023-5367.patch b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-5367.patch
new file mode 100644
index 0000000000..d4da1ecb4b
--- /dev/null
+++ b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-5367.patch
@@ -0,0 +1,85 @@
+CVE: CVE-2023-5367
+Upstream-Status: Backport [ https://gitlab.freedesktop.org/xorg/xserver/-/commit/541ab2ecd41d4d8689e71855d93e492bc554719a ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+
+From 541ab2ecd41d4d8689e71855d93e492bc554719a Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 3 Oct 2023 11:53:05 +1000
+Subject: [PATCH] Xi/randr: fix handling of PropModeAppend/Prepend
+
+The handling of appending/prepending properties was incorrect, with at
+least two bugs: the property length was set to the length of the new
+part only, i.e. appending or prepending N elements to a property with P
+existing elements always resulted in the property having N elements
+instead of N + P.
+
+Second, when prepending a value to a property, the offset for the old
+values was incorrect, leaving the new property with potentially
+uninitialized values and/or resulting in OOB memory writes.
+For example, prepending a 3 element value to a 5 element property would
+result in this 8 value array:
+ [N, N, N, ?, ?, P, P, P ] P, P
+ ^OOB write
+
+The XI2 code is a copy/paste of the RandR code, so the bug exists in
+both.
+
+CVE-2023-5367, ZDI-CAN-22153
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+---
+ Xi/xiproperty.c | 4 ++--
+ randr/rrproperty.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiproperty.c b/Xi/xiproperty.c
+index 066ba21fba..d315f04d0e 100644
+--- a/Xi/xiproperty.c
++++ b/Xi/xiproperty.c
+@@ -730,7 +730,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ XIDestroyDeviceProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -747,7 +747,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index c2fb9585c6..25469f57b2 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -209,7 +209,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ RRDestroyOutputProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -226,7 +226,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6377.patch b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6377.patch
new file mode 100644
index 0000000000..f650f495a3
--- /dev/null
+++ b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6377.patch
@@ -0,0 +1,82 @@
+CVE: CVE-2023-6377
+Upstream-Status: Backport [ https://gitlab.freedesktop.org/xorg/xserver/-/commit/19e9f199950aaa4b9b7696936d1b067475da999c ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+
+From 19e9f199950aaa4b9b7696936d1b067475da999c Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 28 Nov 2023 15:19:04 +1000
+Subject: [PATCH] Xi: allocate enough XkbActions for our buttons
+
+button->xkb_acts is supposed to be an array sufficiently large for all
+our buttons, not just a single XkbActions struct. Allocating
+insufficient memory here means when we memcpy() later in
+XkbSetDeviceInfo we write into memory that wasn't ours to begin with,
+leading to the usual security ooopsiedaisies.
+
+CVE-2023-6377, ZDI-CAN-22412, ZDI-CAN-22413
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+(cherry picked from commit 0c1a93d319558fe3ab2d94f51d174b4f93810afd)
+---
+ Xi/exevents.c | 12 ++++++------
+ dix/devices.c | 10 ++++++++++
+ 2 files changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index dcd4efb3bc..54ea11a938 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -611,13 +611,13 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ }
+
+ if (from->button->xkb_acts) {
+- if (!to->button->xkb_acts) {
+- to->button->xkb_acts = calloc(1, sizeof(XkbAction));
+- if (!to->button->xkb_acts)
+- FatalError("[Xi] not enough memory for xkb_acts.\n");
+- }
++ size_t maxbuttons = max(to->button->numButtons, from->button->numButtons);
++ to->button->xkb_acts = xnfreallocarray(to->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(to->button->xkb_acts, 0, maxbuttons * sizeof(XkbAction));
+ memcpy(to->button->xkb_acts, from->button->xkb_acts,
+- sizeof(XkbAction));
++ from->button->numButtons * sizeof(XkbAction));
+ }
+ else {
+ free(to->button->xkb_acts);
+diff --git a/dix/devices.c b/dix/devices.c
+index 7150734a58..20fef16923 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -2530,6 +2530,8 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+
+ if (master->button && master->button->numButtons != maxbuttons) {
+ int i;
++ int last_num_buttons = master->button->numButtons;
++
+ DeviceChangedEvent event = {
+ .header = ET_Internal,
+ .type = ET_DeviceChanged,
+@@ -2540,6 +2542,14 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+ };
+
+ master->button->numButtons = maxbuttons;
++ if (last_num_buttons < maxbuttons) {
++ master->button->xkb_acts = xnfreallocarray(master->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(&master->button->xkb_acts[last_num_buttons],
++ 0,
++ (maxbuttons - last_num_buttons) * sizeof(XkbAction));
++ }
+
+ memcpy(&event.buttons.names, master->button->labels, maxbuttons *
+ sizeof(Atom));
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6478.patch b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6478.patch
new file mode 100644
index 0000000000..23fbc0e9e2
--- /dev/null
+++ b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6478.patch
@@ -0,0 +1,66 @@
+CVE: CVE-2023-6478
+Upstream-Status: Backport [ https://gitlab.freedesktop.org/xorg/xserver/-/commit/aaf854fb25541380cc38a221c15f0e8372f48872 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+
+From aaf854fb25541380cc38a221c15f0e8372f48872 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 27 Nov 2023 16:27:49 +1000
+Subject: [PATCH] randr: avoid integer truncation in length check of
+ ProcRRChange*Property
+
+Affected are ProcRRChangeProviderProperty and ProcRRChangeOutputProperty.
+See also xserver@8f454b79 where this same bug was fixed for the core
+protocol and XI.
+
+This fixes an OOB read and the resulting information disclosure.
+
+Length calculation for the request was clipped to a 32-bit integer. With
+the correct stuff->nUnits value the expected request size was
+truncated, passing the REQUEST_FIXED_SIZE check.
+
+The server then proceeded with reading at least stuff->num_items bytes
+(depending on stuff->format) from the request and stuffing whatever it
+finds into the property. In the process it would also allocate at least
+stuff->nUnits bytes, i.e. 4GB.
+
+CVE-2023-6478, ZDI-CAN-22561
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+(cherry picked from commit 14f480010a93ff962fef66a16412fafff81ad632)
+---
+ randr/rrproperty.c | 2 +-
+ randr/rrproviderproperty.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index 25469f57b2..c4fef8a1f6 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -530,7 +530,7 @@ ProcRRChangeOutputProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeOutputPropertyReq);
+diff --git a/randr/rrproviderproperty.c b/randr/rrproviderproperty.c
+index b79c17f9bf..90c5a9a933 100644
+--- a/randr/rrproviderproperty.c
++++ b/randr/rrproviderproperty.c
+@@ -498,7 +498,7 @@ ProcRRChangeProviderProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeProviderPropertyReq);
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6816.patch b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6816.patch
new file mode 100644
index 0000000000..5c68bfb3c1
--- /dev/null
+++ b/meta/recipes-graphics/xwayland/xwayland/CVE-2023-6816.patch
@@ -0,0 +1,57 @@
+CVE: CVE-2023-6816
+Upstream-Status: Backport [ https://gitlab.freedesktop.org/xorg/xserver/-/commit/b5cb27032d3e486ba84a491e1420e85171c4c0a3 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From b5cb27032d3e486ba84a491e1420e85171c4c0a3 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 14 Dec 2023 11:29:49 +1000
+Subject: [PATCH] dix: allocate enough space for logical button maps
+
+Both DeviceFocusEvent and the XIQueryPointer reply contain a bit for
+each logical button currently down. Since buttons can be arbitrarily mapped
+to anything up to 255, make sure we have enough bits for the maximum mapping.
+
+CVE-2023-6816, ZDI-CAN-22664, ZDI-CAN-22665
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+(cherry picked from commit 9e2ecb2af8302dedc49cb6a63ebe063c58a9e7e3)
+---
+ Xi/xiquerypointer.c | 3 +--
+ dix/enterleave.c | 5 +++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiquerypointer.c b/Xi/xiquerypointer.c
+index 5b77b1a444..2b05ac5f39 100644
+--- a/Xi/xiquerypointer.c
++++ b/Xi/xiquerypointer.c
+@@ -149,8 +149,7 @@ ProcXIQueryPointer(ClientPtr client)
+ if (pDev->button) {
+ int i;
+
+- rep.buttons_len =
+- bytes_to_int32(bits_to_bytes(pDev->button->numButtons));
++ rep.buttons_len = bytes_to_int32(bits_to_bytes(256)); /* button map up to 255 */
+ rep.length += rep.buttons_len;
+ buttons = calloc(rep.buttons_len, 4);
+ if (!buttons)
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index 867ec74363..ded8679d76 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -784,8 +784,9 @@ DeviceFocusEvent(DeviceIntPtr dev, int type, int mode, int detail,
+
+ mouse = IsFloating(dev) ? dev : GetMaster(dev, MASTER_POINTER);
+
+- /* XI 2 event */
+- btlen = (mouse->button) ? bits_to_bytes(mouse->button->numButtons) : 0;
++ /* XI 2 event contains the logical button map - maps are CARD8
++ * so we need 256 bits for the possibly maximum mapping */
++ btlen = (mouse->button) ? bits_to_bytes(256) : 0;
+ btlen = bytes_to_int32(btlen);
+ len = sizeof(xXIFocusInEvent) + btlen * 4;
+
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xwayland/xwayland/CVE-2024-0408.patch b/meta/recipes-graphics/xwayland/xwayland/CVE-2024-0408.patch
new file mode 100644
index 0000000000..9063cd00b2
--- /dev/null
+++ b/meta/recipes-graphics/xwayland/xwayland/CVE-2024-0408.patch
@@ -0,0 +1,65 @@
+CVE: CVE-2024-0408
+Upstream-Status: Backport [ https://gitlab.freedesktop.org/xorg/xserver/-/commit/4093057b98bc5a178f130c9ba6b0b28385e24ae5 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 4093057b98bc5a178f130c9ba6b0b28385e24ae5 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 12:09:41 +0100
+Subject: [PATCH] glx: Call XACE hooks on the GLX buffer
+
+The XSELINUX code will label resources at creation by checking the
+access mode. When the access mode is DixCreateAccess, it will call
+SELinuxLabelResource() to label the new resource.
+
+However, GLX buffers do not go through the XACE hooks when created,
+hence leaving the resource actually unlabeled.
+
+When, later, the client tries to create another resource using that
+drawable (like a GC for example), the XSELINUX code would try to use
+the security ID of that object which has never been labeled, get a NULL
+pointer and crash when checking whether the requested permissions are
+granted for the subject security ID.
+
+To avoid the issue, make sure to call the XACE hooks when creating the
+GLX buffers.
+
+Credit goes to Donn Seeley <donn@xmission.com> for providing the patch.
+
+CVE-2024-0408
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Acked-by: Peter Hutterer <peter.hutterer@who-t.net>
+(cherry picked from commit e5e8586a12a3ec915673edffa10dc8fe5e15dac3)
+---
+ glx/glxcmds.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/glx/glxcmds.c b/glx/glxcmds.c
+index fc26a2e345..1e46d0c723 100644
+--- a/glx/glxcmds.c
++++ b/glx/glxcmds.c
+@@ -48,6 +48,7 @@
+ #include "indirect_util.h"
+ #include "protocol-versions.h"
+ #include "glxvndabi.h"
++#include "xace.h"
+
+ static char GLXServerVendorName[] = "SGI";
+
+@@ -1392,6 +1393,13 @@ DoCreatePbuffer(ClientPtr client, int screenNum, XID fbconfigId,
+ if (!pPixmap)
+ return BadAlloc;
+
++ err = XaceHook(XACE_RESOURCE_ACCESS, client, glxDrawableId, RT_PIXMAP,
++ pPixmap, RT_NONE, NULL, DixCreateAccess);
++ if (err != Success) {
++ (*pGlxScreen->pScreen->DestroyPixmap) (pPixmap);
++ return err;
++ }
++
+ /* Assign the pixmap the same id as the pbuffer and add it as a
+ * resource so it and the DRI2 drawable will be reclaimed when the
+ * pbuffer is destroyed. */
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xwayland/xwayland/CVE-2024-0409.patch b/meta/recipes-graphics/xwayland/xwayland/CVE-2024-0409.patch
new file mode 100644
index 0000000000..de3396a410
--- /dev/null
+++ b/meta/recipes-graphics/xwayland/xwayland/CVE-2024-0409.patch
@@ -0,0 +1,47 @@
+CVE: CVE-2024-0409
+Upstream-Status: Backport [ https://gitlab.freedesktop.org/xorg/xserver/-/commit/51be9e767a02cdc6a524dc895dcc81abb689d50b ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 51be9e767a02cdc6a524dc895dcc81abb689d50b Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 11:51:56 +0100
+Subject: [PATCH] ephyr,xwayland: Use the proper private key for cursor
+
+The cursor in DIX is actually split in two parts, the cursor itself and
+the cursor bits, each with their own devPrivates.
+
+The cursor itself includes the cursor bits, meaning that the cursor bits'
+devPrivates are within the structure of the cursor.
+
+Both Xephyr and Xwayland were using the private key for the cursor bits
+to store the data for the cursor, and when using XSELINUX which comes
+with its own special devPrivates, the data stored in that cursor bits'
+devPrivates would interfere with the XSELINUX devPrivates data and the
+SELINUX security ID would point to some other unrelated data, causing a
+crash in the XSELINUX code when trying to (re)use the security ID.
+
+CVE-2024-0409
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Reviewed-by: Peter Hutterer <peter.hutterer@who-t.net>
+(cherry picked from commit 2ef0f1116c65d5cb06d7b6d83f8a1aea702c94f7)
+---
+ hw/xwayland/xwayland-cursor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/xwayland/xwayland-cursor.c b/hw/xwayland/xwayland-cursor.c
+index e3c1aaa50c..bd94b0cfbb 100644
+--- a/hw/xwayland/xwayland-cursor.c
++++ b/hw/xwayland/xwayland-cursor.c
+@@ -431,7 +431,7 @@ static miPointerScreenFuncRec xwl_pointer_screen_funcs = {
+ Bool
+ xwl_screen_init_cursor(struct xwl_screen *xwl_screen)
+ {
+- if (!dixRegisterPrivateKey(&xwl_cursor_private_key, PRIVATE_CURSOR_BITS, 0))
++ if (!dixRegisterPrivateKey(&xwl_cursor_private_key, PRIVATE_CURSOR, 0))
+ return FALSE;
+
+ return miPointerInitialize(xwl_screen->screen,
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xwayland/xwayland_22.1.1.bb b/meta/recipes-graphics/xwayland/xwayland_22.1.8.bb
index b512b9932d..133c65fbc3 100644
--- a/meta/recipes-graphics/xwayland/xwayland_22.1.1.bb
+++ b/meta/recipes-graphics/xwayland/xwayland_22.1.8.bb
@@ -9,8 +9,15 @@ HOMEPAGE = "https://fedoraproject.org/wiki/Changes/XwaylandStandalone"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=5df87950af51ac2c5822094553ea1880"
-SRC_URI = "https://www.x.org/archive/individual/xserver/xwayland-${PV}.tar.xz"
-SRC_URI[sha256sum] = "f5d0e0ba37e19bb87c62f61da5970bd204939f2120620964bed4cc8495baa657"
+SRC_URI = "https://www.x.org/archive/individual/xserver/xwayland-${PV}.tar.xz \
+ file://CVE-2023-5367.patch \
+ file://CVE-2023-6377.patch \
+ file://CVE-2023-6478.patch \
+ file://CVE-2023-6816.patch \
+ file://CVE-2024-0408.patch \
+ file://CVE-2024-0409.patch \
+"
+SRC_URI[sha256sum] = "d11eeee73290b88ea8da42a7d9350dedfaba856ce4ae44e58c045ad9ecaa2f73"
UPSTREAM_CHECK_REGEX = "xwayland-(?P<pver>\d+(\.(?!90\d)\d+)+)\.tar"
@@ -23,7 +30,7 @@ OPENGL_PKGCONFIGS = "glx glamor dri3"
PACKAGECONFIG ??= "${XORG_CRYPTO} \
${@bb.utils.contains('DISTRO_FEATURES', 'opengl', '${OPENGL_PKGCONFIGS}', '', d)} \
"
-PACKAGECONFIG[dri3] = "-Ddri3=true,-Ddri3=false"
+PACKAGECONFIG[dri3] = "-Ddri3=true,-Ddri3=false,libxshmfence"
PACKAGECONFIG[glx] = "-Dglx=true,-Dglx=false,virtual/libgl virtual/libx11"
PACKAGECONFIG[glamor] = "-Dglamor=true,-Dglamor=false,libepoxy virtual/libgbm,libegl"
PACKAGECONFIG[unwind] = "-Dlibunwind=true,-Dlibunwind=false,libunwind"
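The PACKAGECONFIG[dri3] change above fills in the third field of the flag,
which lists build-time dependencies, so enabling dri3 now also pulls
libxshmfence into DEPENDS. The general field layout, shown as a hedged sketch
with a made-up feature name:

    # PACKAGECONFIG[flag] = "enable-arg,disable-arg,build-deps,runtime-deps"
    # (later fields are optional; the feature and library here are hypothetical)
    PACKAGECONFIG[example] = "-Dexample=true,-Dexample=false,libexample"

The dri3, glx and glamor lines above follow this same pattern, with meson -D
options in the first two fields.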
diff --git a/meta/recipes-kernel/blktrace/blktrace/0001-bno_plot.py-btt_plot.py-Ask-for-python3-specifically.patch b/meta/recipes-kernel/blktrace/blktrace/0001-bno_plot.py-btt_plot.py-Ask-for-python3-specifically.patch
new file mode 100644
index 0000000000..e2305a1111
--- /dev/null
+++ b/meta/recipes-kernel/blktrace/blktrace/0001-bno_plot.py-btt_plot.py-Ask-for-python3-specifically.patch
@@ -0,0 +1,35 @@
+From 6f4769e6e2c5cdc1262891470995e6dead937c7a Mon Sep 17 00:00:00 2001
+From: Sakib Sajal <sakib.sajal@windriver.com>
+Date: Mon, 26 Jun 2023 17:57:36 -0400
+Subject: [PATCH] bno_plot.py, btt_plot.py: Ask for python3 specifically
+
+python2 is deprecated, use python3.
+
+Upstream-Status: Denied [https://www.spinics.net/lists/linux-btrace/msg01364.html]
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ btt/bno_plot.py | 2 +-
+ btt/btt_plot.py | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/btt/bno_plot.py b/btt/bno_plot.py
+index 3aa4e19..d7d7159 100644
+--- a/btt/bno_plot.py
++++ b/btt/bno_plot.py
+@@ -1,4 +1,4 @@
+-#! /usr/bin/env python
++#! /usr/bin/env python3
+ #
+ # btt blkno plotting interface
+ #
+diff --git a/btt/btt_plot.py b/btt/btt_plot.py
+index 40bc71f..8620d31 100755
+--- a/btt/btt_plot.py
++++ b/btt/btt_plot.py
+@@ -1,4 +1,4 @@
+-#! /usr/bin/env python
++#! /usr/bin/env python3
+ #
+ # btt_plot.py: Generate matplotlib plots for BTT generate data files
+ #
diff --git a/meta/recipes-kernel/blktrace/blktrace_git.bb b/meta/recipes-kernel/blktrace/blktrace_git.bb
index bba5e04504..1c0856be7b 100644
--- a/meta/recipes-kernel/blktrace/blktrace_git.bb
+++ b/meta/recipes-kernel/blktrace/blktrace_git.bb
@@ -14,7 +14,9 @@ SRCREV = "366d30b9cdb20345c5d064af850d686da79b89eb"
PV = "1.3.0+git${SRCPV}"
-SRC_URI = "git://git.kernel.dk/blktrace.git;branch=master"
+SRC_URI = "git://git.kernel.dk/blktrace.git;branch=master \
+ file://0001-bno_plot.py-btt_plot.py-Ask-for-python3-specifically.patch \
+ "
S = "${WORKDIR}/git"
diff --git a/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb b/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
index a6ab9ca56d..12f1cf516e 100644
--- a/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
+++ b/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
@@ -11,7 +11,7 @@ LIC_FILES_CHKSUM = "\
DEPENDS = "git-native"
-SRCREV = "90598a5fae1172e3f7782a1b02f7b7518efd32c8"
+SRCREV = "2d01f24bc78256c709728eb3f204491bce13e0e5"
PV = "0.3+git${SRCPV}"
inherit native
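For git-fetched recipes such as kern-tools-native, a version bump is just a new
SRCREV: it pins the exact commit that do_fetch checks out, and PV folds the
revision into the package version via ${SRCPV}. A hedged, generic sketch of the
same pattern (the repository URL and hash below are illustrative only):

    SRC_URI = "git://git.example.org/some-tool.git;branch=master"
    SRCREV = "0123456789abcdef0123456789abcdef01234567"  # exact commit to check out
    PV = "0.3+git${SRCPV}"                               # version string incorporating the pinned revision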
diff --git a/meta/recipes-kernel/kmod/kmod/ptest.patch b/meta/recipes-kernel/kmod/kmod/ptest.patch
deleted file mode 100644
index 831dbcb909..0000000000
--- a/meta/recipes-kernel/kmod/kmod/ptest.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Add 'install-ptest' rule.
-
-Signed-off-by: Tudor Florea <tudor.florea@enea.com>
-Upstream-Status: Pending
-
-diff -ruN a/Makefile.am b/Makefile.am
---- a/Makefile.am 2013-07-12 17:11:05.278331557 +0200
-+++ b/Makefile.am 2013-07-12 17:14:27.033788016 +0200
-@@ -204,6 +204,16 @@
-
- distclean-local: $(DISTCLEAN_LOCAL_HOOKS)
-
-+install-ptest:
-+ @$(MKDIR_P) $(DESTDIR)/testsuite
-+ @for file in $(TESTSUITE); do \
-+ install $$file $(DESTDIR)/testsuite; \
-+ done;
-+ @sed -e 's/^Makefile/_Makefile/' < Makefile > $(DESTDIR)/Makefile
-+ @$(MKDIR_P) $(DESTDIR)/tools
-+ @cp $(noinst_SCRIPTS) $(noinst_PROGRAMS) $(DESTDIR)/tools
-+ @cp -r testsuite/rootfs testsuite/.libs $(DESTDIR)/testsuite
-+
- # ------------------------------------------------------------------------------
- # custom release helpers
- # ------------------------------------------------------------------------------
diff --git a/meta/recipes-kernel/linux-firmware/linux-firmware_20220610.bb b/meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb
index 78b38df461..425b351dc1 100644
--- a/meta/recipes-kernel/linux-firmware/linux-firmware_20220610.bb
+++ b/meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb
@@ -12,12 +12,15 @@ LICENSE = "\
& Firmware-amdgpu \
& Firmware-amd-ucode \
& Firmware-amlogic_vdec \
+ & Firmware-amphion_vpu \
& Firmware-atheros_firmware \
& Firmware-atmel \
& Firmware-broadcom_bcm43xx \
& Firmware-ca0132 \
& Firmware-cavium \
& Firmware-chelsio_firmware \
+ & Firmware-cirrus \
+ & Firmware-cnm \
& Firmware-cw1200 \
& Firmware-cypress \
& Firmware-dib0700 \
@@ -27,25 +30,29 @@ LICENSE = "\
& Firmware-go7007 \
& Firmware-GPLv2 \
& Firmware-hfi1_firmware \
- & Firmware-i2400m \
& Firmware-i915 \
& Firmware-ibt_firmware \
& Firmware-ice \
+ & Firmware-ice_enhanced \
& Firmware-it913x \
& Firmware-iwlwifi_firmware \
& Firmware-IntcSST2 \
& Firmware-kaweth \
& Firmware-Lontium \
& Firmware-Marvell \
+ & Firmware-mediatek \
+ & Firmware-microchip \
& Firmware-moxa \
& Firmware-myri10ge_firmware \
& Firmware-netronome \
& Firmware-nvidia \
+ & Firmware-nxp_mc_firmware \
& Firmware-OLPC \
& Firmware-ath9k-htc \
& Firmware-phanfw \
& Firmware-qat \
& Firmware-qcom \
+ & Firmware-qcom-yamato \
& Firmware-qla1280 \
& Firmware-qla2xxx \
& Firmware-qualcommAthos_ar3k \
@@ -57,7 +64,6 @@ LICENSE = "\
& Firmware-rtlwifi_firmware \
& Firmware-imx-sdma_firmware \
& Firmware-siano \
- & Firmware-tda7706-firmware \
& Firmware-ti-connectivity \
& Firmware-ti-keystone \
& Firmware-ueagle-atm4-firmware \
@@ -69,13 +75,13 @@ LICENSE = "\
& WHENCE \
"
-WHENCE_CHKSUM = "385947b278a6646ae4c3d39ba8c9b1bb"
LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.adsp_sst;md5=615c45b91a5a4a9fe046d6ab9a2df728 \
file://LICENCE.agere;md5=af0133de6b4a9b2522defd5f188afd31 \
- file://LICENSE.amdgpu;md5=44c1166d052226cb2d6c8d7400090203 \
- file://LICENSE.amd-ucode;md5=3c5399dc9148d7f0e1f41e34b69cf14f \
+ file://LICENSE.amdgpu;md5=a2589a05ea5b6bd2b7f4f623c7e7a649 \
+ file://LICENSE.amd-ucode;md5=6ca90c57f7b248de1e25c7f68ffc4698 \
file://LICENSE.amlogic_vdec;md5=dc44f59bf64a81643e500ad3f39a468a \
+ file://LICENSE.amphion_vpu;md5=2bcdc00527b2d0542bd92b52aaec2b60 \
file://LICENCE.atheros_firmware;md5=30a14c7823beedac9fa39c64fdd01a13 \
file://LICENSE.atmel;md5=aa74ac0c60595dee4d4e239107ea77a3 \
file://LICENCE.broadcom_bcm43xx;md5=3160c14df7228891b868060e1951dfbc \
@@ -83,6 +89,8 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.cadence;md5=009f46816f6956cfb75ede13d3e1cee0 \
file://LICENCE.cavium;md5=c37aaffb1ebe5939b2580d073a95daea \
file://LICENCE.chelsio_firmware;md5=819aa8c3fa453f1b258ed8d168a9d903 \
+ file://LICENSE.cirrus;md5=662ea2c1a8888f7d79ed7f27c27472e1 \
+ file://LICENCE.cnm;md5=93b67e6bac7f8fec22b96b8ad0a1a9d0 \
file://LICENCE.cw1200;md5=f0f770864e7a8444a5c5aa9d12a3a7ed \
file://LICENCE.cypress;md5=48cd9436c763bf873961f9ed7b5c147b \
file://LICENSE.dib0700;md5=f7411825c8a555a1a3e5eab9ca773431 \
@@ -92,10 +100,10 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.go7007;md5=c0bb9f6aaaba55b0529ee9b30aa66beb \
file://GPL-2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
file://LICENSE.hfi1_firmware;md5=5e7b6e586ce7339d12689e49931ad444 \
- file://LICENCE.i2400m;md5=14b901969e23c41881327c0d9e4b7d36 \
file://LICENSE.i915;md5=2b0b2e0d20984affd4490ba2cba02570 \
file://LICENCE.ibt_firmware;md5=fdbee1ddfe0fb7ab0b2fcd6b454a366b \
file://LICENSE.ice;md5=742ab4850f2670792940e6d15c974b2f \
+ file://LICENSE.ice_enhanced;md5=f305cfc31b64f95f774f9edd9df0224d \
file://LICENCE.IntcSST2;md5=9e7d8bea77612d7cc7d9e9b54b623062 \
file://LICENCE.it913x;md5=1fbf727bfb6a949810c4dbfa7e6ce4f8 \
file://LICENCE.iwlwifi_firmware;md5=2ce6786e0fc11ac6e36b54bb9b799f1b \
@@ -103,16 +111,19 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENSE.Lontium;md5=4ec8dc582ff7295f39e2ca6a7b0be2b6 \
file://LICENCE.Marvell;md5=28b6ed8bd04ba105af6e4dcd6e997772 \
file://LICENCE.mediatek;md5=7c1976b63217d76ce47d0a11d8a79cf2 \
+ file://LICENCE.microchip;md5=db753b00305675dfbf120e3f24a47277 \
file://LICENCE.moxa;md5=1086614767d8ccf744a923289d3d4261 \
file://LICENCE.myri10ge_firmware;md5=42e32fb89f6b959ca222e25ac8df8fed \
file://LICENCE.Netronome;md5=4add08f2577086d44447996503cddf5f \
file://LICENCE.nvidia;md5=4428a922ed3ba2ceec95f076a488ce07 \
file://LICENCE.NXP;md5=58bb8ba632cd729b9ba6183bc6aed36f \
+ file://LICENSE.nxp_mc_firmware;md5=9dc97e4b279b3858cae8879ae2fe5dd7 \
file://LICENCE.OLPC;md5=5b917f9d8c061991be4f6f5f108719cd \
file://LICENCE.open-ath9k-htc-firmware;md5=1b33c9f4d17bc4d457bdb23727046837 \
file://LICENCE.phanfw;md5=954dcec0e051f9409812b561ea743bfa \
- file://LICENCE.qat_firmware;md5=9e7d8bea77612d7cc7d9e9b54b623062 \
+ file://LICENCE.qat_firmware;md5=72de83dfd9b87be7685ed099a39fbea4 \
file://LICENSE.qcom;md5=164e3362a538eb11d3ac51e8e134294b \
+ file://LICENSE.qcom_yamato;md5=d0de0eeccaf1843a850bf7a6777eec5c \
file://LICENCE.qla1280;md5=d6895732e622d950609093223a2c4f5d \
file://LICENCE.qla2xxx;md5=505855e921b75f1be4a437ad9b79dff0 \
file://LICENSE.QualcommAtheros_ar3k;md5=b5fe244fb2b532311de1472a3bc06da5 \
@@ -124,7 +135,6 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.rtlwifi_firmware.txt;md5=00d06cfd3eddd5a2698948ead2ad54a5 \
file://LICENSE.sdma_firmware;md5=51e8c19ecc2270f4b8ea30341ad63ce9 \
file://LICENCE.siano;md5=4556c1bf830067f12ca151ad953ec2a5 \
- file://LICENCE.tda7706-firmware.txt;md5=835997cf5e3c131d0dddd695c7d9103e \
file://LICENCE.ti-connectivity;md5=c5e02be633f1499c109d1652514d85ec \
file://LICENCE.ti-keystone;md5=3a86335d32864b0bef996bee26cc0f2c \
file://LICENCE.ueagle-atm4-firmware;md5=4ed7ea6b507ccc583b9d594417714118 \
@@ -135,6 +145,9 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \
file://WHENCE;md5=${WHENCE_CHKSUM} \
"
+# WHENCE checksum is defined separately to ease overriding it if
+# class-devupstream is selected.
+WHENCE_CHKSUM = "a344e6c28970fc7daafa81c10247aeb6"
# These are not common licenses, set NO_GENERIC_LICENSE for them
# so that the license files will be copied from fetched source
@@ -144,6 +157,7 @@ NO_GENERIC_LICENSE[Firmware-agere] = "LICENCE.agere"
NO_GENERIC_LICENSE[Firmware-amdgpu] = "LICENSE.amdgpu"
NO_GENERIC_LICENSE[Firmware-amd-ucode] = "LICENSE.amd-ucode"
NO_GENERIC_LICENSE[Firmware-amlogic_vdec] = "LICENSE.amlogic_vdec"
+NO_GENERIC_LICENSE[Firmware-amphion_vpu] = "LICENSE.amphion_vpu"
NO_GENERIC_LICENSE[Firmware-atheros_firmware] = "LICENCE.atheros_firmware"
NO_GENERIC_LICENSE[Firmware-atmel] = "LICENSE.atmel"
NO_GENERIC_LICENSE[Firmware-broadcom_bcm43xx] = "LICENCE.broadcom_bcm43xx"
@@ -151,6 +165,8 @@ NO_GENERIC_LICENSE[Firmware-ca0132] = "LICENCE.ca0132"
NO_GENERIC_LICENSE[Firmware-cadence] = "LICENCE.cadence"
NO_GENERIC_LICENSE[Firmware-cavium] = "LICENCE.cavium"
NO_GENERIC_LICENSE[Firmware-chelsio_firmware] = "LICENCE.chelsio_firmware"
+NO_GENERIC_LICENSE[Firmware-cirrus] = "LICENSE.cirrus"
+NO_GENERIC_LICENSE[Firmware-cnm] = "LICENCE.cnm"
NO_GENERIC_LICENSE[Firmware-cw1200] = "LICENCE.cw1200"
NO_GENERIC_LICENSE[Firmware-cypress] = "LICENCE.cypress"
NO_GENERIC_LICENSE[Firmware-dib0700] = "LICENSE.dib0700"
@@ -160,10 +176,10 @@ NO_GENERIC_LICENSE[Firmware-fw_sst_0f28] = "LICENCE.fw_sst_0f28"
NO_GENERIC_LICENSE[Firmware-go7007] = "LICENCE.go7007"
NO_GENERIC_LICENSE[Firmware-GPLv2] = "GPL-2"
NO_GENERIC_LICENSE[Firmware-hfi1_firmware] = "LICENSE.hfi1_firmware"
-NO_GENERIC_LICENSE[Firmware-i2400m] = "LICENCE.i2400m"
NO_GENERIC_LICENSE[Firmware-i915] = "LICENSE.i915"
NO_GENERIC_LICENSE[Firmware-ibt_firmware] = "LICENCE.ibt_firmware"
NO_GENERIC_LICENSE[Firmware-ice] = "LICENSE.ice"
+NO_GENERIC_LICENSE[Firmware-ice_enhanced] = "LICENSE.ice_enhanced"
NO_GENERIC_LICENSE[Firmware-IntcSST2] = "LICENCE.IntcSST2"
NO_GENERIC_LICENSE[Firmware-it913x] = "LICENCE.it913x"
NO_GENERIC_LICENSE[Firmware-iwlwifi_firmware] = "LICENCE.iwlwifi_firmware"
@@ -171,15 +187,18 @@ NO_GENERIC_LICENSE[Firmware-kaweth] = "LICENCE.kaweth"
NO_GENERIC_LICENSE[Firmware-Lontium] = "LICENSE.Lontium"
NO_GENERIC_LICENSE[Firmware-Marvell] = "LICENCE.Marvell"
NO_GENERIC_LICENSE[Firmware-mediatek] = "LICENCE.mediatek"
+NO_GENERIC_LICENSE[Firmware-microchip] = "LICENCE.microchip"
NO_GENERIC_LICENSE[Firmware-moxa] = "LICENCE.moxa"
NO_GENERIC_LICENSE[Firmware-myri10ge_firmware] = "LICENCE.myri10ge_firmware"
NO_GENERIC_LICENSE[Firmware-netronome] = "LICENCE.Netronome"
NO_GENERIC_LICENSE[Firmware-nvidia] = "LICENCE.nvidia"
+NO_GENERIC_LICENSE[Firmware-nxp_mc_firmware] = "LICENSE.nxp_mc_firmware"
NO_GENERIC_LICENSE[Firmware-OLPC] = "LICENCE.OLPC"
NO_GENERIC_LICENSE[Firmware-ath9k-htc] = "LICENCE.open-ath9k-htc-firmware"
NO_GENERIC_LICENSE[Firmware-phanfw] = "LICENCE.phanfw"
NO_GENERIC_LICENSE[Firmware-qat] = "LICENCE.qat_firmware"
NO_GENERIC_LICENSE[Firmware-qcom] = "LICENSE.qcom"
+NO_GENERIC_LICENSE[Firmware-qcom-yamato] = "LICENSE.qcom_yamato"
NO_GENERIC_LICENSE[Firmware-qla1280] = "LICENCE.qla1280"
NO_GENERIC_LICENSE[Firmware-qla2xxx] = "LICENCE.qla2xxx"
NO_GENERIC_LICENSE[Firmware-qualcommAthos_ar3k] = "LICENSE.QualcommAtheros_ar3k"
@@ -191,7 +210,6 @@ NO_GENERIC_LICENSE[Firmware-ralink-firmware] = "LICENCE.ralink-firmware.txt"
NO_GENERIC_LICENSE[Firmware-rtlwifi_firmware] = "LICENCE.rtlwifi_firmware.txt"
NO_GENERIC_LICENSE[Firmware-siano] = "LICENCE.siano"
NO_GENERIC_LICENSE[Firmware-imx-sdma_firmware] = "LICENSE.sdma_firmware"
-NO_GENERIC_LICENSE[Firmware-tda7706-firmware] = "LICENCE.tda7706-firmware.txt"
NO_GENERIC_LICENSE[Firmware-ti-connectivity] = "LICENCE.ti-connectivity"
NO_GENERIC_LICENSE[Firmware-ti-keystone] = "LICENCE.ti-keystone"
NO_GENERIC_LICENSE[Firmware-ueagle-atm4-firmware] = "LICENCE.ueagle-atm4-firmware"
@@ -213,7 +231,7 @@ SRC_URI:class-devupstream = "git://git.kernel.org/pub/scm/linux/kernel/git/firmw
# Pin this to the 20220509 release, override this in local.conf
SRCREV:class-devupstream ?= "b19cbdca78ab2adfd210c91be15a22568e8b8cae"
-SRC_URI[sha256sum] = "faf3aedf89530e61f4fa1e8c7303dead9127cc24416945647797d079feb12837"
+SRC_URI[sha256sum] = "bf0f239dc0801e9d6bf5d5fb3e2f549575632cf4688f4348184199cb02c2bcd7"
inherit allarch
@@ -224,22 +242,34 @@ do_compile() {
}
do_install() {
- oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install
+ # install-nodedup avoids rdfind dependency
+ oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install-nodedup
cp GPL-2 LICEN[CS]E.* WHENCE ${D}${nonarch_base_libdir}/firmware/
}
-PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
+PACKAGES =+ "${PN}-amphion-vpu-license ${PN}-amphion-vpu \
+ ${PN}-cw1200-license ${PN}-cw1200 \
+ ${PN}-ralink-license ${PN}-ralink \
${PN}-mt7601u-license ${PN}-mt7601u \
+ ${PN}-mt7650-license ${PN}-mt7650 \
+ ${PN}-mt76x2-license ${PN}-mt76x2 \
${PN}-radeon-license ${PN}-radeon \
+ ${PN}-amdgpu-license ${PN}-amdgpu \
${PN}-marvell-license ${PN}-pcie8897 ${PN}-pcie8997 \
+ ${PN}-mediatek-license ${PN}-mediatek \
+ ${PN}-microchip-license ${PN}-microchip \
+ ${PN}-moxa-license ${PN}-moxa \
${PN}-sd8686 ${PN}-sd8688 ${PN}-sd8787 ${PN}-sd8797 ${PN}-sd8801 \
${PN}-sd8887 ${PN}-sd8897 ${PN}-sd8997 ${PN}-usb8997 \
${PN}-ti-connectivity-license ${PN}-wlcommon ${PN}-wl12xx ${PN}-wl18xx \
+ ${PN}-ti-keystone-license ${PN}-ti-keystone \
${PN}-vt6656-license ${PN}-vt6656 \
${PN}-rs9113 ${PN}-rs9116 \
${PN}-rtl-license ${PN}-rtl8188 ${PN}-rtl8192cu ${PN}-rtl8192ce ${PN}-rtl8192su ${PN}-rtl8723 ${PN}-rtl8821 \
+ ${PN}-rtl8761 \
${PN}-rtl8168 \
+ ${PN}-rtl8822 \
${PN}-cypress-license \
${PN}-broadcom-license \
${PN}-bcm-0bb4-0306 \
@@ -275,7 +305,9 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-bcm4373 \
${PN}-bcm43xx \
${PN}-bcm43xx-hdr \
- ${PN}-atheros-license ${PN}-ar9170 ${PN}-ath6k ${PN}-ath9k ${PN}-ath3k \
+ ${PN}-cirrus-license ${PN}-cirrus \
+ ${PN}-cnm-license ${PN}-cnm \
+ ${PN}-atheros-license ${PN}-ar5523 ${PN}-ar9170 ${PN}-ath6k ${PN}-ath9k ${PN}-ath3k \
${PN}-gplv2-license ${PN}-carl9170 \
${PN}-ar3k-license ${PN}-ar3k ${PN}-ath10k-license ${PN}-ath10k ${PN}-ath11k ${PN}-qca \
\
@@ -301,29 +333,64 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-ibt-misc \
${PN}-i915-license ${PN}-i915 \
${PN}-ice-license ${PN}-ice \
+ ${PN}-ice-enhanced-license ${PN}-ice-enhanced \
${PN}-adsp-sst-license ${PN}-adsp-sst \
${PN}-bnx2-mips \
${PN}-liquidio \
${PN}-nvidia-license \
${PN}-nvidia-tegra-k1 ${PN}-nvidia-tegra \
${PN}-nvidia-gpu \
+ ${PN}-nxp-mc-license ${PN}-nxp-mc \
${PN}-netronome-license ${PN}-netronome \
+ ${PN}-olpc-license ${PN}-olpc \
+ ${PN}-phanfw-license ${PN}-phanfw \
${PN}-qat ${PN}-qat-license \
- ${PN}-qcom-license \
+ ${PN}-qcom-license ${PN}-qcom-yamato-license \
${PN}-qcom-venus-1.8 ${PN}-qcom-venus-4.2 ${PN}-qcom-venus-5.2 ${PN}-qcom-venus-5.4 \
${PN}-qcom-vpu-1.0 ${PN}-qcom-vpu-2.0 \
${PN}-qcom-adreno-a2xx ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a4xx ${PN}-qcom-adreno-a530 \
${PN}-qcom-adreno-a630 ${PN}-qcom-adreno-a650 ${PN}-qcom-adreno-a660 \
- ${PN}-qcom-apq8096-audio ${PN}-qcom-apq8096-modem \
- ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \
- ${PN}-qcom-sm8250-audio ${PN}-qcom-sm8250-compute \
+ ${PN}-qcom-apq8016-modem ${PN}-qcom-apq8016-wifi \
+ ${PN}-qcom-apq8096-adreno ${PN}-qcom-apq8096-audio ${PN}-qcom-apq8096-modem \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-compat \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-audio \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-adreno \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-compute \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-sensors \
+ ${PN}-qcom-sdm845-adreno ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \
+ ${PN}-qcom-sm8250-adreno ${PN}-qcom-sm8250-audio ${PN}-qcom-sm8250-compute \
+ ${PN}-qla2xxx ${PN}-qla2xxx-license \
${PN}-amlogic-vdec-license ${PN}-amlogic-vdec \
${PN}-lt9611uxc ${PN}-lontium-license \
${PN}-whence-license \
+ ${PN}-wl1251-license ${PN}-wl1251 \
+ ${PN}-xc4000-license ${PN}-xc4000 \
+ ${PN}-xc5000-license ${PN}-xc5000 \
+ ${PN}-xc5000c-license ${PN}-xc5000c \
${PN}-license \
"
+# For Amphion VPU
+LICENSE:${PN}-amphion-vpu = "Firmware-amphion_vpu"
+LICENSE:${PN}-amphion-vpu-license = "Firmware-amphion_vpu"
+
+FILES:${PN}-amphion-vpu = "${nonarch_base_libdir}/firmware/amphion/*"
+FILES:${PN}-amphion-vpu-license = " \
+ ${nonarch_base_libdir}/firmware/LICENSE.amphion_vpu \
+"
+RDEPENDS:${PN}-amphion-vpu += "${PN}-amphion-vpu-license"
+
+# For cw1200
+LICENSE:${PN}-cw1200 = "Firmware-cw1200"
+LICENSE:${PN}-cw1200-license = "Firmware-cw1200"
+
+FILES:${PN}-cw1200 = "${nonarch_base_libdir}/firmware/wsm_22.bin"
+FILES:${PN}-cw1200-license = "${nonarch_base_libdir}/firmware/LICENCE.cw1200"
+
+RDEPENDS:${PN}-cw1200 += "${PN}-cw1200-license"
+
# For atheros
+LICENSE:${PN}-ar5523 = "Firmware-atheros_firmware"
LICENSE:${PN}-ar9170 = "Firmware-atheros_firmware"
LICENSE:${PN}-ath3k = "Firmware-atheros_firmware"
LICENSE:${PN}-ath6k = "Firmware-atheros_firmware"
@@ -331,6 +398,9 @@ LICENSE:${PN}-ath9k = "Firmware-atheros_firmware"
LICENSE:${PN}-atheros-license = "Firmware-atheros_firmware"
FILES:${PN}-atheros-license = "${nonarch_base_libdir}/firmware/LICENCE.atheros_firmware"
+FILES:${PN}-ar5523 = " \
+ ${nonarch_base_libdir}/firmware/ar5523.bin \
+"
FILES:${PN}-ar9170 = " \
${nonarch_base_libdir}/firmware/ar9170*.fw \
"
@@ -349,6 +419,7 @@ FILES:${PN}-ath9k = " \
${nonarch_base_libdir}/firmware/ath9k_htc/htc_9271-1.4.0.fw \
"
+RDEPENDS:${PN}-ar5523 += "${PN}-atheros-license"
RDEPENDS:${PN}-ar9170 += "${PN}-atheros-license"
RDEPENDS:${PN}-ath6k += "${PN}-atheros-license"
RDEPENDS:${PN}-ath9k += "${PN}-atheros-license"
@@ -411,12 +482,74 @@ LICENSE:${PN}-mt7601u-license = "Firmware-ralink_a_mediatek_company_firmware"
FILES:${PN}-mt7601u-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware"
FILES:${PN}-mt7601u = " \
+ ${nonarch_base_libdir}/firmware/mediatek/mt7601u.bin \
${nonarch_base_libdir}/firmware/mt7601u.bin \
"
-
RDEPENDS:${PN}-mt7601u += "${PN}-mt7601u-license"
+# For MediaTek Bluetooth USB driver 7650
+LICENSE:${PN}-mt7650 = "Firmware-ralink_a_mediatek_company_firmware"
+LICENSE:${PN}-mt7650-license = "Firmware-ralink_a_mediatek_company_firmware"
+
+FILES:${PN}-mt7650-license = " \
+ ${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware \
+"
+FILES:${PN}-mt7650 = " \
+ ${nonarch_base_libdir}/firmware/mediatek/mt7650.bin \
+ ${nonarch_base_libdir}/firmware/mt7650.bin \
+"
+RDEPENDS:${PN}-mt7650 += "${PN}-mt7650-license"
+
+# For MediaTek MT76x2 Wireless MACs
+LICENSE:${PN}-mt76x2 = "Firmware-ralink_a_mediatek_company_firmware"
+LICENSE:${PN}-mt76x2-license = "Firmware-ralink_a_mediatek_company_firmware"
+
+FILES:${PN}-mt76x2-license = " \
+ ${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware \
+"
+FILES:${PN}-mt76x2 = " \
+ ${nonarch_base_libdir}/firmware/mediatek/mt7662.bin \
+ ${nonarch_base_libdir}/firmware/mt7662.bin \
+ ${nonarch_base_libdir}/firmware/mediatek/mt7662_rom_patch.bin \
+ ${nonarch_base_libdir}/firmware/mt7662_rom_patch.bin \
+"
+RDEPENDS:${PN}-mt76x2 += "${PN}-mt76x2-license"
+
+# For MediaTek
+LICENSE:${PN}-mediatek = "Firmware-mediatek"
+LICENSE:${PN}-mediatek-license = "Firmware-mediatek"
+
+FILES:${PN}-mediatek = " \
+ ${nonarch_base_libdir}/firmware/mediatek/* \
+ ${nonarch_base_libdir}/firmware/vpu_d.bin \
+ ${nonarch_base_libdir}/firmware/vpu_p.bin \
+"
+FILES:${PN}-mediatek-license = " \
+ ${nonarch_base_libdir}/firmware/LICENCE.mediatek \
+"
+RDEPENDS:${PN}-mediatek += "${PN}-mediatek-license"
+
+# For Microchip
+LICENSE:${PN}-microchip = "Firmware-microchip"
+LICENSE:${PN}-microchip-license = "Firmware-microchip"
+
+FILES:${PN}-microchip = "${nonarch_base_libdir}/firmware/microchip/*"
+FILES:${PN}-microchip-license = " \
+ ${nonarch_base_libdir}/firmware/LICENCE.microchip \
+"
+RDEPENDS:${PN}-microchip += "${PN}-microchip-license"
+
+# For MOXA
+LICENSE:${PN}-moxa = "Firmware-moxa"
+LICENSE:${PN}-moxa-license = "Firmware-moxa"
+
+FILES:${PN}-moxa = "${nonarch_base_libdir}/firmware/moxa"
+FILES:${PN}-moxa-license = "${nonarch_base_libdir}/firmware/LICENCE.moxa"
+
+RDEPENDS:${PN}-moxa += "${PN}-moxa-license"
+
# For radeon
+
LICENSE:${PN}-radeon = "Firmware-radeon"
LICENSE:${PN}-radeon-license = "Firmware-radeon"
@@ -427,6 +560,17 @@ FILES:${PN}-radeon = " \
RDEPENDS:${PN}-radeon += "${PN}-radeon-license"
+# For amdgpu
+LICENSE:${PN}-amdgpu = "Firmware-amdgpu"
+LICENSE:${PN}-amdgpu-license = "Firmware-amdgpu"
+
+FILES:${PN}-amdgpu-license = "${nonarch_base_libdir}/firmware/LICENSE.amdgpu"
+FILES:${PN}-amdgpu = " \
+ ${nonarch_base_libdir}/firmware/amdgpu \
+"
+
+RDEPENDS:${PN}-amdgpu += "${PN}-amdgpu-license"
+
# For lontium
LICENSE:${PN}-lt9611uxc = "Firmware-Lontium"
@@ -524,6 +668,16 @@ FILES:${PN}-netronome = " \
RDEPENDS:${PN}-netronome += "${PN}-netronome-license"
+# For nxp-mc
+LICENSE:${PN}-nxp-mc = "Firmware-nxp_mc_firmware"
+LICENSE:${PN}-nxp-mc-license = "Firmware-nxp_mc_firmware"
+
+FILES:${PN}-nxp-mc= "${nonarch_base_libdir}/firmware/dpaa2/mc/*"
+FILES:${PN}-nxp-mc-license = " \
+ ${nonarch_base_libdir}/firmware/LICENSE.nxp_mc_firmware \
+"
+RDEPENDS:${PN}-nxp-mc += "${PN}-nxp-mc-license"
+
# For Nvidia
LICENSE:${PN}-nvidia-gpu = "Firmware-nvidia"
LICENSE:${PN}-nvidia-tegra = "Firmware-nvidia"
@@ -546,6 +700,37 @@ RDEPENDS:${PN}-nvidia-gpu += "${PN}-nvidia-license"
RDEPENDS:${PN}-nvidia-tegra += "${PN}-nvidia-license"
RDEPENDS:${PN}-nvidia-tegra-k1 += "${PN}-nvidia-license"
+# For OLPC
+LICENSE:${PN}-olpc = "Firmware-OLPC"
+LICENSE:${PN}-olpc-license = "Firmware-OLPC"
+
+FILES:${PN}-olpc = " \
+ ${nonarch_base_libdir}/firmware/libertas/lbtf_sdio.bin \
+ ${nonarch_base_libdir}/firmware/lbtf_usb.bin \
+ ${nonarch_base_libdir}/firmware/libertas/usb8388_olpc.bin \
+"
+FILES:${PN}-olpc-license = "${nonarch_base_libdir}/firmware/LICENCE.OLPC"
+
+RDEPENDS:${PN}-olpc += "${PN}-olpc-license"
+
+# For phanfw
+LICENSE:${PN}-phanfw = "Firmware-phanfw"
+LICENSE:${PN}-phanfw-license = "Firmware-phanfw"
+
+FILES:${PN}-phanfw = "${nonarch_base_libdir}/firmware/phanfw.bin"
+FILES:${PN}-phanfw-license = "${nonarch_base_libdir}/firmware/LICENCE.phanfw"
+
+RDEPENDS:${PN}-phanfw += "${PN}-phanfw-license"
+
+# For qla2xxx
+LICENSE:${PN}-qla2xxx = "Firmware-qla2xxx"
+LICENSE:${PN}-qla2xxx-license = "Firmware-qla2xxx"
+
+FILES:${PN}-qla2xxx = "${nonarch_base_libdir}/firmware/ql2*"
+FILES:${PN}-qla2xxx-license = "${nonarch_base_libdir}/firmware/LICENCE.qla2xxx"
+
+RDEPENDS:${PN}-qla2xxx += "${PN}-qla2xxx-license"
+
# For RSI RS911x WiFi
LICENSE:${PN}-rs9113 = "WHENCE"
LICENSE:${PN}-rs9116 = "WHENCE"
@@ -562,7 +747,9 @@ LICENSE:${PN}-rtl8192cu = "Firmware-rtlwifi_firmware"
LICENSE:${PN}-rtl8192ce = "Firmware-rtlwifi_firmware"
LICENSE:${PN}-rtl8192su = "Firmware-rtlwifi_firmware"
LICENSE:${PN}-rtl8723 = "Firmware-rtlwifi_firmware"
+LICENSE:${PN}-rtl8761 = "Firmware-rtlwifi_firmware"
LICENSE:${PN}-rtl8821 = "Firmware-rtlwifi_firmware"
+LICENSE:${PN}-rtl8822 = "Firmware-rtlwifi_firmware"
LICENSE:${PN}-rtl-license = "Firmware-rtlwifi_firmware"
LICENSE:${PN}-rtl8168 = "WHENCE"
@@ -587,9 +774,17 @@ FILES:${PN}-rtl8723 = " \
FILES:${PN}-rtl8821 = " \
${nonarch_base_libdir}/firmware/rtlwifi/rtl8821*.bin \
"
+FILES:${PN}-rtl8761 = " \
+ ${nonarch_base_libdir}/firmware/rtl_bt/rtl8761*.bin \
+"
FILES:${PN}-rtl8168 = " \
${nonarch_base_libdir}/firmware/rtl_nic/rtl8168*.fw \
"
+FILES:${PN}-rtl8822 = " \
+ ${nonarch_base_libdir}/firmware/rtl_bt/rtl8822*.bin \
+ ${nonarch_base_libdir}/firmware/rtw88/rtw8822*.bin \
+ ${nonarch_base_libdir}/firmware/rtlwifi/rtl8822*.bin \
+"
RDEPENDS:${PN}-rtl8188 += "${PN}-rtl-license"
RDEPENDS:${PN}-rtl8192ce += "${PN}-rtl-license"
@@ -597,8 +792,22 @@ RDEPENDS:${PN}-rtl8192cu += "${PN}-rtl-license"
RDEPENDS:${PN}-rtl8192su = "${PN}-rtl-license"
RDEPENDS:${PN}-rtl8723 += "${PN}-rtl-license"
RDEPENDS:${PN}-rtl8821 += "${PN}-rtl-license"
+RDEPENDS:${PN}-rtl8761 += "${PN}-rtl-license"
+RDEPENDS:${PN}-rtl8822 += "${PN}-rtl-license"
RDEPENDS:${PN}-rtl8168 += "${PN}-whence-license"
+# For TI wl1251
+LICENSE:${PN}-wl1251 = "Firmware-wl1251"
+LICENSE:${PN}-wl1251-license = "Firmware-wl1251"
+
+FILES:${PN}-wl1251 = " \
+ ${nonarch_base_libdir}/firmware/ti-connectivity/wl1251-fw.bin \
+ ${nonarch_base_libdir}/firmware/ti-connectivity/wl1251-nvs.bin \
+"
+FILES:${PN}-wl1251-license = "${nonarch_base_libdir}/firmware/LICENCE.wl1251"
+
+RDEPENDS:${PN}-wl1251 += "${PN}-wl1251-license"
+
# For ti-connectivity
LICENSE:${PN}-wlcommon = "Firmware-ti-connectivity"
LICENSE:${PN}-wl12xx = "Firmware-ti-connectivity"
@@ -628,6 +837,16 @@ FILES:${PN}-wl18xx = " \
RDEPENDS:${PN}-wl12xx = "${PN}-ti-connectivity-license ${PN}-wlcommon"
RDEPENDS:${PN}-wl18xx = "${PN}-ti-connectivity-license ${PN}-wlcommon"
+# For ti-keystone
+LICENSE:${PN}-ti-keystone = "Firmware-ti-keystone"
+LICENSE:${PN}-ti-keystone-license = "Firmware-ti-keystone"
+
+FILES:${PN}-ti-keystone = "${nonarch_base_libdir}/firmware/ti-keystone/*"
+FILES:${PN}-ti-keystone-license = " \
+ ${nonarch_base_libdir}/firmware/LICENCE.ti-keystone \
+"
+RDEPENDS:${PN}-ti-keystone += "${PN}-ti-keystone-license"
+
# For vt6656
LICENSE:${PN}-vt6656 = "Firmware-via_vt6656"
LICENSE:${PN}-vt6656-license = "Firmware-via_vt6656"
@@ -639,6 +858,35 @@ FILES:${PN}-vt6656 = " \
RDEPENDS:${PN}-vt6656 = "${PN}-vt6656-license"
+# For xc4000
+LICENSE:${PN}-xc4000 = "Firmware-xc4000"
+LICENSE:${PN}-xc4000-license = "Firmware-xc4000"
+
+FILES:${PN}-xc4000 = "${nonarch_base_libdir}/firmware/dvb-fe-xc4000-1.4.1.fw"
+FILES:${PN}-xc4000-license = "${nonarch_base_libdir}/firmware/LICENCE.xc4000"
+
+RDEPENDS:${PN}-xc4000 += "${PN}-xc4000-license"
+
+# For xc5000
+LICENSE:${PN}-xc5000 = "Firmware-xc5000"
+LICENSE:${PN}-xc5000-license = "Firmware-xc5000"
+
+FILES:${PN}-xc5000 = "${nonarch_base_libdir}/firmware/dvb-fe-xc5000-1.6.114.fw"
+FILES:${PN}-xc5000-license = "${nonarch_base_libdir}/firmware/LICENCE.xc5000"
+
+RDEPENDS:${PN}-xc5000 += "${PN}-xc5000-license"
+
+# For xc5000c
+LICENSE:${PN}-xc5000c = "Firmware-xc5000c"
+LICENSE:${PN}-xc5000c-license = "Firmware-xc5000c"
+
+FILES:${PN}-xc5000c = " \
+ ${nonarch_base_libdir}/firmware/dvb-fe-xc5000c-4.1.30.7.fw \
+"
+FILES:${PN}-xc5000c-license = "${nonarch_base_libdir}/firmware/LICENCE.xc5000c"
+
+RDEPENDS:${PN}-xc5000c += "${PN}-xc5000c-license"
+
# For broadcom
# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e " \${PN}-$pkg \\"; done | sort -u
@@ -795,6 +1043,24 @@ FILES:${PN}-whence-license = "${nonarch_base_libdir}/firmware/WHENCE"
RDEPENDS:${PN}-bnx2-mips += "${PN}-whence-license"
+# For cirrus
+LICENSE:${PN}-cirrus = "Firmware-cirrus"
+LICENSE:${PN}-cirrus-license = "Firmware-cirrus"
+
+FILES:${PN}-cirrus = "${nonarch_base_libdir}/firmware/cirrus/*"
+FILES:${PN}-cirrus-license = "${nonarch_base_libdir}/firmware/LICENSE.cirrus"
+
+RDEPENDS:${PN}-cirrus += "${PN}-cirrus-license"
+
+# For cnm
+LICENSE:${PN}-cnm = "Firmware-cnm"
+LICENSE:${PN}-cnm-license = "Firmware-cnm"
+
+FILES:${PN}-cnm = "${nonarch_base_libdir}/firmware/cnm/wave521c_k3_codec_fw.bin"
+FILES:${PN}-cnm-license = "${nonarch_base_libdir}/firmware/LICENCE.cnm"
+
+RDEPENDS:${PN}-cnm += "${PN}-cnm-license"
+
# For imx-sdma
LICENSE:${PN}-imx-sdma-imx6q = "Firmware-imx-sdma_firmware"
LICENSE:${PN}-imx-sdma-imx7d = "Firmware-imx-sdma_firmware"
@@ -943,10 +1209,26 @@ FILES:${PN}-i915-license = "${nonarch_base_libdir}/firmware/LICENSE.i915"
FILES:${PN}-i915 = "${nonarch_base_libdir}/firmware/i915"
RDEPENDS:${PN}-i915 = "${PN}-i915-license"
+# For ice-enhanced
+LICENSE:${PN}-ice-enhanced = "Firmware-ice_enhanced"
+LICENSE:${PN}-ice-enhanced-license = "Firmware-ice_enhanced"
+
+FILES:${PN}-ice-enhanced = " \
+ ${nonarch_base_libdir}/firmware/intel/ice/ddp-comms/* \
+ ${nonarch_base_libdir}/firmware/intel/ice/ddp-wireless_edge/* \
+"
+FILES:${PN}-ice-enhanced-license = " \
+ ${nonarch_base_libdir}/firmware/LICENSE.ice_enhanced \
+"
+RDEPENDS:${PN}-ice-enhanced = "${PN}-ice-enhanced-license"
+
LICENSE:${PN}-ice = "Firmware-ice"
LICENSE:${PN}-ice-license = "Firmware-ice"
FILES:${PN}-ice-license = "${nonarch_base_libdir}/firmware/LICENSE.ice"
-FILES:${PN}-ice = "${nonarch_base_libdir}/firmware/intel/ice"
+FILES:${PN}-ice = " \
+ ${nonarch_base_libdir}/firmware/intel/ice/ddp/* \
+ ${nonarch_base_libdir}/firmware/intel/ice/ddp-lag/* \
+"
RDEPENDS:${PN}-ice = "${PN}-ice-license"
FILES:${PN}-adsp-sst-license = "${nonarch_base_libdir}/firmware/LICENCE.adsp_sst"
@@ -964,48 +1246,102 @@ RDEPENDS:${PN}-qat = "${PN}-qat-license"
# For QCOM VPU/GPU and SDM845
LICENSE:${PN}-qcom-license = "Firmware-qcom"
+LICENSE:${PN}-qcom-yamato-license = "Firmware-qcom-yamato"
+LICENSE:${PN}-qcom-venus-1.8 = "Firmware-qcom"
+LICENSE:${PN}-qcom-venus-4.2 = "Firmware-qcom"
+LICENSE:${PN}-qcom-venus-5.2 = "Firmware-qcom"
+LICENSE:${PN}-qcom-venus-5.4 = "Firmware-qcom"
+LICENSE:${PN}-qcom-vpu-1.0 = "Firmware-qcom"
+LICENSE:${PN}-qcom-vpu-2.0 = "Firmware-qcom"
+LICENSE:${PN}-qcom-adreno-a2xx = "Firmware-qcom Firmware-qcom-yamato"
+LICENSE:${PN}-qcom-adreno-a3xx = "Firmware-qcom"
+LICENSE:${PN}-qcom-adreno-a4xx = "Firmware-qcom"
+LICENSE:${PN}-qcom-adreno-a530 = "Firmware-qcom"
+LICENSE:${PN}-qcom-adreno-a630 = "Firmware-qcom"
+LICENSE:${PN}-qcom-adreno-a650 = "Firmware-qcom"
+LICENSE:${PN}-qcom-adreno-a660 = "Firmware-qcom"
+LICENSE:${PN}-qcom-apq8016-modem = "Firmware-qcom"
+LICENSE:${PN}-qcom-apq8016-wifi = "Firmware-qcom"
+LICENSE:${PN}-qcom-apq8096-audio = "Firmware-qcom"
+LICENSE:${PN}-qcom-apq8096-adreno = "Firmware-qcom"
+LICENSE:${PN}-qcom-apq8096-modem = "Firmware-qcom"
+LICENSE:${PN}-qcom-sc8280xp-lenovo-x13s-audio = "Firmware-qcom"
+LICENSE:${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "Firmware-qcom"
+LICENSE:${PN}-qcom-sc8280xp-lenovo-x13s-compute = "Firmware-qcom"
+LICENSE:${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "Firmware-qcom"
+LICENSE:${PN}-qcom-sdm845-audio = "Firmware-qcom"
+LICENSE:${PN}-qcom-sdm845-adreno = "Firmware-qcom"
+LICENSE:${PN}-qcom-sdm845-compute = "Firmware-qcom"
+LICENSE:${PN}-qcom-sdm845-modem = "Firmware-qcom"
+LICENSE:${PN}-qcom-sm8250-audio = "Firmware-qcom"
+LICENSE:${PN}-qcom-sm8250-adreno = "Firmware-qcom"
+LICENSE:${PN}-qcom-sm8250-compute = "Firmware-qcom"
+
FILES:${PN}-qcom-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom ${nonarch_base_libdir}/firmware/qcom/NOTICE.txt"
+FILES:${PN}-qcom-yamato-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom_yamato"
FILES:${PN}-qcom-venus-1.8 = "${nonarch_base_libdir}/firmware/qcom/venus-1.8/*"
FILES:${PN}-qcom-venus-4.2 = "${nonarch_base_libdir}/firmware/qcom/venus-4.2/*"
FILES:${PN}-qcom-venus-5.2 = "${nonarch_base_libdir}/firmware/qcom/venus-5.2/*"
FILES:${PN}-qcom-venus-5.4 = "${nonarch_base_libdir}/firmware/qcom/venus-5.4/*"
FILES:${PN}-qcom-vpu-1.0 = "${nonarch_base_libdir}/firmware/qcom/vpu-1.0/*"
FILES:${PN}-qcom-vpu-2.0 = "${nonarch_base_libdir}/firmware/qcom/vpu-2.0/*"
-FILES:${PN}-qcom-adreno-a2xx = "${nonarch_base_libdir}/firmware/qcom/leia_*.fw"
+FILES:${PN}-qcom-adreno-a2xx = "${nonarch_base_libdir}/firmware/qcom/leia_*.fw ${nonarch_base_libdir}/firmware/qcom/yamato_*.fw"
FILES:${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a3*_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw"
FILES:${PN}-qcom-adreno-a4xx = "${nonarch_base_libdir}/firmware/qcom/a4*_*.fw"
-FILES:${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.*"
-FILES:${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*"
-FILES:${PN}-qcom-adreno-a650 = "${nonarch_base_libdir}/firmware/qcom/a650*.* ${nonarch_base_libdir}/firmware/qcom/sm8250/a650*.*"
+FILES:${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.fw*"
+FILES:${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.*"
+FILES:${PN}-qcom-adreno-a650 = "${nonarch_base_libdir}/firmware/qcom/a650*.*"
FILES:${PN}-qcom-adreno-a660 = "${nonarch_base_libdir}/firmware/qcom/a660*.*"
+FILES:${PN}-qcom-apq8016-modem = "${nonarch_base_libdir}/firmware/qcom/apq8016/mba.mbn ${nonarch_base_libdir}/firmware/qcom/apq8016/modem.mbn"
+FILES:${PN}-qcom-apq8016-wifi = "${nonarch_base_libdir}/firmware/qcom/apq8016/wcnss.mbn ${nonarch_base_libdir}/firmware/qcom/apq8016/WCNSS*"
+FILES:${PN}-qcom-apq8096-adreno = "${nonarch_base_libdir}/firmware/qcom/apq8096/a530_zap.mbn ${nonarch_base_libdir}/firmware/qcom/a530_zap.mdt"
FILES:${PN}-qcom-apq8096-audio = "${nonarch_base_libdir}/firmware/qcom/apq8096/adsp*.*"
FILES:${PN}-qcom-apq8096-modem = "${nonarch_base_libdir}/firmware/qcom/apq8096/mba.mbn ${nonarch_base_libdir}/firmware/qcom/apq8096/modem*.* ${nonarch_base_libdir}/firmware/qcom/apq8096/wlanmdsp.mbn"
+FILES:${PN}-qcom-sc8280xp-lenovo-x13s-compat = "${nonarch_base_libdir}/firmware/qcom/LENOVO/21BX"
+FILES:${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*adsp*.* ${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/battmgr.jsn"
+FILES:${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/qcdxkmsuc8280.mbn"
+FILES:${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*cdsp*.*"
+FILES:${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*slpi*.*"
+FILES:${PN}-qcom-sdm845-adreno = "${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*"
FILES:${PN}-qcom-sdm845-audio = "${nonarch_base_libdir}/firmware/qcom/sdm845/adsp*.*"
FILES:${PN}-qcom-sdm845-compute = "${nonarch_base_libdir}/firmware/qcom/sdm845/cdsp*.*"
FILES:${PN}-qcom-sdm845-modem = "${nonarch_base_libdir}/firmware/qcom/sdm845/mba.mbn ${nonarch_base_libdir}/firmware/qcom/sdm845/modem*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/wlanmdsp.mbn"
+FILES:${PN}-qcom-sm8250-adreno = "${nonarch_base_libdir}/firmware/qcom/sm8250/a650*.*"
FILES:${PN}-qcom-sm8250-audio = "${nonarch_base_libdir}/firmware/qcom/sm8250/adsp*.*"
FILES:${PN}-qcom-sm8250-compute = "${nonarch_base_libdir}/firmware/qcom/sm8250/cdsp*.*"
+
RDEPENDS:${PN}-qcom-venus-1.8 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-venus-4.2 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-venus-5.2 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-venus-5.4 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-vpu-1.0 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-vpu-2.0 = "${PN}-qcom-license"
-RDEPENDS:${PN}-qcom-adreno-a2xx = "${PN}-qcom-license"
+RDEPENDS:${PN}-qcom-adreno-a2xx = "${PN}-qcom-license ${PN}-qcom-yamato-license"
RDEPENDS:${PN}-qcom-adreno-a3xx = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-adreno-a4xx = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-adreno-a530 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-adreno-a630 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-adreno-a650 = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-adreno-a660 = "${PN}-qcom-license"
+RDEPENDS:${PN}-qcom-apq8016-modem = "${PN}-qcom-license"
+RDEPENDS:${PN}-qcom-apq8016-wifi = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-apq8096-audio = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-apq8096-modem = "${PN}-qcom-license"
+RDEPENDS:${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${PN}-qcom-license"
+RDEPENDS:${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${PN}-qcom-license"
+RDEPENDS:${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${PN}-qcom-license"
+RDEPENDS:${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-sdm845-audio = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-sdm845-compute = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-sdm845-modem = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-sm8250-audio = "${PN}-qcom-license"
RDEPENDS:${PN}-qcom-sm8250-compute = "${PN}-qcom-license"
+RRECOMMENDS:${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS:${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS:${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS:${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+
FILES:${PN}-liquidio = "${nonarch_base_libdir}/firmware/liquidio"
# For Amlogic VDEC
@@ -1026,6 +1362,8 @@ LICENSE:${PN} = "\
& Firmware-ca0132 \
& Firmware-cavium \
& Firmware-chelsio_firmware \
+ & Firmware-cirrus \
+ & Firmware-cnm \
& Firmware-cw1200 \
& Firmware-dib0700 \
& Firmware-e100 \
@@ -1033,7 +1371,6 @@ LICENSE:${PN} = "\
& Firmware-fw_sst_0f28 \
& Firmware-go7007 \
& Firmware-hfi1_firmware \
- & Firmware-i2400m \
& Firmware-ibt_firmware \
& Firmware-it913x \
& Firmware-IntcSST2 \
@@ -1054,7 +1391,6 @@ LICENSE:${PN} = "\
& Firmware-ralink-firmware \
& Firmware-imx-sdma_firmware \
& Firmware-siano \
- & Firmware-tda7706-firmware \
& Firmware-ti-connectivity \
& Firmware-ti-keystone \
& Firmware-ueagle-atm4-firmware \
@@ -1087,3 +1423,9 @@ python populate_packages:prepend () {
# Firmware files are generally not run on the CPU, so they can be
# allarch despite being architecture specific
INSANE_SKIP = "arch"
+
+# Don't warn about already stripped files
+INSANE_SKIP:${PN} = "already-stripped"
+
+# No need to put firmware into the sysroot
+SYSROOT_DIRS_IGNORE += "${nonarch_base_libdir}/firmware"
diff --git a/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.16.bb b/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.16.bb
index c64629d094..d5039264c4 100644
--- a/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.16.bb
+++ b/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.16.bb
@@ -7,7 +7,7 @@ SRC_URI:append:libc-musl = "\
file://0001-include-linux-stddef.h-in-swab.h-uapi-header.patch \
"
-SRC_URI:append = "\
+SRC_URI += "\
file://0001-scripts-Use-fixed-input-and-output-files-instead-of-.patch \
file://0001-kbuild-install_headers.sh-Strip-_UAPI-from-if-define.patch \
"
diff --git a/meta/recipes-kernel/linux/cve-exclusion.inc b/meta/recipes-kernel/linux/cve-exclusion.inc
new file mode 100644
index 0000000000..c34928e100
--- /dev/null
+++ b/meta/recipes-kernel/linux/cve-exclusion.inc
@@ -0,0 +1,6 @@
+# https://nvd.nist.gov/vuln/detail/CVE-2022-39188
+# Patched in kernel since v5.19 b67fbebd4cf980aecbcc750e1462128bffe8ae15
+# Backported in version v5.4.212 c9c5501e815132530d741ec9fdd22657f91656bc
+# Backported in version v5.10.141 895428ee124ad70b9763259308354877b725c31d
+# Backported in version v5.15.65 3ffb97fce282df03723995f5eed6a559d008078e
+CVE_CHECK_IGNORE += "CVE-2022-39188"
diff --git a/meta/recipes-kernel/linux/cve-exclusion_5.10.inc b/meta/recipes-kernel/linux/cve-exclusion_5.10.inc
new file mode 100644
index 0000000000..4d959c90b1
--- /dev/null
+++ b/meta/recipes-kernel/linux/cve-exclusion_5.10.inc
@@ -0,0 +1,7565 @@
+
+# Auto-generated CVE metadata, DO NOT EDIT BY HAND.
+# Generated at 2024-02-21 03:55:27.305577 for version 5.10.209
+
+python check_kernel_cve_status_version() {
+ this_version = "5.10.209"
+ kernel_version = d.getVar("LINUX_VERSION")
+ if kernel_version != this_version:
+ bb.warn("Kernel CVE status needs updating: generated for %s but kernel is %s" % (this_version, kernel_version))
+}
+do_cve_check[prefuncs] += "check_kernel_cve_status_version"
+
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_IGNORE += "CVE-2003-1604"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2004-0230"
+
+# CVE-2005-3660 has no known resolution
+
+# fixed-version: Fixed after version 2.6.26rc5
+CVE_CHECK_IGNORE += "CVE-2006-3635"
+
+# fixed-version: Fixed after version 2.6.19rc3
+CVE_CHECK_IGNORE += "CVE-2006-5331"
+
+# fixed-version: Fixed after version 2.6.19rc2
+CVE_CHECK_IGNORE += "CVE-2006-6128"
+
+# CVE-2007-3719 has no known resolution
+
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_IGNORE += "CVE-2007-4774"
+
+# fixed-version: Fixed after version 2.6.24rc6
+CVE_CHECK_IGNORE += "CVE-2007-6761"
+
+# fixed-version: Fixed after version 2.6.20rc5
+CVE_CHECK_IGNORE += "CVE-2007-6762"
+
+# CVE-2008-2544 has no known resolution
+
+# CVE-2008-4609 has no known resolution
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_IGNORE += "CVE-2008-7316"
+
+# fixed-version: Fixed after version 2.6.31rc6
+CVE_CHECK_IGNORE += "CVE-2009-2692"
+
+# fixed-version: Fixed after version 2.6.23rc9
+CVE_CHECK_IGNORE += "CVE-2010-0008"
+
+# fixed-version: Fixed after version 2.6.36rc5
+CVE_CHECK_IGNORE += "CVE-2010-3432"
+
+# CVE-2010-4563 has no known resolution
+
+# fixed-version: Fixed after version 2.6.37rc6
+CVE_CHECK_IGNORE += "CVE-2010-4648"
+
+# fixed-version: Fixed after version 2.6.38rc1
+CVE_CHECK_IGNORE += "CVE-2010-5313"
+
+# CVE-2010-5321 has no known resolution
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_IGNORE += "CVE-2010-5328"
+
+# fixed-version: Fixed after version 2.6.39rc1
+CVE_CHECK_IGNORE += "CVE-2010-5329"
+
+# fixed-version: Fixed after version 2.6.34rc7
+CVE_CHECK_IGNORE += "CVE-2010-5331"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_IGNORE += "CVE-2010-5332"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_IGNORE += "CVE-2011-4098"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_IGNORE += "CVE-2011-4131"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_IGNORE += "CVE-2011-4915"
+
+# CVE-2011-4916 has no known resolution
+
+# CVE-2011-4917 has no known resolution
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_IGNORE += "CVE-2011-5321"
+
+# fixed-version: Fixed after version 3.1rc1
+CVE_CHECK_IGNORE += "CVE-2011-5327"
+
+# fixed-version: Fixed after version 3.7rc2
+CVE_CHECK_IGNORE += "CVE-2012-0957"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-2119"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-2136"
+
+# fixed-version: Fixed after version 3.5rc2
+CVE_CHECK_IGNORE += "CVE-2012-2137"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_IGNORE += "CVE-2012-2313"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_IGNORE += "CVE-2012-2319"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2012-2372"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_IGNORE += "CVE-2012-2375"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-2390"
+
+# fixed-version: Fixed after version 3.5rc4
+CVE_CHECK_IGNORE += "CVE-2012-2669"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_IGNORE += "CVE-2012-2744"
+
+# fixed-version: Fixed after version 3.4rc3
+CVE_CHECK_IGNORE += "CVE-2012-2745"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_IGNORE += "CVE-2012-3364"
+
+# fixed-version: Fixed after version 3.4rc5
+CVE_CHECK_IGNORE += "CVE-2012-3375"
+
+# fixed-version: Fixed after version 3.5rc5
+CVE_CHECK_IGNORE += "CVE-2012-3400"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_IGNORE += "CVE-2012-3412"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-3430"
+
+# fixed-version: Fixed after version 2.6.19rc4
+CVE_CHECK_IGNORE += "CVE-2012-3510"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_IGNORE += "CVE-2012-3511"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-3520"
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_IGNORE += "CVE-2012-3552"
+
+# Skipping CVE-2012-4220, no affected_versions
+
+# Skipping CVE-2012-4221, no affected_versions
+
+# Skipping CVE-2012-4222, no affected_versions
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_IGNORE += "CVE-2012-4398"
+
+# fixed-version: Fixed after version 2.6.36rc4
+CVE_CHECK_IGNORE += "CVE-2012-4444"
+
+# fixed-version: Fixed after version 3.7rc6
+CVE_CHECK_IGNORE += "CVE-2012-4461"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_IGNORE += "CVE-2012-4467"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_IGNORE += "CVE-2012-4508"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2012-4530"
+
+# CVE-2012-4542 has no known resolution
+
+# fixed-version: Fixed after version 3.7rc4
+CVE_CHECK_IGNORE += "CVE-2012-4565"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2012-5374"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2012-5375"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-5517"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2012-6536"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2012-6537"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2012-6538"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6539"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6540"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6541"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6542"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6543"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6544"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6545"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6546"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-6547"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-6548"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-6549"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_IGNORE += "CVE-2012-6638"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_IGNORE += "CVE-2012-6647"
+
+# fixed-version: Fixed after version 3.6
+CVE_CHECK_IGNORE += "CVE-2012-6657"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_IGNORE += "CVE-2012-6689"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-6701"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2012-6703"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-6704"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_IGNORE += "CVE-2012-6712"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-0160"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-0190"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_IGNORE += "CVE-2013-0216"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_IGNORE += "CVE-2013-0217"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_IGNORE += "CVE-2013-0228"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_IGNORE += "CVE-2013-0231"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_IGNORE += "CVE-2013-0268"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_IGNORE += "CVE-2013-0290"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2013-0309"
+
+# fixed-version: Fixed after version 3.5
+CVE_CHECK_IGNORE += "CVE-2013-0310"
+
+# fixed-version: Fixed after version 3.7rc8
+CVE_CHECK_IGNORE += "CVE-2013-0311"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-0313"
+
+# fixed-version: Fixed after version 3.11rc7
+CVE_CHECK_IGNORE += "CVE-2013-0343"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_IGNORE += "CVE-2013-0349"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-0871"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-0913"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-0914"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-1059"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-1763"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-1767"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2013-1772"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_IGNORE += "CVE-2013-1773"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-1774"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1792"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-1796"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-1797"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-1798"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_IGNORE += "CVE-2013-1819"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2013-1826"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2013-1827"
+
+# fixed-version: Fixed after version 3.9rc2
+CVE_CHECK_IGNORE += "CVE-2013-1828"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1848"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1858"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1860"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_IGNORE += "CVE-2013-1928"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_IGNORE += "CVE-2013-1929"
+
+# Skipping CVE-2013-1935, no affected_versions
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_IGNORE += "CVE-2013-1943"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_IGNORE += "CVE-2013-1956"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_IGNORE += "CVE-2013-1957"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_IGNORE += "CVE-2013-1958"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-1959"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-1979"
+
+# fixed-version: Fixed after version 3.8rc2
+CVE_CHECK_IGNORE += "CVE-2013-2015"
+
+# fixed-version: Fixed after version 2.6.34
+CVE_CHECK_IGNORE += "CVE-2013-2017"
+
+# fixed-version: Fixed after version 3.8rc4
+CVE_CHECK_IGNORE += "CVE-2013-2058"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2094"
+
+# fixed-version: Fixed after version 2.6.34rc4
+CVE_CHECK_IGNORE += "CVE-2013-2128"
+
+# fixed-version: Fixed after version 3.11rc3
+CVE_CHECK_IGNORE += "CVE-2013-2140"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2141"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2146"
+
+# fixed-version: Fixed after version 3.12rc3
+CVE_CHECK_IGNORE += "CVE-2013-2147"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-2148"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-2164"
+
+# Skipping CVE-2013-2188, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-2206"
+
+# Skipping CVE-2013-2224, no affected_versions
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_IGNORE += "CVE-2013-2232"
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_IGNORE += "CVE-2013-2234"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_IGNORE += "CVE-2013-2237"
+
+# Skipping CVE-2013-2239, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-2546"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-2547"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-2548"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2596"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-2634"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-2635"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-2636"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_IGNORE += "CVE-2013-2850"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-2851"
+
+# fixed-version: Fixed after version 3.10rc6
+CVE_CHECK_IGNORE += "CVE-2013-2852"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2888"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2889"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2890"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2891"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2892"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2893"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2894"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2895"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2896"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2897"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2898"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2899"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-2929"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-2930"
+
+# fixed-version: Fixed after version 3.9
+CVE_CHECK_IGNORE += "CVE-2013-3076"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3222"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3223"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3224"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3225"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3226"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3227"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3228"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3229"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3230"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3231"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3232"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3233"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3234"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3235"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3236"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3237"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3301"
+
+# fixed-version: Fixed after version 3.8rc3
+CVE_CHECK_IGNORE += "CVE-2013-3302"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4125"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4127"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4129"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4162"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4163"
+
+# fixed-version: Fixed after version 3.11rc5
+CVE_CHECK_IGNORE += "CVE-2013-4205"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_IGNORE += "CVE-2013-4220"
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_IGNORE += "CVE-2013-4247"
+
+# fixed-version: Fixed after version 3.11rc6
+CVE_CHECK_IGNORE += "CVE-2013-4254"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_IGNORE += "CVE-2013-4270"
+
+# fixed-version: Fixed after version 3.12rc6
+CVE_CHECK_IGNORE += "CVE-2013-4299"
+
+# fixed-version: Fixed after version 3.11
+CVE_CHECK_IGNORE += "CVE-2013-4300"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2013-4312"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-4343"
+
+# fixed-version: Fixed after version 3.13rc2
+CVE_CHECK_IGNORE += "CVE-2013-4345"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-4348"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-4350"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_IGNORE += "CVE-2013-4387"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2013-4470"
+
+# fixed-version: Fixed after version 3.10rc1
+CVE_CHECK_IGNORE += "CVE-2013-4483"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4511"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4512"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4513"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4514"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4515"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4516"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-4563"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_IGNORE += "CVE-2013-4579"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-4587"
+
+# fixed-version: Fixed after version 2.6.33rc4
+CVE_CHECK_IGNORE += "CVE-2013-4588"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2013-4591"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2013-4592"
+
+# Skipping CVE-2013-4737, no affected_versions
+
+# Skipping CVE-2013-4738, no affected_versions
+
+# Skipping CVE-2013-4739, no affected_versions
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_IGNORE += "CVE-2013-5634"
+
+# fixed-version: Fixed after version 3.6rc6
+CVE_CHECK_IGNORE += "CVE-2013-6282"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6367"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6368"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6376"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6378"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6380"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6381"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6382"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-6383"
+
+# Skipping CVE-2013-6392, no affected_versions
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-6431"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6432"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2013-6885"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7026"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2013-7027"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7263"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7264"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7265"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7266"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7267"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7268"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7269"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7270"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7271"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7281"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_IGNORE += "CVE-2013-7339"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7348"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2013-7421"
+
+# CVE-2013-7445 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2013-7446"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2013-7470"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-0038"
+
+# fixed-version: Fixed after version 3.14rc5
+CVE_CHECK_IGNORE += "CVE-2014-0049"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_IGNORE += "CVE-2014-0055"
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_IGNORE += "CVE-2014-0069"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_IGNORE += "CVE-2014-0077"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_IGNORE += "CVE-2014-0100"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-0101"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-0102"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_IGNORE += "CVE-2014-0131"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-0155"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-0181"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-0196"
+
+# fixed-version: Fixed after version 2.6.33rc5
+CVE_CHECK_IGNORE += "CVE-2014-0203"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_IGNORE += "CVE-2014-0205"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-0206"
+
+# Skipping CVE-2014-0972, no affected_versions
+
+# fixed-version: Fixed after version 3.13
+CVE_CHECK_IGNORE += "CVE-2014-1438"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2014-1444"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2014-1445"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_IGNORE += "CVE-2014-1446"
+
+# fixed-version: Fixed after version 3.13rc8
+CVE_CHECK_IGNORE += "CVE-2014-1690"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-1737"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-1738"
+
+# fixed-version: Fixed after version 3.15rc6
+CVE_CHECK_IGNORE += "CVE-2014-1739"
+
+# fixed-version: Fixed after version 3.14rc2
+CVE_CHECK_IGNORE += "CVE-2014-1874"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-2038"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_IGNORE += "CVE-2014-2039"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_IGNORE += "CVE-2014-2309"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-2523"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_IGNORE += "CVE-2014-2568"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-2580"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-2672"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-2673"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-2678"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-2706"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-2739"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-2851"
+
+# fixed-version: Fixed after version 3.2rc7
+CVE_CHECK_IGNORE += "CVE-2014-2889"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-3122"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-3144"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-3145"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_IGNORE += "CVE-2014-3153"
+
+# fixed-version: Fixed after version 3.17rc4
+CVE_CHECK_IGNORE += "CVE-2014-3180"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_IGNORE += "CVE-2014-3181"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3182"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3183"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3184"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_IGNORE += "CVE-2014-3185"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_IGNORE += "CVE-2014-3186"
+
+# Skipping CVE-2014-3519, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_IGNORE += "CVE-2014-3534"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_IGNORE += "CVE-2014-3535"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3601"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3610"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3611"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-3631"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2014-3645"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3646"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3647"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3673"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3687"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3688"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3690"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-3917"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_IGNORE += "CVE-2014-3940"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-4014"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-4027"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-4157"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-4171"
+
+# Skipping CVE-2014-4322, no affected_versions
+
+# Skipping CVE-2014-4323, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-4508"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-4608"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-4611"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4652"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4653"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4654"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4655"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4656"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-4667"
+
+# fixed-version: Fixed after version 3.16rc4
+CVE_CHECK_IGNORE += "CVE-2014-4699"
+
+# fixed-version: Fixed after version 3.16rc6
+CVE_CHECK_IGNORE += "CVE-2014-4943"
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_IGNORE += "CVE-2014-5045"
+
+# fixed-version: Fixed after version 3.16
+CVE_CHECK_IGNORE += "CVE-2014-5077"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2014-5206"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2014-5207"
+
+# Skipping CVE-2014-5332, no affected_versions
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-5471"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-5472"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6410"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6416"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6417"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6418"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-7145"
+
+# Skipping CVE-2014-7207, no affected_versions
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-7283"
+
+# fixed-version: Fixed after version 3.15rc7
+CVE_CHECK_IGNORE += "CVE-2014-7284"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-7822"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_IGNORE += "CVE-2014-7825"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_IGNORE += "CVE-2014-7826"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_IGNORE += "CVE-2014-7841"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-7842"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_IGNORE += "CVE-2014-7843"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-7970"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-7975"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_IGNORE += "CVE-2014-8086"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8133"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8134"
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_IGNORE += "CVE-2014-8159"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-8160"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2014-8171"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2014-8172"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_IGNORE += "CVE-2014-8173"
+
+# Skipping CVE-2014-8181, no affected_versions
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-8369"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-8480"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-8481"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8559"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_IGNORE += "CVE-2014-8709"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-8884"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8989"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_IGNORE += "CVE-2014-9090"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_IGNORE += "CVE-2014-9322"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9419"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9420"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9428"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_IGNORE += "CVE-2014-9529"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9584"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_IGNORE += "CVE-2014-9585"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9644"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9683"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9710"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-9715"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2014-9717"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9728"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9729"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9730"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9731"
+
+# Skipping CVE-2014-9777, no affected_versions
+
+# Skipping CVE-2014-9778, no affected_versions
+
+# Skipping CVE-2014-9779, no affected_versions
+
+# Skipping CVE-2014-9780, no affected_versions
+
+# Skipping CVE-2014-9781, no affected_versions
+
+# Skipping CVE-2014-9782, no affected_versions
+
+# Skipping CVE-2014-9783, no affected_versions
+
+# Skipping CVE-2014-9784, no affected_versions
+
+# Skipping CVE-2014-9785, no affected_versions
+
+# Skipping CVE-2014-9786, no affected_versions
+
+# Skipping CVE-2014-9787, no affected_versions
+
+# Skipping CVE-2014-9788, no affected_versions
+
+# Skipping CVE-2014-9789, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-9803"
+
+# Skipping CVE-2014-9863, no affected_versions
+
+# Skipping CVE-2014-9864, no affected_versions
+
+# Skipping CVE-2014-9865, no affected_versions
+
+# Skipping CVE-2014-9866, no affected_versions
+
+# Skipping CVE-2014-9867, no affected_versions
+
+# Skipping CVE-2014-9868, no affected_versions
+
+# Skipping CVE-2014-9869, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2014-9870"
+
+# Skipping CVE-2014-9871, no affected_versions
+
+# Skipping CVE-2014-9872, no affected_versions
+
+# Skipping CVE-2014-9873, no affected_versions
+
+# Skipping CVE-2014-9874, no affected_versions
+
+# Skipping CVE-2014-9875, no affected_versions
+
+# Skipping CVE-2014-9876, no affected_versions
+
+# Skipping CVE-2014-9877, no affected_versions
+
+# Skipping CVE-2014-9878, no affected_versions
+
+# Skipping CVE-2014-9879, no affected_versions
+
+# Skipping CVE-2014-9880, no affected_versions
+
+# Skipping CVE-2014-9881, no affected_versions
+
+# Skipping CVE-2014-9882, no affected_versions
+
+# Skipping CVE-2014-9883, no affected_versions
+
+# Skipping CVE-2014-9884, no affected_versions
+
+# Skipping CVE-2014-9885, no affected_versions
+
+# Skipping CVE-2014-9886, no affected_versions
+
+# Skipping CVE-2014-9887, no affected_versions
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2014-9888"
+
+# Skipping CVE-2014-9889, no affected_versions
+
+# Skipping CVE-2014-9890, no affected_versions
+
+# Skipping CVE-2014-9891, no affected_versions
+
+# Skipping CVE-2014-9892, no affected_versions
+
+# Skipping CVE-2014-9893, no affected_versions
+
+# Skipping CVE-2014-9894, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2014-9895"
+
+# Skipping CVE-2014-9896, no affected_versions
+
+# Skipping CVE-2014-9897, no affected_versions
+
+# Skipping CVE-2014-9898, no affected_versions
+
+# Skipping CVE-2014-9899, no affected_versions
+
+# Skipping CVE-2014-9900, no affected_versions
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_IGNORE += "CVE-2014-9903"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2014-9904"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-9914"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-9922"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9940"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_IGNORE += "CVE-2015-0239"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2015-0274"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-0275"
+
+# Skipping CVE-2015-0777, no affected_versions
+
+# Skipping CVE-2015-1328, no affected_versions
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_IGNORE += "CVE-2015-1333"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_IGNORE += "CVE-2015-1339"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2015-1350"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-1420"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-1421"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-1465"
+
+# fixed-version: Fixed after version 3.19rc5
+CVE_CHECK_IGNORE += "CVE-2015-1573"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-1593"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2015-1805"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-2041"
+
+# fixed-version: Fixed after version 3.19
+CVE_CHECK_IGNORE += "CVE-2015-2042"
+
+# fixed-version: Fixed after version 4.0rc4
+CVE_CHECK_IGNORE += "CVE-2015-2150"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-2666"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_IGNORE += "CVE-2015-2672"
+
+# fixed-version: Fixed after version 4.0rc6
+CVE_CHECK_IGNORE += "CVE-2015-2686"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_IGNORE += "CVE-2015-2830"
+
+# CVE-2015-2877 has no known resolution
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_IGNORE += "CVE-2015-2922"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-2925"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-3212"
+
+# fixed-version: Fixed after version 2.6.33rc8
+CVE_CHECK_IGNORE += "CVE-2015-3214"
+
+# fixed-version: Fixed after version 4.2rc2
+CVE_CHECK_IGNORE += "CVE-2015-3288"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_IGNORE += "CVE-2015-3290"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_IGNORE += "CVE-2015-3291"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_IGNORE += "CVE-2015-3331"
+
+# Skipping CVE-2015-3332, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-3339"
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_IGNORE += "CVE-2015-3636"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-4001"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-4002"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-4003"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-4004"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-4036"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-4167"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_IGNORE += "CVE-2015-4170"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-4176"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-4177"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-4178"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-4692"
+
+# fixed-version: Fixed after version 4.1rc6
+CVE_CHECK_IGNORE += "CVE-2015-4700"
+
+# fixed-version: Fixed after version 4.2rc7
+CVE_CHECK_IGNORE += "CVE-2015-5156"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_IGNORE += "CVE-2015-5157"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_IGNORE += "CVE-2015-5257"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_IGNORE += "CVE-2015-5283"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-5307"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-5327"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-5364"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-5366"
+
+# fixed-version: Fixed after version 4.2rc6
+CVE_CHECK_IGNORE += "CVE-2015-5697"
+
+# fixed-version: Fixed after version 4.1rc3
+CVE_CHECK_IGNORE += "CVE-2015-5706"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-5707"
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_IGNORE += "CVE-2015-6252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-6526"
+
+# CVE-2015-6619 has no known resolution
+
+# CVE-2015-6646 has no known resolution
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-6937"
+
+# Skipping CVE-2015-7312, no affected_versions
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2015-7509"
+
+# fixed-version: Fixed after version 4.4rc7
+CVE_CHECK_IGNORE += "CVE-2015-7513"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-7515"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_IGNORE += "CVE-2015-7550"
+
+# Skipping CVE-2015-7553, no affected_versions
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_IGNORE += "CVE-2015-7566"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_IGNORE += "CVE-2015-7613"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-7799"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2015-7833"
+
+# Skipping CVE-2015-7837, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_IGNORE += "CVE-2015-7872"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-7884"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-7885"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2015-7990"
+
+# Skipping CVE-2015-8019, no affected_versions
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8104"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_IGNORE += "CVE-2015-8215"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_IGNORE += "CVE-2015-8324"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8374"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_IGNORE += "CVE-2015-8539"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8543"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8550"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8551"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8552"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8553"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8569"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8575"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2015-8660"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2015-8709"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-8746"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_IGNORE += "CVE-2015-8767"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_IGNORE += "CVE-2015-8785"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8787"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8812"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8816"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-8830"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8839"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_IGNORE += "CVE-2015-8844"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_IGNORE += "CVE-2015-8845"
+
+# Skipping CVE-2015-8937, no affected_versions
+
+# Skipping CVE-2015-8938, no affected_versions
+
+# Skipping CVE-2015-8939, no affected_versions
+
+# Skipping CVE-2015-8940, no affected_versions
+
+# Skipping CVE-2015-8941, no affected_versions
+
+# Skipping CVE-2015-8942, no affected_versions
+
+# Skipping CVE-2015-8943, no affected_versions
+
+# Skipping CVE-2015-8944, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_IGNORE += "CVE-2015-8950"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2015-8952"
+
+# fixed-version: Fixed after version 4.3
+CVE_CHECK_IGNORE += "CVE-2015-8953"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-8955"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-8956"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8961"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8962"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_IGNORE += "CVE-2015-8963"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8964"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_IGNORE += "CVE-2015-8966"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-8967"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8970"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-9004"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-9016"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-9289"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-0617"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_IGNORE += "CVE-2016-0723"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-0728"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-0758"
+
+# Skipping CVE-2016-0774, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2016-0821"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_IGNORE += "CVE-2016-0823"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2016-10044"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10088"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_IGNORE += "CVE-2016-10147"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-10150"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10153"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10154"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-10200"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10208"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-10229"
+
+# fixed-version: Fixed after version 4.8rc6
+CVE_CHECK_IGNORE += "CVE-2016-10318"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2016-10723"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10741"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10764"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-10905"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_IGNORE += "CVE-2016-10906"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-10907"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_IGNORE += "CVE-2016-1237"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-1575"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-1576"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_IGNORE += "CVE-2016-1583"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2016-2053"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2069"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_IGNORE += "CVE-2016-2070"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2085"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-2117"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_IGNORE += "CVE-2016-2143"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-2184"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-2185"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-2186"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-2187"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2016-2188"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2383"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2384"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2543"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2544"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2545"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2546"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2547"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2548"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2549"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2550"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_IGNORE += "CVE-2016-2782"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2847"
+
+# Skipping CVE-2016-2853, no affected_versions
+
+# Skipping CVE-2016-2854, no affected_versions
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_IGNORE += "CVE-2016-3044"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2016-3070"
+
+# fixed-version: Fixed after version 4.6rc2
+CVE_CHECK_IGNORE += "CVE-2016-3134"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3135"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3136"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3137"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3138"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2016-3139"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3140"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3156"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3157"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3672"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3689"
+
+# Skipping CVE-2016-3695, no affected_versions
+
+# Skipping CVE-2016-3699, no affected_versions
+
+# Skipping CVE-2016-3707, no affected_versions
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-3713"
+
+# CVE-2016-3775 has no known resolution
+
+# CVE-2016-3802 has no known resolution
+
+# CVE-2016-3803 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2016-3841"
+
+# fixed-version: Fixed after version 4.8rc2
+CVE_CHECK_IGNORE += "CVE-2016-3857"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_IGNORE += "CVE-2016-3951"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3955"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-3961"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4440"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_IGNORE += "CVE-2016-4470"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4482"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4485"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4486"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-4557"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_IGNORE += "CVE-2016-4558"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-4565"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-4568"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4569"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4578"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4580"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_IGNORE += "CVE-2016-4581"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_IGNORE += "CVE-2016-4794"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-4805"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4913"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4951"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4997"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4998"
+
+# fixed-version: Fixed after version 4.9rc2
+CVE_CHECK_IGNORE += "CVE-2016-5195"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_IGNORE += "CVE-2016-5243"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_IGNORE += "CVE-2016-5244"
+
+# Skipping CVE-2016-5340, no affected_versions
+
+# Skipping CVE-2016-5342, no affected_versions
+
+# Skipping CVE-2016-5343, no affected_versions
+
+# Skipping CVE-2016-5344, no affected_versions
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_IGNORE += "CVE-2016-5400"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-5412"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_IGNORE += "CVE-2016-5696"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-5728"
+
+# fixed-version: Fixed after version 4.7rc6
+CVE_CHECK_IGNORE += "CVE-2016-5828"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_IGNORE += "CVE-2016-5829"
+
+# CVE-2016-5870 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-6130"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-6136"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_IGNORE += "CVE-2016-6156"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_IGNORE += "CVE-2016-6162"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_IGNORE += "CVE-2016-6187"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-6197"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-6198"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-6213"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-6327"
+
+# fixed-version: Fixed after version 4.8rc3
+CVE_CHECK_IGNORE += "CVE-2016-6480"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-6516"
+
+# Skipping CVE-2016-6753, no affected_versions
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2016-6786"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2016-6787"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_IGNORE += "CVE-2016-6828"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-7039"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_IGNORE += "CVE-2016-7042"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-7097"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-7117"
+
+# Skipping CVE-2016-7118, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-7425"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-7910"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_IGNORE += "CVE-2016-7911"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-7912"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-7913"
+
+# fixed-version: Fixed after version 4.6rc4
+CVE_CHECK_IGNORE += "CVE-2016-7914"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-7915"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_IGNORE += "CVE-2016-7916"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_IGNORE += "CVE-2016-7917"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_IGNORE += "CVE-2016-8399"
+
+# Skipping CVE-2016-8401, no affected_versions
+
+# Skipping CVE-2016-8402, no affected_versions
+
+# Skipping CVE-2016-8403, no affected_versions
+
+# Skipping CVE-2016-8404, no affected_versions
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_IGNORE += "CVE-2016-8405"
+
+# Skipping CVE-2016-8406, no affected_versions
+
+# Skipping CVE-2016-8407, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-8630"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-8632"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-8633"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2016-8636"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_IGNORE += "CVE-2016-8645"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2016-8646"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-8650"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-8655"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2016-8658"
+
+# CVE-2016-8660 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-8666"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-9083"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-9084"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-9120"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2016-9178"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2016-9191"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_IGNORE += "CVE-2016-9313"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-9555"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_IGNORE += "CVE-2016-9576"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-9588"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2016-9604"
+
+# Skipping CVE-2016-9644, no affected_versions
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-9685"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-9754"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-9755"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-9756"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-9777"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-9793"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-9794"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-9806"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-9919"
+
+# Skipping CVE-2017-0403, no affected_versions
+
+# Skipping CVE-2017-0404, no affected_versions
+
+# Skipping CVE-2017-0426, no affected_versions
+
+# Skipping CVE-2017-0427, no affected_versions
+
+# CVE-2017-0507 has no known resolution
+
+# CVE-2017-0508 has no known resolution
+
+# Skipping CVE-2017-0510, no affected_versions
+
+# Skipping CVE-2017-0528, no affected_versions
+
+# Skipping CVE-2017-0537, no affected_versions
+
+# CVE-2017-0564 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-0605"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-0627"
+
+# CVE-2017-0630 has no known resolution
+
+# CVE-2017-0749 has no known resolution
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2017-0750"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-0786"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-0861"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000111"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000112"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000251"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000253"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000255"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-1000363"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_IGNORE += "CVE-2017-1000364"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-1000365"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000370"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000371"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_IGNORE += "CVE-2017-1000379"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000380"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2017-1000405"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-1000407"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-1000410"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-10661"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-10662"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-10663"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-10810"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-10911"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-11089"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-11176"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-11472"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_IGNORE += "CVE-2017-11473"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_IGNORE += "CVE-2017-11600"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2017-12134"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-12146"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_IGNORE += "CVE-2017-12153"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-12154"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_IGNORE += "CVE-2017-12168"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-12188"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-12190"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-12192"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-12193"
+
+# fixed-version: Fixed after version 4.13rc4
+CVE_CHECK_IGNORE += "CVE-2017-12762"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-13080"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-13166"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2017-13167"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2017-13168"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2017-13215"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-13216"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2017-13220"
+
+# CVE-2017-13221 has no known resolution
+
+# CVE-2017-13222 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-13305"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_IGNORE += "CVE-2017-13686"
+
+# CVE-2017-13693 has no known resolution
+
+# CVE-2017-13694 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2017-13695"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2017-13715"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-14051"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_IGNORE += "CVE-2017-14106"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2017-14140"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-14156"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-14340"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-14489"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_IGNORE += "CVE-2017-14497"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-14954"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_IGNORE += "CVE-2017-14991"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2017-15102"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-15115"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2017-15116"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-15121"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-15126"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-15127"
+
+# fixed-version: Fixed after version 4.14rc8
+CVE_CHECK_IGNORE += "CVE-2017-15128"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-15129"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-15265"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-15274"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-15299"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-15306"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-15537"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-15649"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2017-15868"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-15951"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16525"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16526"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16527"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-16528"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16529"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16530"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16531"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16532"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16533"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16534"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-16535"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16536"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16537"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-16538"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-16643"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-16644"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-16645"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16646"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_IGNORE += "CVE-2017-16647"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16648"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_IGNORE += "CVE-2017-16649"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_IGNORE += "CVE-2017-16650"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16911"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16912"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16913"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16914"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-16939"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16994"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-16995"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-16996"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_IGNORE += "CVE-2017-17052"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_IGNORE += "CVE-2017-17053"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17448"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17449"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17450"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17558"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17712"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17741"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17805"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17806"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-17807"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17852"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17853"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17854"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17855"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17856"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17857"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-17862"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17863"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17864"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2017-17975"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_IGNORE += "CVE-2017-18017"
+
+# fixed-version: Fixed after version 4.15rc7
+CVE_CHECK_IGNORE += "CVE-2017-18075"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18079"
+
+# CVE-2017-18169 has no known resolution
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2017-18174"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18193"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-18200"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2017-18202"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18203"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18204"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2017-18208"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18216"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18218"
+
+# fixed-version: Fixed after version 4.12rc4
+CVE_CHECK_IGNORE += "CVE-2017-18221"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-18222"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18224"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-18232"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18241"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-18249"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18255"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18257"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2017-18261"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-18270"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-18344"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-18360"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-18379"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18509"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18549"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18550"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2017-18551"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18552"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_IGNORE += "CVE-2017-18595"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-2583"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-2584"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-2596"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-2618"
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_IGNORE += "CVE-2017-2634"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-2636"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2017-2647"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-2671"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-5123"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5546"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_IGNORE += "CVE-2017-5547"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_IGNORE += "CVE-2017-5548"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5549"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5550"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5551"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_IGNORE += "CVE-2017-5576"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_IGNORE += "CVE-2017-5577"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-5669"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-5715"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-5753"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-5754"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-5897"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-5967"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-5970"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2017-5972"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-5986"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-6001"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6074"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-6214"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6345"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6346"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-6347"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6348"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-6353"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-6874"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2017-6951"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_IGNORE += "CVE-2017-7184"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_IGNORE += "CVE-2017-7187"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7261"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-7273"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_IGNORE += "CVE-2017-7277"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7294"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7308"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-7346"
+
+# CVE-2017-7369 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_IGNORE += "CVE-2017-7374"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2017-7472"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_IGNORE += "CVE-2017-7477"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-7482"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-7487"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2017-7495"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-7518"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-7533"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-7541"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_IGNORE += "CVE-2017-7542"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_IGNORE += "CVE-2017-7558"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7616"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2017-7618"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_IGNORE += "CVE-2017-7645"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_IGNORE += "CVE-2017-7889"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_IGNORE += "CVE-2017-7895"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2017-7979"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_IGNORE += "CVE-2017-8061"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-8062"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8063"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8064"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8066"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8067"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-8068"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-8069"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-8070"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_IGNORE += "CVE-2017-8071"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_IGNORE += "CVE-2017-8072"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-8106"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_IGNORE += "CVE-2017-8240"
+
+# CVE-2017-8242 has no known resolution
+
+# CVE-2017-8244 has no known resolution
+
+# CVE-2017-8245 has no known resolution
+
+# CVE-2017-8246 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-8797"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-8824"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-8831"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-8890"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-8924"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-8925"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-9059"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9074"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9075"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9076"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9077"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-9150"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_IGNORE += "CVE-2017-9211"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_IGNORE += "CVE-2017-9242"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-9605"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_IGNORE += "CVE-2017-9725"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-9984"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-9985"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-9986"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2018-1000004"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-1000026"
+
+# fixed-version: Fixed after version 4.15
+CVE_CHECK_IGNORE += "CVE-2018-1000028"
+
+# fixed-version: Fixed after version 4.16
+CVE_CHECK_IGNORE += "CVE-2018-1000199"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_IGNORE += "CVE-2018-1000200"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-1000204"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-10021"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-10074"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2018-10087"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2018-10124"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_IGNORE += "CVE-2018-10322"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_IGNORE += "CVE-2018-10323"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2018-1065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2018-1066"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2018-10675"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-1068"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-10840"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-10853"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-1087"
+
+# CVE-2018-10872 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10876"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10877"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10878"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10879"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10880"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10881"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10882"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10883"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_IGNORE += "CVE-2018-10901"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_IGNORE += "CVE-2018-10902"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_IGNORE += "CVE-2018-1091"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1092"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1093"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2018-10938"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1094"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_IGNORE += "CVE-2018-10940"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1095"
+
+# fixed-version: Fixed after version 4.17rc2
+CVE_CHECK_IGNORE += "CVE-2018-1108"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-1118"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_IGNORE += "CVE-2018-1120"
+
+# CVE-2018-1121 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2018-11232"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-1128"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-1129"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-1130"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-11412"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-11506"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_IGNORE += "CVE-2018-11508"
+
+# CVE-2018-11987 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2018-12126"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2018-12127"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2018-12130"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2018-12207"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-12232"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_IGNORE += "CVE-2018-12233"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-12633"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_IGNORE += "CVE-2018-12714"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-12896"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-12904"
+
+# CVE-2018-12928 has no known resolution
+
+# CVE-2018-12929 has no known resolution
+
+# CVE-2018-12930 has no known resolution
+
+# CVE-2018-12931 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13053"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-13093"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-13094"
+
+# fixed-version: Fixed after version 4.18rc3
+CVE_CHECK_IGNORE += "CVE-2018-13095"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13096"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13097"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13098"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13099"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13100"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-13405"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-13406"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14609"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14610"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14611"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14612"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14613"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14614"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14615"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14616"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14617"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2018-14619"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_IGNORE += "CVE-2018-14625"
+
+# fixed-version: Fixed after version 4.19rc6
+CVE_CHECK_IGNORE += "CVE-2018-14633"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2018-14634"
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_IGNORE += "CVE-2018-14641"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-14646"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_IGNORE += "CVE-2018-14656"
+
+# fixed-version: Fixed after version 4.18rc8
+CVE_CHECK_IGNORE += "CVE-2018-14678"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-14734"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-15471"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-15572"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-15594"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_IGNORE += "CVE-2018-16276"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2018-16597"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_IGNORE += "CVE-2018-16658"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-16862"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_IGNORE += "CVE-2018-16871"
+
+# fixed-version: Fixed after version 5.0rc5
+CVE_CHECK_IGNORE += "CVE-2018-16880"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_IGNORE += "CVE-2018-16882"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2018-16884"
+
+# CVE-2018-16885 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_IGNORE += "CVE-2018-17182"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-17972"
+
+# CVE-2018-17977 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-18021"
+
+# fixed-version: Fixed after version 4.19
+CVE_CHECK_IGNORE += "CVE-2018-18281"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_IGNORE += "CVE-2018-18386"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-18397"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-18445"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-18559"
+
+# CVE-2018-18653 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_IGNORE += "CVE-2018-18690"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2018-18710"
+
+# fixed-version: Fixed after version 4.20rc2
+CVE_CHECK_IGNORE += "CVE-2018-18955"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-19406"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-19407"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_IGNORE += "CVE-2018-19824"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_IGNORE += "CVE-2018-19854"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_IGNORE += "CVE-2018-19985"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_IGNORE += "CVE-2018-20169"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-20449"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2018-20509"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2018-20510"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_IGNORE += "CVE-2018-20511"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2018-20669"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2018-20784"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2018-20836"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2018-20854"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-20855"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-20856"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-20961"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-20976"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-21008"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2018-25015"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-25020"
+
+# CVE-2018-3574 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-3620"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-3639"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-3646"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2018-3665"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-3693"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-5332"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-5333"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-5344"
+
+# fixed-version: Fixed after version 4.18rc7
+CVE_CHECK_IGNORE += "CVE-2018-5390"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-5391"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-5703"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-5750"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-5803"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_IGNORE += "CVE-2018-5814"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-5848"
+
+# Skipping CVE-2018-5856, no affected_versions
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2018-5873"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-5953"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-5995"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-6412"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-6554"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-6555"
+
+# CVE-2018-6559 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2018-6927"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2018-7191"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-7273"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2018-7480"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2018-7492"
+
+# fixed-version: Fixed after version 4.16rc2
+CVE_CHECK_IGNORE += "CVE-2018-7566"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-7740"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-7754"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_IGNORE += "CVE-2018-7755"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-7757"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-7995"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-8043"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-8087"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-8781"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-8822"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-8897"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-9363"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_IGNORE += "CVE-2018-9385"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_IGNORE += "CVE-2018-9415"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2018-9422"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_IGNORE += "CVE-2018-9465"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_IGNORE += "CVE-2018-9516"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2018-9517"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2018-9518"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2018-9568"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-0136"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0145"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0146"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0147"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0148"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-0149"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-0154"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-0155"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-10124"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-10125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-10126"
+
+# CVE-2019-10140 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-10142"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_IGNORE += "CVE-2019-10207"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-10220"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-10638"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-10639"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2019-11085"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-11091"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-11135"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_IGNORE += "CVE-2019-11190"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-11191"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-1125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-11477"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-11478"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-11479"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-11486"
+
+# fixed-version: Fixed after version 5.1rc5
+CVE_CHECK_IGNORE += "CVE-2019-11487"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-11599"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-11683"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-11810"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-11811"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-11815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-11833"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-11884"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-12378"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12379"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-12380"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-12381"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12382"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12454"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12455"
+
+# CVE-2019-12456 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12614"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-12615"
+
+# fixed-version: Fixed after version 5.2rc7
+CVE_CHECK_IGNORE += "CVE-2019-12817"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_IGNORE += "CVE-2019-12818"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_IGNORE += "CVE-2019-12819"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2019-12881"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-12984"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-13233"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_IGNORE += "CVE-2019-13272"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-13631"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_IGNORE += "CVE-2019-13648"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-14283"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-14284"
+
+# fixed-version: Fixed after version 5.5rc7
+CVE_CHECK_IGNORE += "CVE-2019-14615"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2019-14763"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14814"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14815"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-14821"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14835"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-14895"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2019-14896"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2019-14897"
+
+# CVE-2019-14898 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-14901"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_IGNORE += "CVE-2019-15030"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_IGNORE += "CVE-2019-15031"
+
+# fixed-version: Fixed after version 5.2rc2
+CVE_CHECK_IGNORE += "CVE-2019-15090"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-15098"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-15099"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-15117"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-15118"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15211"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15212"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15213"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15214"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15215"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-15216"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15217"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15218"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15219"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15220"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_IGNORE += "CVE-2019-15221"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_IGNORE += "CVE-2019-15222"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15223"
+
+# CVE-2019-15239 has no known resolution
+
+# CVE-2019-15290 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-15291"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-15292"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-15504"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-15505"
+
+# fixed-version: Fixed after version 5.3rc6
+CVE_CHECK_IGNORE += "CVE-2019-15538"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-15666"
+
+# CVE-2019-15791 has no known resolution
+
+# CVE-2019-15792 has no known resolution
+
+# CVE-2019-15793 has no known resolution
+
+# CVE-2019-15794 needs backporting (fixed from 5.12)
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15807"
+
+# CVE-2019-15902 has no known resolution
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-15916"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-15917"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15918"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15919"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15920"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-15921"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-15922"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-15923"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-15924"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15925"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15926"
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_IGNORE += "CVE-2019-15927"
+
+# CVE-2019-16089 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-16229"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-16230"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-16231"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-16232"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_IGNORE += "CVE-2019-16233"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-16234"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-16413"
+
+# fixed-version: Fixed after version 5.3rc7
+CVE_CHECK_IGNORE += "CVE-2019-16714"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-16746"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2019-16921"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_IGNORE += "CVE-2019-16994"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-16995"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17052"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17053"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17054"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17055"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17056"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-17075"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-17133"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-17351"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-17666"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-18198"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-18282"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18660"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_IGNORE += "CVE-2019-18675"
+
+# CVE-2019-18680 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18683"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18786"
+
+# fixed-version: Fixed after version 5.1rc7
+CVE_CHECK_IGNORE += "CVE-2019-18805"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-18806"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-18807"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18808"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18809"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-18810"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-18811"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-18812"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-18813"
+
+# fixed-version: Fixed after version 5.7rc7
+CVE_CHECK_IGNORE += "CVE-2019-18814"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-18885"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19036"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-19037"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2019-19039"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19043"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19044"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19045"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19046"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19047"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19048"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_IGNORE += "CVE-2019-19049"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19050"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19051"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19052"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19053"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19054"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19055"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19056"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19057"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19058"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19059"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19060"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19061"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19062"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19063"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19064"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19065"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19066"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19067"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19068"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19069"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19070"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19071"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19072"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19073"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19074"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19075"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19076"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19077"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19078"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-19079"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19080"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19081"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19082"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19083"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-19227"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19241"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19252"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19318"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19319"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19332"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19338"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2019-19377"
+
+# CVE-2019-19378 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19447"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2019-19448"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2019-19449"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2019-19462"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19523"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-19524"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19525"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19526"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19527"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19528"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19529"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-19530"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19531"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19532"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19533"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19534"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19535"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19536"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-19537"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19543"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19602"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19767"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2019-19768"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2019-19769"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2019-19770"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19807"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19813"
+
+# CVE-2019-19814 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-19815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19922"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-19927"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-19947"
+
+# fixed-version: Fixed after version 5.5rc2
+CVE_CHECK_IGNORE += "CVE-2019-19965"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19966"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-1999"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-20054"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-20095"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-20096"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2019-2024"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2019-2025"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-20422"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2019-2054"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2019-20636"
+
+# CVE-2019-20794 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-20806"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2019-20810"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-20811"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-20812"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-20908"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_IGNORE += "CVE-2019-20934"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-2101"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-2181"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2019-2182"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-2213"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_IGNORE += "CVE-2019-2214"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2019-2215"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-25044"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-25045"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2019-3016"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-3459"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-3460"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2019-3701"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-3819"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2019-3837"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-3846"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-3874"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-3882"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-3887"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-3892"
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_IGNORE += "CVE-2019-3896"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-3900"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2019-3901"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-5108"
+
+# Skipping CVE-2019-5489, no affected_versions
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_IGNORE += "CVE-2019-6133"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-6974"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-7221"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-7222"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2019-7308"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_IGNORE += "CVE-2019-8912"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-8956"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-8980"
+
+# fixed-version: Fixed after version 5.0rc4
+CVE_CHECK_IGNORE += "CVE-2019-9003"
+
+# fixed-version: Fixed after version 5.0rc7
+CVE_CHECK_IGNORE += "CVE-2019-9162"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_IGNORE += "CVE-2019-9213"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2019-9245"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2019-9444"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9445"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-9453"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2019-9454"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2019-9455"
+
+# fixed-version: Fixed after version 4.16rc6
+CVE_CHECK_IGNORE += "CVE-2019-9456"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2019-9457"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2019-9458"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9466"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9500"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9503"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_IGNORE += "CVE-2019-9506"
+
+# fixed-version: Fixed after version 5.1rc2
+CVE_CHECK_IGNORE += "CVE-2019-9857"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-0009"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2020-0030"
+
+# fixed-version: Fixed after version 5.5rc2
+CVE_CHECK_IGNORE += "CVE-2020-0041"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_IGNORE += "CVE-2020-0066"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2020-0067"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-0110"
+
+# fixed-version: Fixed after version 5.7rc4
+CVE_CHECK_IGNORE += "CVE-2020-0255"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2020-0305"
+
+# CVE-2020-0347 has no known resolution
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-0404"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-0423"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2020-0427"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2020-0429"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2020-0430"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2020-0431"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-0432"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2020-0433"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2020-0435"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-0444"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-0465"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2020-0466"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-0543"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10135"
+
+# fixed-version: Fixed after version 5.5rc5
+CVE_CHECK_IGNORE += "CVE-2020-10690"
+
+# CVE-2020-10708 has no known resolution
+
+# fixed-version: Fixed after version 5.7rc6
+CVE_CHECK_IGNORE += "CVE-2020-10711"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2020-10720"
+
+# fixed-version: Fixed after version 5.7
+CVE_CHECK_IGNORE += "CVE-2020-10732"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2020-10742"
+
+# fixed-version: Fixed after version 5.7rc4
+CVE_CHECK_IGNORE += "CVE-2020-10751"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10757"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10766"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10767"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10768"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2020-10769"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2020-10773"
+
+# CVE-2020-10774 has no known resolution
+
+# fixed-version: Fixed after version 5.8rc6
+CVE_CHECK_IGNORE += "CVE-2020-10781"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-10942"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11494"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11565"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11608"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11609"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11668"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2020-11669"
+
+# CVE-2020-11725 has no known resolution
+
+# fixed-version: Fixed after version 5.7rc4
+CVE_CHECK_IGNORE += "CVE-2020-11884"
+
+# CVE-2020-11935 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2020-12114"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-12351"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-12352"
+
+# CVE-2020-12362 needs backporting (fixed from 5.11rc1)
+
+# CVE-2020-12363 needs backporting (fixed from 5.11rc1)
+
+# CVE-2020-12364 needs backporting (fixed from 5.11rc1)
+
+# fixed-version: Fixed after version 5.7rc3
+CVE_CHECK_IGNORE += "CVE-2020-12464"
+
+# fixed-version: Fixed after version 5.6rc6
+CVE_CHECK_IGNORE += "CVE-2020-12465"
+
+# fixed-version: Fixed after version 5.5rc7
+CVE_CHECK_IGNORE += "CVE-2020-12652"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-12653"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-12654"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-12655"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-12656"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-12657"
+
+# fixed-version: Fixed after version 5.7rc2
+CVE_CHECK_IGNORE += "CVE-2020-12659"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-12768"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2020-12769"
+
+# fixed-version: Fixed after version 5.7rc3
+CVE_CHECK_IGNORE += "CVE-2020-12770"
+
+# fixed-version: Fixed after version 5.8rc2
+CVE_CHECK_IGNORE += "CVE-2020-12771"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-12826"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-12888"
+
+# fixed-version: Fixed after version 5.10rc4
+CVE_CHECK_IGNORE += "CVE-2020-12912"
+
+# fixed-version: Fixed after version 5.7rc6
+CVE_CHECK_IGNORE += "CVE-2020-13143"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-13974"
+
+# CVE-2020-14304 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2020-14305"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2020-14314"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-14331"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-14351"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2020-14353"
+
+# fixed-version: Fixed after version 5.8rc5
+CVE_CHECK_IGNORE += "CVE-2020-14356"
+
+# fixed-version: Fixed after version 5.6rc6
+CVE_CHECK_IGNORE += "CVE-2020-14381"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-14385"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-14386"
+
+# fixed-version: Fixed after version 5.9rc6
+CVE_CHECK_IGNORE += "CVE-2020-14390"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2020-14416"
+
+# fixed-version: Fixed after version 5.8rc3
+CVE_CHECK_IGNORE += "CVE-2020-15393"
+
+# fixed-version: Fixed after version 5.8rc2
+CVE_CHECK_IGNORE += "CVE-2020-15436"
+
+# fixed-version: Fixed after version 5.8rc7
+CVE_CHECK_IGNORE += "CVE-2020-15437"
+
+# fixed-version: Fixed after version 5.8rc3
+CVE_CHECK_IGNORE += "CVE-2020-15780"
+
+# CVE-2020-15802 has no known resolution
+
+# fixed-version: Fixed after version 5.8rc6
+CVE_CHECK_IGNORE += "CVE-2020-15852"
+
+# cpe-stable-backport: Backported in 5.10.68
+CVE_CHECK_IGNORE += "CVE-2020-16119"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-16120"
+
+# fixed-version: Fixed after version 5.8
+CVE_CHECK_IGNORE += "CVE-2020-16166"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2020-1749"
+
+# fixed-version: Fixed after version 5.8rc4
+CVE_CHECK_IGNORE += "CVE-2020-24394"
+
+# fixed-version: Fixed after version 5.8
+CVE_CHECK_IGNORE += "CVE-2020-24490"
+
+# CVE-2020-24502 has no known resolution
+
+# CVE-2020-24503 has no known resolution
+
+# CVE-2020-24504 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2020-24586"
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2020-24587"
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2020-24588"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2020-25211"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-25212"
+
+# CVE-2020-25220 has no known resolution
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-25221"
+
+# fixed-version: Fixed after version 5.9rc5
+CVE_CHECK_IGNORE += "CVE-2020-25284"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-25285"
+
+# cpe-stable-backport: Backported in 5.10.20
+CVE_CHECK_IGNORE += "CVE-2020-25639"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-25641"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2020-25643"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2020-25645"
+
+# fixed-version: Fixed after version 5.10rc2
+CVE_CHECK_IGNORE += "CVE-2020-25656"
+
+# CVE-2020-25661 has no known resolution
+
+# CVE-2020-25662 has no known resolution
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-25668"
+
+# fixed-version: Fixed after version 5.10rc5
+CVE_CHECK_IGNORE += "CVE-2020-25669"
+
+# cpe-stable-backport: Backported in 5.10.30
+CVE_CHECK_IGNORE += "CVE-2020-25670"
+
+# cpe-stable-backport: Backported in 5.10.30
+CVE_CHECK_IGNORE += "CVE-2020-25671"
+
+# cpe-stable-backport: Backported in 5.10.30
+CVE_CHECK_IGNORE += "CVE-2020-25672"
+
+# cpe-stable-backport: Backported in 5.10.30
+CVE_CHECK_IGNORE += "CVE-2020-25673"
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-25704"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-25705"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-26088"
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2020-26139"
+
+# CVE-2020-26140 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2020-26141"
+
+# CVE-2020-26142 has no known resolution
+
+# CVE-2020-26143 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2020-26145"
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2020-26147"
+
+# cpe-stable-backport: Backported in 5.10.47
+CVE_CHECK_IGNORE += "CVE-2020-26541"
+
+# cpe-stable-backport: Backported in 5.10.40
+CVE_CHECK_IGNORE += "CVE-2020-26555"
+
+# CVE-2020-26556 has no known resolution
+
+# CVE-2020-26557 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.40
+CVE_CHECK_IGNORE += "CVE-2020-26558"
+
+# CVE-2020-26559 has no known resolution
+
+# CVE-2020-26560 has no known resolution
+
+# fixed-version: Fixed after version 5.6
+CVE_CHECK_IGNORE += "CVE-2020-27066"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2020-27067"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-27068"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27152"
+
+# cpe-stable-backport: Backported in 5.10.25
+CVE_CHECK_IGNORE += "CVE-2020-27170"
+
+# cpe-stable-backport: Backported in 5.10.25
+CVE_CHECK_IGNORE += "CVE-2020-27171"
+
+# fixed-version: Fixed after version 5.9
+CVE_CHECK_IGNORE += "CVE-2020-27194"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-2732"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2020-27418"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27673"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27675"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27777"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27784"
+
+# fixed-version: Fixed after version 5.7rc6
+CVE_CHECK_IGNORE += "CVE-2020-27786"
+
+# cpe-stable-backport: Backported in 5.10.4
+CVE_CHECK_IGNORE += "CVE-2020-27815"
+
+# cpe-stable-backport: Backported in 5.10.82
+CVE_CHECK_IGNORE += "CVE-2020-27820"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27825"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-27830"
+
+# fixed-version: Fixed after version 5.10rc6
+CVE_CHECK_IGNORE += "CVE-2020-27835"
+
+# fixed-version: Fixed after version 5.9rc6
+CVE_CHECK_IGNORE += "CVE-2020-28097"
+
+# cpe-stable-backport: Backported in 5.10.7
+CVE_CHECK_IGNORE += "CVE-2020-28374"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-28588"
+
+# fixed-version: Fixed after version 5.9
+CVE_CHECK_IGNORE += "CVE-2020-28915"
+
+# fixed-version: Fixed after version 5.10rc5
+CVE_CHECK_IGNORE += "CVE-2020-28941"
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-28974"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-29368"
+
+# fixed-version: Fixed after version 5.8rc7
+CVE_CHECK_IGNORE += "CVE-2020-29369"
+
+# fixed-version: Fixed after version 5.6rc7
+CVE_CHECK_IGNORE += "CVE-2020-29370"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2020-29371"
+
+# fixed-version: Fixed after version 5.7rc3
+CVE_CHECK_IGNORE += "CVE-2020-29372"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-29373"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-29374"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-29534"
+
+# cpe-stable-backport: Backported in 5.10.4
+CVE_CHECK_IGNORE += "CVE-2020-29568"
+
+# cpe-stable-backport: Backported in 5.10.4
+CVE_CHECK_IGNORE += "CVE-2020-29569"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-29660"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-29661"
+
+# cpe-stable-backport: Backported in 5.10.4
+CVE_CHECK_IGNORE += "CVE-2020-35499"
+
+# CVE-2020-35501 has no known resolution
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-35508"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2020-35513"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-35519"
+
+# cpe-stable-backport: Backported in 5.10.6
+CVE_CHECK_IGNORE += "CVE-2020-36158"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-36310"
+
+# fixed-version: Fixed after version 5.9rc5
+CVE_CHECK_IGNORE += "CVE-2020-36311"
+
+# fixed-version: Fixed after version 5.9rc5
+CVE_CHECK_IGNORE += "CVE-2020-36312"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-36313"
+
+# cpe-stable-backport: Backported in 5.10.6
+CVE_CHECK_IGNORE += "CVE-2020-36322"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-36385"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-36386"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-36387"
+
+# cpe-stable-backport: Backported in 5.10.96
+CVE_CHECK_IGNORE += "CVE-2020-36516"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-36557"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-36558"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-36691"
+
+# fixed-version: Fixed after version 5.10
+CVE_CHECK_IGNORE += "CVE-2020-36694"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-36766"
+
+# cpe-stable-backport: Backported in 5.10.61
+CVE_CHECK_IGNORE += "CVE-2020-3702"
+
+# fixed-version: Fixed after version 5.10rc5
+CVE_CHECK_IGNORE += "CVE-2020-4788"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2020-7053"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2020-8428"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2020-8647"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-8648"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2020-8649"
+
+# fixed-version: Fixed after version 5.10rc4
+CVE_CHECK_IGNORE += "CVE-2020-8694"
+
+# CVE-2020-8832 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2020-8834"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-8835"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-8992"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-9383"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-9391"
+
+# cpe-stable-backport: Backported in 5.10.40
+CVE_CHECK_IGNORE += "CVE-2021-0129"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2021-0342"
+
+# CVE-2021-0399 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-0447"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2021-0448"
+
+# cpe-stable-backport: Backported in 5.10.19
+CVE_CHECK_IGNORE += "CVE-2021-0512"
+
+# fixed-version: Fixed after version 5.8
+CVE_CHECK_IGNORE += "CVE-2021-0605"
+
+# CVE-2021-0606 has no known resolution
+
+# CVE-2021-0695 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.7
+CVE_CHECK_IGNORE += "CVE-2021-0707"
+
+# cpe-stable-backport: Backported in 5.10.55
+CVE_CHECK_IGNORE += "CVE-2021-0920"
+
+# CVE-2021-0924 has no known resolution
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2021-0929"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2021-0935"
+
+# CVE-2021-0936 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.31
+CVE_CHECK_IGNORE += "CVE-2021-0937"
+
+# fixed-version: Fixed after version 5.10rc4
+CVE_CHECK_IGNORE += "CVE-2021-0938"
+
+# cpe-stable-backport: Backported in 5.10.28
+CVE_CHECK_IGNORE += "CVE-2021-0941"
+
+# CVE-2021-0961 has no known resolution
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2021-1048"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2021-20177"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2021-20194"
+
+# CVE-2021-20219 has no known resolution
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2021-20226"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2021-20239"
+
+# fixed-version: Fixed after version 4.5rc5
+CVE_CHECK_IGNORE += "CVE-2021-20261"
+
+# fixed-version: Fixed after version 4.5rc3
+CVE_CHECK_IGNORE += "CVE-2021-20265"
+
+# cpe-stable-backport: Backported in 5.10.10
+CVE_CHECK_IGNORE += "CVE-2021-20268"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2021-20292"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2021-20317"
+
+# cpe-stable-backport: Backported in 5.10.68
+CVE_CHECK_IGNORE += "CVE-2021-20320"
+
+# cpe-stable-backport: Backported in 5.10.73
+CVE_CHECK_IGNORE += "CVE-2021-20321"
+
+# cpe-stable-backport: Backported in 5.10.65
+CVE_CHECK_IGNORE += "CVE-2021-20322"
+
+# cpe-stable-backport: Backported in 5.10.17
+CVE_CHECK_IGNORE += "CVE-2021-21781"
+
+# cpe-stable-backport: Backported in 5.10.47
+CVE_CHECK_IGNORE += "CVE-2021-22543"
+
+# cpe-stable-backport: Backported in 5.10.31
+CVE_CHECK_IGNORE += "CVE-2021-22555"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-22600"
+
+# cpe-stable-backport: Backported in 5.10.32
+CVE_CHECK_IGNORE += "CVE-2021-23133"
+
+# fixed-version: only affects 5.12rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2021-23134"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2021-26401"
+
+# cpe-stable-backport: Backported in 5.10.13
+CVE_CHECK_IGNORE += "CVE-2021-26708"
+
+# cpe-stable-backport: Backported in 5.10.18
+CVE_CHECK_IGNORE += "CVE-2021-26930"
+
+# cpe-stable-backport: Backported in 5.10.18
+CVE_CHECK_IGNORE += "CVE-2021-26931"
+
+# cpe-stable-backport: Backported in 5.10.18
+CVE_CHECK_IGNORE += "CVE-2021-26932"
+
+# CVE-2021-26934 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.21
+CVE_CHECK_IGNORE += "CVE-2021-27363"
+
+# cpe-stable-backport: Backported in 5.10.21
+CVE_CHECK_IGNORE += "CVE-2021-27364"
+
+# cpe-stable-backport: Backported in 5.10.21
+CVE_CHECK_IGNORE += "CVE-2021-27365"
+
+# cpe-stable-backport: Backported in 5.10.21
+CVE_CHECK_IGNORE += "CVE-2021-28038"
+
+# cpe-stable-backport: Backported in 5.10.21
+CVE_CHECK_IGNORE += "CVE-2021-28039"
+
+# cpe-stable-backport: Backported in 5.10.24
+CVE_CHECK_IGNORE += "CVE-2021-28375"
+
+# cpe-stable-backport: Backported in 5.10.24
+CVE_CHECK_IGNORE += "CVE-2021-28660"
+
+# cpe-stable-backport: Backported in 5.10.27
+CVE_CHECK_IGNORE += "CVE-2021-28688"
+
+# cpe-stable-backport: Backported in 5.10.43
+CVE_CHECK_IGNORE += "CVE-2021-28691"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-28711"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-28712"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-28713"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-28714"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-28715"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-28950"
+
+# cpe-stable-backport: Backported in 5.10.26
+CVE_CHECK_IGNORE += "CVE-2021-28951"
+
+# cpe-stable-backport: Backported in 5.10.26
+CVE_CHECK_IGNORE += "CVE-2021-28952"
+
+# cpe-stable-backport: Backported in 5.10.26
+CVE_CHECK_IGNORE += "CVE-2021-28964"
+
+# cpe-stable-backport: Backported in 5.10.26
+CVE_CHECK_IGNORE += "CVE-2021-28971"
+
+# cpe-stable-backport: Backported in 5.10.26
+CVE_CHECK_IGNORE += "CVE-2021-28972"
+
+# cpe-stable-backport: Backported in 5.10.29
+CVE_CHECK_IGNORE += "CVE-2021-29154"
+
+# cpe-stable-backport: Backported in 5.10.32
+CVE_CHECK_IGNORE += "CVE-2021-29155"
+
+# cpe-stable-backport: Backported in 5.10.27
+CVE_CHECK_IGNORE += "CVE-2021-29264"
+
+# cpe-stable-backport: Backported in 5.10.24
+CVE_CHECK_IGNORE += "CVE-2021-29265"
+
+# cpe-stable-backport: Backported in 5.10.26
+CVE_CHECK_IGNORE += "CVE-2021-29266"
+
+# cpe-stable-backport: Backported in 5.10.27
+CVE_CHECK_IGNORE += "CVE-2021-29646"
+
+# cpe-stable-backport: Backported in 5.10.27
+CVE_CHECK_IGNORE += "CVE-2021-29647"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-29648"
+
+# cpe-stable-backport: Backported in 5.10.27
+CVE_CHECK_IGNORE += "CVE-2021-29649"
+
+# cpe-stable-backport: Backported in 5.10.27
+CVE_CHECK_IGNORE += "CVE-2021-29650"
+
+# cpe-stable-backport: Backported in 5.10.28
+CVE_CHECK_IGNORE += "CVE-2021-29657"
+
+# cpe-stable-backport: Backported in 5.10.21
+CVE_CHECK_IGNORE += "CVE-2021-30002"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-30178"
+
+# cpe-stable-backport: Backported in 5.10.37
+CVE_CHECK_IGNORE += "CVE-2021-31440"
+
+# cpe-stable-backport: Backported in 5.10.10
+CVE_CHECK_IGNORE += "CVE-2021-3178"
+
+# cpe-stable-backport: Backported in 5.10.35
+CVE_CHECK_IGNORE += "CVE-2021-31829"
+
+# cpe-stable-backport: Backported in 5.10.27
+CVE_CHECK_IGNORE += "CVE-2021-31916"
+
+# CVE-2021-32078 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.10.37
+CVE_CHECK_IGNORE += "CVE-2021-32399"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-32606"
+
+# cpe-stable-backport: Backported in 5.10.24
+CVE_CHECK_IGNORE += "CVE-2021-33033"
+
+# cpe-stable-backport: Backported in 5.10.37
+CVE_CHECK_IGNORE += "CVE-2021-33034"
+
+# CVE-2021-33061 needs backporting (fixed from 5.18rc1)
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2021-33098"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-33135"
+
+# fixed-version: only affects 5.12rc8 onwards
+CVE_CHECK_IGNORE += "CVE-2021-33200"
+
+# cpe-stable-backport: Backported in 5.10.12
+CVE_CHECK_IGNORE += "CVE-2021-3347"
+
+# cpe-stable-backport: Backported in 5.10.13
+CVE_CHECK_IGNORE += "CVE-2021-3348"
+
+# cpe-stable-backport: Backported in 5.10.46
+CVE_CHECK_IGNORE += "CVE-2021-33624"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2021-33630"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2021-33631"
+
+# cpe-stable-backport: Backported in 5.10.130
+CVE_CHECK_IGNORE += "CVE-2021-33655"
+
+# cpe-stable-backport: Backported in 5.10.127
+CVE_CHECK_IGNORE += "CVE-2021-33656"
+
+# cpe-stable-backport: Backported in 5.10.52
+CVE_CHECK_IGNORE += "CVE-2021-33909"
+
+# fixed-version: Fixed after version 5.10
+CVE_CHECK_IGNORE += "CVE-2021-3411"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2021-3428"
+
+# cpe-stable-backport: Backported in 5.10.19
+CVE_CHECK_IGNORE += "CVE-2021-3444"
+
+# cpe-stable-backport: Backported in 5.10.56
+CVE_CHECK_IGNORE += "CVE-2021-34556"
+
+# cpe-stable-backport: Backported in 5.10.46
+CVE_CHECK_IGNORE += "CVE-2021-34693"
+
+# cpe-stable-backport: Backported in 5.10.28
+CVE_CHECK_IGNORE += "CVE-2021-3483"
+
+# cpe-stable-backport: Backported in 5.10.62
+CVE_CHECK_IGNORE += "CVE-2021-34866"
+
+# cpe-stable-backport: Backported in 5.10.37
+CVE_CHECK_IGNORE += "CVE-2021-3489"
+
+# cpe-stable-backport: Backported in 5.10.37
+CVE_CHECK_IGNORE += "CVE-2021-3490"
+
+# cpe-stable-backport: Backported in 5.10.37
+CVE_CHECK_IGNORE += "CVE-2021-3491"
+
+# CVE-2021-3492 has no known resolution
+
+# CVE-2021-3493 needs backporting (fixed from 5.11rc1)
+
+# cpe-stable-backport: Backported in 5.10.42
+CVE_CHECK_IGNORE += "CVE-2021-34981"
+
+# cpe-stable-backport: Backported in 5.10.32
+CVE_CHECK_IGNORE += "CVE-2021-3501"
+
+# cpe-stable-backport: Backported in 5.10.47
+CVE_CHECK_IGNORE += "CVE-2021-35039"
+
+# cpe-stable-backport: Backported in 5.10.36
+CVE_CHECK_IGNORE += "CVE-2021-3506"
+
+# CVE-2021-3542 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.36
+CVE_CHECK_IGNORE += "CVE-2021-3543"
+
+# cpe-stable-backport: Backported in 5.10.56
+CVE_CHECK_IGNORE += "CVE-2021-35477"
+
+# cpe-stable-backport: Backported in 5.10.43
+CVE_CHECK_IGNORE += "CVE-2021-3564"
+
+# cpe-stable-backport: Backported in 5.10.43
+CVE_CHECK_IGNORE += "CVE-2021-3573"
+
+# cpe-stable-backport: Backported in 5.10.43
+CVE_CHECK_IGNORE += "CVE-2021-3587"
+
+# cpe-stable-backport: Backported in 5.10.16
+CVE_CHECK_IGNORE += "CVE-2021-3600"
+
+# cpe-stable-backport: Backported in 5.10.50
+CVE_CHECK_IGNORE += "CVE-2021-3609"
+
+# cpe-stable-backport: Backported in 5.10.20
+CVE_CHECK_IGNORE += "CVE-2021-3612"
+
+# fixed-version: Fixed after version 5.5rc7
+CVE_CHECK_IGNORE += "CVE-2021-3635"
+
+# cpe-stable-backport: Backported in 5.10.80
+CVE_CHECK_IGNORE += "CVE-2021-3640"
+
+# cpe-stable-backport: Backported in 5.10.60
+CVE_CHECK_IGNORE += "CVE-2021-3653"
+
+# cpe-stable-backport: Backported in 5.10.51
+CVE_CHECK_IGNORE += "CVE-2021-3655"
+
+# cpe-stable-backport: Backported in 5.10.60
+CVE_CHECK_IGNORE += "CVE-2021-3656"
+
+# cpe-stable-backport: Backported in 5.10.30
+CVE_CHECK_IGNORE += "CVE-2021-3659"
+
+# CVE-2021-3669 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.10.54
+CVE_CHECK_IGNORE += "CVE-2021-3679"
+
+# CVE-2021-3714 has no known resolution
+
+# fixed-version: Fixed after version 5.6
+CVE_CHECK_IGNORE += "CVE-2021-3715"
+
+# cpe-stable-backport: Backported in 5.10.54
+CVE_CHECK_IGNORE += "CVE-2021-37159"
+
+# cpe-stable-backport: Backported in 5.10.59
+CVE_CHECK_IGNORE += "CVE-2021-3732"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-3736"
+
+# cpe-stable-backport: Backported in 5.10.62
+CVE_CHECK_IGNORE += "CVE-2021-3739"
+
+# cpe-stable-backport: Backported in 5.10.46
+CVE_CHECK_IGNORE += "CVE-2021-3743"
+
+# cpe-stable-backport: Backported in 5.10.71
+CVE_CHECK_IGNORE += "CVE-2021-3744"
+
+# cpe-stable-backport: Backported in 5.10.80
+CVE_CHECK_IGNORE += "CVE-2021-3752"
+
+# cpe-stable-backport: Backported in 5.10.62
+CVE_CHECK_IGNORE += "CVE-2021-3753"
+
+# cpe-stable-backport: Backported in 5.10.54
+CVE_CHECK_IGNORE += "CVE-2021-37576"
+
+# cpe-stable-backport: Backported in 5.10.154
+CVE_CHECK_IGNORE += "CVE-2021-3759"
+
+# cpe-stable-backport: Backported in 5.10.76
+CVE_CHECK_IGNORE += "CVE-2021-3760"
+
+# cpe-stable-backport: Backported in 5.10.71
+CVE_CHECK_IGNORE += "CVE-2021-3764"
+
+# cpe-stable-backport: Backported in 5.10.77
+CVE_CHECK_IGNORE += "CVE-2021-3772"
+
+# cpe-stable-backport: Backported in 5.10.52
+CVE_CHECK_IGNORE += "CVE-2021-38160"
+
+# cpe-stable-backport: Backported in 5.10.60
+CVE_CHECK_IGNORE += "CVE-2021-38166"
+
+# cpe-stable-backport: Backported in 5.10.44
+CVE_CHECK_IGNORE += "CVE-2021-38198"
+
+# cpe-stable-backport: Backported in 5.10.52
+CVE_CHECK_IGNORE += "CVE-2021-38199"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-38200"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-38201"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-38202"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-38203"
+
+# cpe-stable-backport: Backported in 5.10.54
+CVE_CHECK_IGNORE += "CVE-2021-38204"
+
+# cpe-stable-backport: Backported in 5.10.59
+CVE_CHECK_IGNORE += "CVE-2021-38205"
+
+# cpe-stable-backport: Backported in 5.10.46
+CVE_CHECK_IGNORE += "CVE-2021-38206"
+
+# cpe-stable-backport: Backported in 5.10.46
+CVE_CHECK_IGNORE += "CVE-2021-38207"
+
+# cpe-stable-backport: Backported in 5.10.43
+CVE_CHECK_IGNORE += "CVE-2021-38208"
+
+# cpe-stable-backport: Backported in 5.10.35
+CVE_CHECK_IGNORE += "CVE-2021-38209"
+
+# cpe-stable-backport: Backported in 5.10.71
+CVE_CHECK_IGNORE += "CVE-2021-38300"
+
+# CVE-2021-3847 has no known resolution
+
+# CVE-2021-3864 has no known resolution
+
+# CVE-2021-3892 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.75
+CVE_CHECK_IGNORE += "CVE-2021-3894"
+
+# cpe-stable-backport: Backported in 5.10.76
+CVE_CHECK_IGNORE += "CVE-2021-3896"
+
+# cpe-stable-backport: Backported in 5.10.91
+CVE_CHECK_IGNORE += "CVE-2021-3923"
+
+# cpe-stable-backport: Backported in 5.10.62
+CVE_CHECK_IGNORE += "CVE-2021-39633"
+
+# fixed-version: Fixed after version 5.9rc8
+CVE_CHECK_IGNORE += "CVE-2021-39634"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2021-39636"
+
+# cpe-stable-backport: Backported in 5.10.7
+CVE_CHECK_IGNORE += "CVE-2021-39648"
+
+# cpe-stable-backport: Backported in 5.10.24
+CVE_CHECK_IGNORE += "CVE-2021-39656"
+
+# cpe-stable-backport: Backported in 5.10.11
+CVE_CHECK_IGNORE += "CVE-2021-39657"
+
+# cpe-stable-backport: Backported in 5.10.85
+CVE_CHECK_IGNORE += "CVE-2021-39685"
+
+# cpe-stable-backport: Backported in 5.10.80
+CVE_CHECK_IGNORE += "CVE-2021-39686"
+
+# cpe-stable-backport: Backported in 5.10.85
+CVE_CHECK_IGNORE += "CVE-2021-39698"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_IGNORE += "CVE-2021-39711"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2021-39713"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-39714"
+
+# CVE-2021-39800 has no known resolution
+
+# CVE-2021-39801 has no known resolution
+
+# CVE-2021-39802 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.83
+CVE_CHECK_IGNORE += "CVE-2021-4001"
+
+# cpe-stable-backport: Backported in 5.10.82
+CVE_CHECK_IGNORE += "CVE-2021-4002"
+
+# CVE-2021-4023 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.10.71
+CVE_CHECK_IGNORE += "CVE-2021-4028"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-4032"
+
+# cpe-stable-backport: Backported in 5.10.146
+CVE_CHECK_IGNORE += "CVE-2021-4037"
+
+# cpe-stable-backport: Backported in 5.10.63
+CVE_CHECK_IGNORE += "CVE-2021-40490"
+
+# cpe-stable-backport: Backported in 5.10.84
+CVE_CHECK_IGNORE += "CVE-2021-4083"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-4090"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-4093"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-4095"
+
+# cpe-stable-backport: Backported in 5.10.68
+CVE_CHECK_IGNORE += "CVE-2021-41073"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-4135"
+
+# cpe-stable-backport: Backported in 5.10.78
+CVE_CHECK_IGNORE += "CVE-2021-4148"
+
+# cpe-stable-backport: Backported in 5.10.75
+CVE_CHECK_IGNORE += "CVE-2021-4149"
+
+# CVE-2021-4150 needs backporting (fixed from 5.15rc7)
+
+# cpe-stable-backport: Backported in 5.10.52
+CVE_CHECK_IGNORE += "CVE-2021-4154"
+
+# cpe-stable-backport: Backported in 5.10.91
+CVE_CHECK_IGNORE += "CVE-2021-4155"
+
+# cpe-stable-backport: Backported in 5.10.38
+CVE_CHECK_IGNORE += "CVE-2021-4157"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2021-4159"
+
+# cpe-stable-backport: Backported in 5.10.73
+CVE_CHECK_IGNORE += "CVE-2021-41864"
+
+# cpe-stable-backport: Backported in 5.10.111
+CVE_CHECK_IGNORE += "CVE-2021-4197"
+
+# cpe-stable-backport: Backported in 5.10.61
+CVE_CHECK_IGNORE += "CVE-2021-42008"
+
+# cpe-stable-backport: Backported in 5.10.82
+CVE_CHECK_IGNORE += "CVE-2021-4202"
+
+# cpe-stable-backport: Backported in 5.10.71
+CVE_CHECK_IGNORE += "CVE-2021-4203"
+
+# CVE-2021-4204 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2021-4218"
+
+# cpe-stable-backport: Backported in 5.10.67
+CVE_CHECK_IGNORE += "CVE-2021-42252"
+
+# cpe-stable-backport: Backported in 5.10.77
+CVE_CHECK_IGNORE += "CVE-2021-42327"
+
+# cpe-stable-backport: Backported in 5.10.78
+CVE_CHECK_IGNORE += "CVE-2021-42739"
+
+# cpe-stable-backport: Backported in 5.10.76
+CVE_CHECK_IGNORE += "CVE-2021-43056"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-43057"
+
+# cpe-stable-backport: Backported in 5.10.77
+CVE_CHECK_IGNORE += "CVE-2021-43267"
+
+# cpe-stable-backport: Backported in 5.10.76
+CVE_CHECK_IGNORE += "CVE-2021-43389"
+
+# cpe-stable-backport: Backported in 5.10.84
+CVE_CHECK_IGNORE += "CVE-2021-43975"
+
+# cpe-stable-backport: Backported in 5.10.94
+CVE_CHECK_IGNORE += "CVE-2021-43976"
+
+# cpe-stable-backport: Backported in 5.10.89
+CVE_CHECK_IGNORE += "CVE-2021-44733"
+
+# cpe-stable-backport: Backported in 5.10.200
+CVE_CHECK_IGNORE += "CVE-2021-44879"
+
+# cpe-stable-backport: Backported in 5.10.91
+CVE_CHECK_IGNORE += "CVE-2021-45095"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2021-45100"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2021-45402"
+
+# cpe-stable-backport: Backported in 5.10.89
+CVE_CHECK_IGNORE += "CVE-2021-45469"
+
+# fixed-version: only affects 5.13rc4 onwards
+CVE_CHECK_IGNORE += "CVE-2021-45480"
+
+# cpe-stable-backport: Backported in 5.10.51
+CVE_CHECK_IGNORE += "CVE-2021-45485"
+
+# cpe-stable-backport: Backported in 5.10.37
+CVE_CHECK_IGNORE += "CVE-2021-45486"
+
+# cpe-stable-backport: Backported in 5.10.80
+CVE_CHECK_IGNORE += "CVE-2021-45868"
+
+# cpe-stable-backport: Backported in 5.10.64
+CVE_CHECK_IGNORE += "CVE-2021-46283"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-0001"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-0002"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-0168"
+
+# cpe-stable-backport: Backported in 5.10.146
+CVE_CHECK_IGNORE += "CVE-2022-0171"
+
+# cpe-stable-backport: Backported in 5.10.93
+CVE_CHECK_IGNORE += "CVE-2022-0185"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0264"
+
+# cpe-stable-backport: Backported in 5.10.54
+CVE_CHECK_IGNORE += "CVE-2022-0286"
+
+# cpe-stable-backport: Backported in 5.10.75
+CVE_CHECK_IGNORE += "CVE-2022-0322"
+
+# cpe-stable-backport: Backported in 5.10.95
+CVE_CHECK_IGNORE += "CVE-2022-0330"
+
+# CVE-2022-0382 needs backporting (fixed from 5.16)
+
+# CVE-2022-0400 has no known resolution
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0433"
+
+# cpe-stable-backport: Backported in 5.10.100
+CVE_CHECK_IGNORE += "CVE-2022-0435"
+
+# CVE-2022-0480 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.10.100
+CVE_CHECK_IGNORE += "CVE-2022-0487"
+
+# cpe-stable-backport: Backported in 5.10.97
+CVE_CHECK_IGNORE += "CVE-2022-0492"
+
+# cpe-stable-backport: Backported in 5.10.115
+CVE_CHECK_IGNORE += "CVE-2022-0494"
+
+# CVE-2022-0500 needs backporting (fixed from 5.17rc1)
+
+# cpe-stable-backport: Backported in 5.10.100
+CVE_CHECK_IGNORE += "CVE-2022-0516"
+
+# cpe-stable-backport: Backported in 5.10.96
+CVE_CHECK_IGNORE += "CVE-2022-0617"
+
+# cpe-stable-backport: Backported in 5.10.76
+CVE_CHECK_IGNORE += "CVE-2022-0644"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0646"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0742"
+
+# fixed-version: Fixed after version 5.8rc6
+CVE_CHECK_IGNORE += "CVE-2022-0812"
+
+# cpe-stable-backport: Backported in 5.10.102
+CVE_CHECK_IGNORE += "CVE-2022-0847"
+
+# cpe-stable-backport: Backported in 5.10.50
+CVE_CHECK_IGNORE += "CVE-2022-0850"
+
+# fixed-version: only affects 5.17rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0854"
+
+# cpe-stable-backport: Backported in 5.10.106
+CVE_CHECK_IGNORE += "CVE-2022-0995"
+
+# CVE-2022-0998 needs backporting (fixed from 5.17rc1)
+
+# cpe-stable-backport: Backported in 5.10.106
+CVE_CHECK_IGNORE += "CVE-2022-1011"
+
+# cpe-stable-backport: Backported in 5.10.119
+CVE_CHECK_IGNORE += "CVE-2022-1012"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1015"
+
+# cpe-stable-backport: Backported in 5.10.109
+CVE_CHECK_IGNORE += "CVE-2022-1016"
+
+# fixed-version: only affects 5.12rc3 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1043"
+
+# cpe-stable-backport: Backported in 5.10.109
+CVE_CHECK_IGNORE += "CVE-2022-1048"
+
+# cpe-stable-backport: Backported in 5.10.97
+CVE_CHECK_IGNORE += "CVE-2022-1055"
+
+# CVE-2022-1116 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-1158"
+
+# cpe-stable-backport: Backported in 5.10.121
+CVE_CHECK_IGNORE += "CVE-2022-1184"
+
+# cpe-stable-backport: Backported in 5.10.89
+CVE_CHECK_IGNORE += "CVE-2022-1195"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-1198"
+
+# cpe-stable-backport: Backported in 5.10.106
+CVE_CHECK_IGNORE += "CVE-2022-1199"
+
+# cpe-stable-backport: Backported in 5.10.112
+CVE_CHECK_IGNORE += "CVE-2022-1204"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1205"
+
+# CVE-2022-1247 has no known resolution
+
+# CVE-2022-1263 needs backporting (fixed from 5.18rc3)
+
+# CVE-2022-1280 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-1353"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2022-1419"
+
+# cpe-stable-backport: Backported in 5.10.134
+CVE_CHECK_IGNORE += "CVE-2022-1462"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1508"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-1516"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1651"
+
+# cpe-stable-backport: Backported in 5.10.118
+CVE_CHECK_IGNORE += "CVE-2022-1652"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1671"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2022-1678"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2022-1679"
+
+# cpe-stable-backport: Backported in 5.10.118
+CVE_CHECK_IGNORE += "CVE-2022-1729"
+
+# cpe-stable-backport: Backported in 5.10.115
+CVE_CHECK_IGNORE += "CVE-2022-1734"
+
+# cpe-stable-backport: Backported in 5.10.117
+CVE_CHECK_IGNORE += "CVE-2022-1786"
+
+# cpe-stable-backport: Backported in 5.10.119
+CVE_CHECK_IGNORE += "CVE-2022-1789"
+
+# cpe-stable-backport: Backported in 5.10.114
+CVE_CHECK_IGNORE += "CVE-2022-1836"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1852"
+
+# fixed-version: only affects 5.17rc8 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1882"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1943"
+
+# cpe-stable-backport: Backported in 5.10.120
+CVE_CHECK_IGNORE += "CVE-2022-1966"
+
+# cpe-stable-backport: Backported in 5.10.120
+CVE_CHECK_IGNORE += "CVE-2022-1972"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1973"
+
+# cpe-stable-backport: Backported in 5.10.115
+CVE_CHECK_IGNORE += "CVE-2022-1974"
+
+# cpe-stable-backport: Backported in 5.10.115
+CVE_CHECK_IGNORE += "CVE-2022-1975"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1976"
+
+# fixed-version: only affects 5.13rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1998"
+
+# cpe-stable-backport: Backported in 5.10.102
+CVE_CHECK_IGNORE += "CVE-2022-20008"
+
+# cpe-stable-backport: Backported in 5.10.85
+CVE_CHECK_IGNORE += "CVE-2022-20132"
+
+# cpe-stable-backport: Backported in 5.10.64
+CVE_CHECK_IGNORE += "CVE-2022-20141"
+
+# CVE-2022-20148 needs backporting (fixed from 5.16rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-20153"
+
+# cpe-stable-backport: Backported in 5.10.90
+CVE_CHECK_IGNORE += "CVE-2022-20154"
+
+# cpe-stable-backport: Backported in 5.10.108
+CVE_CHECK_IGNORE += "CVE-2022-20158"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2022-20166"
+
+# cpe-stable-backport: Backported in 5.10.108
+CVE_CHECK_IGNORE += "CVE-2022-20368"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-20369"
+
+# CVE-2022-20409 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.10.142
+CVE_CHECK_IGNORE += "CVE-2022-20421"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2022-20422"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_IGNORE += "CVE-2022-20423"
+
+# CVE-2022-20424 needs backporting (fixed from 5.12rc1)
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2022-20565"
+
+# cpe-stable-backport: Backported in 5.10.135
+CVE_CHECK_IGNORE += "CVE-2022-20566"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2022-20567"
+
+# CVE-2022-20568 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.10.120
+CVE_CHECK_IGNORE += "CVE-2022-20572"
+
+# cpe-stable-backport: Backported in 5.10.120
+CVE_CHECK_IGNORE += "CVE-2022-2078"
+
+# cpe-stable-backport: Backported in 5.10.123
+CVE_CHECK_IGNORE += "CVE-2022-21123"
+
+# cpe-stable-backport: Backported in 5.10.123
+CVE_CHECK_IGNORE += "CVE-2022-21125"
+
+# cpe-stable-backport: Backported in 5.10.123
+CVE_CHECK_IGNORE += "CVE-2022-21166"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_IGNORE += "CVE-2022-21385"
+
+# cpe-stable-backport: Backported in 5.10.119
+CVE_CHECK_IGNORE += "CVE-2022-21499"
+
+# cpe-stable-backport: Backported in 5.10.134
+CVE_CHECK_IGNORE += "CVE-2022-21505"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2022-2153"
+
+# cpe-stable-backport: Backported in 5.10.170
+CVE_CHECK_IGNORE += "CVE-2022-2196"
+
+# CVE-2022-2209 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.95
+CVE_CHECK_IGNORE += "CVE-2022-22942"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23036"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23037"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23038"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23039"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23040"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23041"
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23042"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-2308"
+
+# cpe-stable-backport: Backported in 5.10.129
+CVE_CHECK_IGNORE += "CVE-2022-2318"
+
+# CVE-2022-23222 needs backporting (fixed from 5.17rc1)
+
+# CVE-2022-2327 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-2380"
+
+# cpe-stable-backport: Backported in 5.10.133
+CVE_CHECK_IGNORE += "CVE-2022-23816"
+
+# CVE-2022-23825 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.105
+CVE_CHECK_IGNORE += "CVE-2022-23960"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-24122"
+
+# cpe-stable-backport: Backported in 5.10.96
+CVE_CHECK_IGNORE += "CVE-2022-24448"
+
+# cpe-stable-backport: Backported in 5.10.104
+CVE_CHECK_IGNORE += "CVE-2022-24958"
+
+# cpe-stable-backport: Backported in 5.10.96
+CVE_CHECK_IGNORE += "CVE-2022-24959"
+
+# cpe-stable-backport: Backported in 5.10.120
+CVE_CHECK_IGNORE += "CVE-2022-2503"
+
+# cpe-stable-backport: Backported in 5.10.101
+CVE_CHECK_IGNORE += "CVE-2022-25258"
+
+# CVE-2022-25265 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.101
+CVE_CHECK_IGNORE += "CVE-2022-25375"
+
+# cpe-stable-backport: Backported in 5.10.103
+CVE_CHECK_IGNORE += "CVE-2022-25636"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2022-2585"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2022-2586"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2022-2588"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-2590"
+
+# cpe-stable-backport: Backported in 5.10.150
+CVE_CHECK_IGNORE += "CVE-2022-2602"
+
+# cpe-stable-backport: Backported in 5.10.129
+CVE_CHECK_IGNORE += "CVE-2022-26365"
+
+# cpe-stable-backport: Backported in 5.10.136
+CVE_CHECK_IGNORE += "CVE-2022-26373"
+
+# cpe-stable-backport: Backported in 5.10.113
+CVE_CHECK_IGNORE += "CVE-2022-2639"
+
+# cpe-stable-backport: Backported in 5.10.109
+CVE_CHECK_IGNORE += "CVE-2022-26490"
+
+# cpe-stable-backport: Backported in 5.10.143
+CVE_CHECK_IGNORE += "CVE-2022-2663"
+
+# CVE-2022-26878 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.103
+CVE_CHECK_IGNORE += "CVE-2022-26966"
+
+# cpe-stable-backport: Backported in 5.10.103
+CVE_CHECK_IGNORE += "CVE-2022-27223"
+
+# cpe-stable-backport: Backported in 5.10.108
+CVE_CHECK_IGNORE += "CVE-2022-27666"
+
+# CVE-2022-27672 needs backporting (fixed from 6.2)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-2785"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-27950"
+
+# cpe-stable-backport: Backported in 5.10.109
+CVE_CHECK_IGNORE += "CVE-2022-28356"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-28388"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-28389"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-28390"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-2873"
+
+# fixed-version: only affects 5.17rc3 onwards
+CVE_CHECK_IGNORE += "CVE-2022-28796"
+
+# cpe-stable-backport: Backported in 5.10.117
+CVE_CHECK_IGNORE += "CVE-2022-28893"
+
+# cpe-stable-backport: Backported in 5.10.140
+CVE_CHECK_IGNORE += "CVE-2022-2905"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-29156"
+
+# cpe-stable-backport: Backported in 5.10.97
+CVE_CHECK_IGNORE += "CVE-2022-2938"
+
+# cpe-stable-backport: Backported in 5.10.113
+CVE_CHECK_IGNORE += "CVE-2022-29581"
+
+# cpe-stable-backport: Backported in 5.10.111
+CVE_CHECK_IGNORE += "CVE-2022-29582"
+
+# cpe-stable-backport: Backported in 5.10.120
+CVE_CHECK_IGNORE += "CVE-2022-2959"
+
+# CVE-2022-2961 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.101
+CVE_CHECK_IGNORE += "CVE-2022-2964"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-2977"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-2978"
+
+# cpe-stable-backport: Backported in 5.10.133
+CVE_CHECK_IGNORE += "CVE-2022-29900"
+
+# cpe-stable-backport: Backported in 5.10.133
+CVE_CHECK_IGNORE += "CVE-2022-29901"
+
+# CVE-2022-2991 needs backporting (fixed from 5.15rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-29968"
+
+# cpe-stable-backport: Backported in 5.10.140
+CVE_CHECK_IGNORE += "CVE-2022-3028"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-30594"
+
+# cpe-stable-backport: Backported in 5.10.145
+CVE_CHECK_IGNORE += "CVE-2022-3061"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3077"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-3078"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3103"
+
+# cpe-stable-backport: Backported in 5.10.122
+CVE_CHECK_IGNORE += "CVE-2022-3104"
+
+# cpe-stable-backport: Backported in 5.10.91
+CVE_CHECK_IGNORE += "CVE-2022-3105"
+
+# cpe-stable-backport: Backported in 5.10.88
+CVE_CHECK_IGNORE += "CVE-2022-3106"
+
+# cpe-stable-backport: Backported in 5.10.108
+CVE_CHECK_IGNORE += "CVE-2022-3107"
+
+# CVE-2022-3108 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3110"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-3111"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-3112"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-3113"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3114"
+
+# cpe-stable-backport: Backported in 5.10.121
+CVE_CHECK_IGNORE += "CVE-2022-3115"
+
+# cpe-stable-backport: Backported in 5.10.156
+CVE_CHECK_IGNORE += "CVE-2022-3169"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3170"
+
+# CVE-2022-3176 needs backporting (fixed from 5.17rc1)
+
+# cpe-stable-backport: Backported in 5.10.111
+CVE_CHECK_IGNORE += "CVE-2022-3202"
+
+# cpe-stable-backport: Backported in 5.10.120
+CVE_CHECK_IGNORE += "CVE-2022-32250"
+
+# cpe-stable-backport: Backported in 5.10.125
+CVE_CHECK_IGNORE += "CVE-2022-32296"
+
+# CVE-2022-3238 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2022-3239"
+
+# cpe-stable-backport: Backported in 5.10.122
+CVE_CHECK_IGNORE += "CVE-2022-32981"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-3303"
+
+# CVE-2022-3344 needs backporting (fixed from 6.1rc7)
+
+# cpe-stable-backport: Backported in 5.10.129
+CVE_CHECK_IGNORE += "CVE-2022-33740"
+
+# cpe-stable-backport: Backported in 5.10.129
+CVE_CHECK_IGNORE += "CVE-2022-33741"
+
+# cpe-stable-backport: Backported in 5.10.129
+CVE_CHECK_IGNORE += "CVE-2022-33742"
+
+# cpe-stable-backport: Backported in 5.10.129
+CVE_CHECK_IGNORE += "CVE-2022-33743"
+
+# cpe-stable-backport: Backported in 5.10.129
+CVE_CHECK_IGNORE += "CVE-2022-33744"
+
+# cpe-stable-backport: Backported in 5.10.114
+CVE_CHECK_IGNORE += "CVE-2022-33981"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2022-3424"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3435"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-34494"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-34495"
+
+# cpe-stable-backport: Backported in 5.10.130
+CVE_CHECK_IGNORE += "CVE-2022-34918"
+
+# cpe-stable-backport: Backported in 5.10.156
+CVE_CHECK_IGNORE += "CVE-2022-3521"
+
+# CVE-2022-3522 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3523 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.10.154
+CVE_CHECK_IGNORE += "CVE-2022-3524"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3526"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3531"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3532"
+
+# CVE-2022-3533 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2022-3534"
+
+# cpe-stable-backport: Backported in 5.10.150
+CVE_CHECK_IGNORE += "CVE-2022-3535"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3541"
+
+# cpe-stable-backport: Backported in 5.10.150
+CVE_CHECK_IGNORE += "CVE-2022-3542"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3543"
+
+# CVE-2022-3544 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.160
+CVE_CHECK_IGNORE += "CVE-2022-3545"
+
+# cpe-stable-backport: Backported in 5.10.154
+CVE_CHECK_IGNORE += "CVE-2022-3564"
+
+# cpe-stable-backport: Backported in 5.10.150
+CVE_CHECK_IGNORE += "CVE-2022-3565"
+
+# CVE-2022-3566 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3567 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.10.121
+CVE_CHECK_IGNORE += "CVE-2022-3577"
+
+# cpe-stable-backport: Backported in 5.10.143
+CVE_CHECK_IGNORE += "CVE-2022-3586"
+
+# cpe-stable-backport: Backported in 5.10.150
+CVE_CHECK_IGNORE += "CVE-2022-3594"
+
+# CVE-2022-3595 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3606 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.132
+CVE_CHECK_IGNORE += "CVE-2022-36123"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3619"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-3621"
+
+# cpe-stable-backport: Backported in 5.10.159
+CVE_CHECK_IGNORE += "CVE-2022-3623"
+
+# CVE-2022-3624 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.10.138
+CVE_CHECK_IGNORE += "CVE-2022-3625"
+
+# cpe-stable-backport: Backported in 5.10.154
+CVE_CHECK_IGNORE += "CVE-2022-3628"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2022-36280"
+
+# cpe-stable-backport: Backported in 5.10.138
+CVE_CHECK_IGNORE += "CVE-2022-3629"
+
+# fixed-version: only affects 5.19rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3630"
+
+# cpe-stable-backport: Backported in 5.10.138
+CVE_CHECK_IGNORE += "CVE-2022-3633"
+
+# cpe-stable-backport: Backported in 5.10.138
+CVE_CHECK_IGNORE += "CVE-2022-3635"
+
+# CVE-2022-3636 needs backporting (fixed from 5.19rc1)
+
+# fixed-version: only affects 5.19 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3640"
+
+# cpe-stable-backport: Backported in 5.10.193
+CVE_CHECK_IGNORE += "CVE-2022-36402"
+
+# CVE-2022-3642 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.159
+CVE_CHECK_IGNORE += "CVE-2022-3643"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-3646"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-3649"
+
+# cpe-stable-backport: Backported in 5.10.134
+CVE_CHECK_IGNORE += "CVE-2022-36879"
+
+# cpe-stable-backport: Backported in 5.10.135
+CVE_CHECK_IGNORE += "CVE-2022-36946"
+
+# cpe-stable-backport: Backported in 5.10.170
+CVE_CHECK_IGNORE += "CVE-2022-3707"
+
+# CVE-2022-38096 has no known resolution
+
+# CVE-2022-38457 needs backporting (fixed from 6.2rc4)
+
+# CVE-2022-3903 needs backporting (fixed from 6.1rc2)
+
+# fixed-version: only affects 5.18 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3910"
+
+# CVE-2022-39188 needs backporting (fixed from 5.19rc8)
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2022-39189"
+
+# cpe-stable-backport: Backported in 5.10.140
+CVE_CHECK_IGNORE += "CVE-2022-39190"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3977"
+
+# cpe-stable-backport: Backported in 5.10.145
+CVE_CHECK_IGNORE += "CVE-2022-39842"
+
+# CVE-2022-40133 needs backporting (fixed from 6.2rc4)
+
+# cpe-stable-backport: Backported in 5.10.143
+CVE_CHECK_IGNORE += "CVE-2022-40307"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-40476"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-40768"
+
+# cpe-stable-backport: Backported in 5.10.142
+CVE_CHECK_IGNORE += "CVE-2022-4095"
+
+# cpe-stable-backport: Backported in 5.10.189
+CVE_CHECK_IGNORE += "CVE-2022-40982"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2022-41218"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2022-41222"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-4127"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-4128"
+
+# cpe-stable-backport: Backported in 5.10.166
+CVE_CHECK_IGNORE += "CVE-2022-4129"
+
+# fixed-version: only affects 5.17rc2 onwards
+CVE_CHECK_IGNORE += "CVE-2022-4139"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-41674"
+
+# CVE-2022-41848 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.150
+CVE_CHECK_IGNORE += "CVE-2022-41849"
+
+# cpe-stable-backport: Backported in 5.10.150
+CVE_CHECK_IGNORE += "CVE-2022-41850"
+
+# cpe-stable-backport: Backported in 5.10.112
+CVE_CHECK_IGNORE += "CVE-2022-41858"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2022-42328"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2022-42329"
+
+# cpe-stable-backport: Backported in 5.10.146
+CVE_CHECK_IGNORE += "CVE-2022-42432"
+
+# cpe-stable-backport: Backported in 5.10.181
+CVE_CHECK_IGNORE += "CVE-2022-4269"
+
+# cpe-stable-backport: Backported in 5.10.141
+CVE_CHECK_IGNORE += "CVE-2022-42703"
+
+# cpe-stable-backport: Backported in 5.10.149
+CVE_CHECK_IGNORE += "CVE-2022-42719"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-42720"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-42721"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-42722"
+
+# cpe-stable-backport: Backported in 5.10.154
+CVE_CHECK_IGNORE += "CVE-2022-42895"
+
+# cpe-stable-backport: Backported in 5.10.154
+CVE_CHECK_IGNORE += "CVE-2022-42896"
+
+# cpe-stable-backport: Backported in 5.10.148
+CVE_CHECK_IGNORE += "CVE-2022-43750"
+
+# cpe-stable-backport: Backported in 5.10.158
+CVE_CHECK_IGNORE += "CVE-2022-4378"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2022-4379"
+
+# cpe-stable-backport: Backported in 5.10.165
+CVE_CHECK_IGNORE += "CVE-2022-4382"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-43945"
+
+# CVE-2022-44032 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44033 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44034 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-4543 has no known resolution
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-45869"
+
+# CVE-2022-45884 has no known resolution
+
+# CVE-2022-45885 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.183
+CVE_CHECK_IGNORE += "CVE-2022-45886"
+
+# cpe-stable-backport: Backported in 5.10.183
+CVE_CHECK_IGNORE += "CVE-2022-45887"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-45888"
+
+# cpe-stable-backport: Backported in 5.10.183
+CVE_CHECK_IGNORE += "CVE-2022-45919"
+
+# cpe-stable-backport: Backported in 5.10.161
+CVE_CHECK_IGNORE += "CVE-2022-45934"
+
+# cpe-stable-backport: Backported in 5.10.142
+CVE_CHECK_IGNORE += "CVE-2022-4662"
+
+# CVE-2022-4696 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.10.136
+CVE_CHECK_IGNORE += "CVE-2022-4744"
+
+# cpe-stable-backport: Backported in 5.10.157
+CVE_CHECK_IGNORE += "CVE-2022-47518"
+
+# cpe-stable-backport: Backported in 5.10.157
+CVE_CHECK_IGNORE += "CVE-2022-47519"
+
+# cpe-stable-backport: Backported in 5.10.157
+CVE_CHECK_IGNORE += "CVE-2022-47520"
+
+# cpe-stable-backport: Backported in 5.10.157
+CVE_CHECK_IGNORE += "CVE-2022-47521"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2022-47929"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-47938"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-47939"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-47940"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-47941"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-47942"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-47943"
+
+# CVE-2022-47946 needs backporting (fixed from 5.12rc2)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-4842"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-48423"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-48424"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-48425"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-48502"
+
+# cpe-stable-backport: Backported in 5.10.118
+CVE_CHECK_IGNORE += "CVE-2022-48619"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2023-0030"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2023-0045"
+
+# cpe-stable-backport: Backported in 5.10.80
+CVE_CHECK_IGNORE += "CVE-2023-0047"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-0122"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-0160"
+
+# cpe-stable-backport: Backported in 5.10.164
+CVE_CHECK_IGNORE += "CVE-2023-0179"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-0210"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2023-0240"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2023-0266"
+
+# CVE-2023-0386 needs backporting (fixed from 6.2rc6)
+
+# cpe-stable-backport: Backported in 5.10.164
+CVE_CHECK_IGNORE += "CVE-2023-0394"
+
+# cpe-stable-backport: Backported in 5.10.165
+CVE_CHECK_IGNORE += "CVE-2023-0458"
+
+# cpe-stable-backport: Backported in 5.10.170
+CVE_CHECK_IGNORE += "CVE-2023-0459"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2023-0461"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-0468"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-0469"
+
+# cpe-stable-backport: Backported in 5.10.152
+CVE_CHECK_IGNORE += "CVE-2023-0590"
+
+# CVE-2023-0597 needs backporting (fixed from 6.2rc1)
+
+# cpe-stable-backport: Backported in 5.10.153
+CVE_CHECK_IGNORE += "CVE-2023-0615"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1032"
+
+# cpe-stable-backport: Backported in 5.10.166
+CVE_CHECK_IGNORE += "CVE-2023-1073"
+
+# cpe-stable-backport: Backported in 5.10.166
+CVE_CHECK_IGNORE += "CVE-2023-1074"
+
+# CVE-2023-1075 needs backporting (fixed from 6.2rc7)
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-1076"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-1077"
+
+# cpe-stable-backport: Backported in 5.10.168
+CVE_CHECK_IGNORE += "CVE-2023-1078"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-1079"
+
+# cpe-stable-backport: Backported in 5.10.137
+CVE_CHECK_IGNORE += "CVE-2023-1095"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-1118"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1192"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1193"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1194"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1195"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-1206"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2023-1249"
+
+# cpe-stable-backport: Backported in 5.10.80
+CVE_CHECK_IGNORE += "CVE-2023-1252"
+
+# cpe-stable-backport: Backported in 5.10.169
+CVE_CHECK_IGNORE += "CVE-2023-1281"
+
+# CVE-2023-1295 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-1380"
+
+# cpe-stable-backport: Backported in 5.10.157
+CVE_CHECK_IGNORE += "CVE-2023-1382"
+
+# cpe-stable-backport: Backported in 5.10.10
+CVE_CHECK_IGNORE += "CVE-2023-1390"
+
+# CVE-2023-1476 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.169
+CVE_CHECK_IGNORE += "CVE-2023-1513"
+
+# cpe-stable-backport: Backported in 5.10.102
+CVE_CHECK_IGNORE += "CVE-2023-1582"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1583"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-1611"
+
+# cpe-stable-backport: Backported in 5.10.111
+CVE_CHECK_IGNORE += "CVE-2023-1637"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1652"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-1670"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-1829"
+
+# cpe-stable-backport: Backported in 5.10.118
+CVE_CHECK_IGNORE += "CVE-2023-1838"
+
+# cpe-stable-backport: Backported in 5.10.176
+CVE_CHECK_IGNORE += "CVE-2023-1855"
+
+# cpe-stable-backport: Backported in 5.10.178
+CVE_CHECK_IGNORE += "CVE-2023-1859"
+
+# CVE-2023-1872 needs backporting (fixed from 5.18rc2)
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-1989"
+
+# cpe-stable-backport: Backported in 5.10.176
+CVE_CHECK_IGNORE += "CVE-2023-1990"
+
+# fixed-version: only affects 5.19rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1998"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-2002"
+
+# cpe-stable-backport: Backported in 5.10.157
+CVE_CHECK_IGNORE += "CVE-2023-2006"
+
+# CVE-2023-2007 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.10.127
+CVE_CHECK_IGNORE += "CVE-2023-2008"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2019"
+
+# cpe-stable-backport: Backported in 5.10.189
+CVE_CHECK_IGNORE += "CVE-2023-20569"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-20588"
+
+# cpe-stable-backport: Backported in 5.10.187
+CVE_CHECK_IGNORE += "CVE-2023-20593"
+
+# CVE-2023-20928 needs backporting (fixed from 6.0rc1)
+
+# CVE-2023-20937 has no known resolution
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-20938"
+
+# CVE-2023-20941 has no known resolution
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-21102"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-21106"
+
+# cpe-stable-backport: Backported in 5.10.184
+CVE_CHECK_IGNORE += "CVE-2023-2124"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-21255"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-21264"
+
+# CVE-2023-21400 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.179
+CVE_CHECK_IGNORE += "CVE-2023-2156"
+
+# cpe-stable-backport: Backported in 5.10.168
+CVE_CHECK_IGNORE += "CVE-2023-2162"
+
+# cpe-stable-backport: Backported in 5.10.179
+CVE_CHECK_IGNORE += "CVE-2023-2163"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2166"
+
+# CVE-2023-2176 needs backporting (fixed from 6.3rc1)
+
+# cpe-stable-backport: Backported in 5.10.135
+CVE_CHECK_IGNORE += "CVE-2023-2177"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-2194"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2235"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2236"
+
+# cpe-stable-backport: Backported in 5.10.179
+CVE_CHECK_IGNORE += "CVE-2023-2248"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-2269"
+
+# CVE-2023-22995 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-22996"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-22997"
+
+# cpe-stable-backport: Backported in 5.10.171
+CVE_CHECK_IGNORE += "CVE-2023-22998"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-22999"
+
+# CVE-2023-23000 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-23001"
+
+# cpe-stable-backport: Backported in 5.10.94
+CVE_CHECK_IGNORE += "CVE-2023-23002"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-23003"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-23004"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-23005"
+
+# cpe-stable-backport: Backported in 5.10.90
+CVE_CHECK_IGNORE += "CVE-2023-23006"
+
+# CVE-2023-23039 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2023-23454"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2023-23455"
+
+# cpe-stable-backport: Backported in 5.10.166
+CVE_CHECK_IGNORE += "CVE-2023-23559"
+
+# CVE-2023-23586 needs backporting (fixed from 5.12rc1)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2430"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-2483"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-25012"
+
+# cpe-stable-backport: Backported in 5.10.179
+CVE_CHECK_IGNORE += "CVE-2023-2513"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-25775"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2598"
+
+# CVE-2023-26242 has no known resolution
+
+# CVE-2023-2640 has no known resolution
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-26544"
+
+# cpe-stable-backport: Backported in 5.10.169
+CVE_CHECK_IGNORE += "CVE-2023-26545"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-26605"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-26606"
+
+# cpe-stable-backport: Backported in 5.10.156
+CVE_CHECK_IGNORE += "CVE-2023-26607"
+
+# cpe-stable-backport: Backported in 5.10.159
+CVE_CHECK_IGNORE += "CVE-2023-28327"
+
+# cpe-stable-backport: Backported in 5.10.163
+CVE_CHECK_IGNORE += "CVE-2023-28328"
+
+# cpe-stable-backport: Backported in 5.10.110
+CVE_CHECK_IGNORE += "CVE-2023-28410"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-28464"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-28466"
+
+# cpe-stable-backport: Backported in 5.10.143
+CVE_CHECK_IGNORE += "CVE-2023-2860"
+
+# cpe-stable-backport: Backported in 5.10.51
+CVE_CHECK_IGNORE += "CVE-2023-28772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-28866"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-2898"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-2985"
+
+# cpe-stable-backport: Backported in 5.10.153
+CVE_CHECK_IGNORE += "CVE-2023-3006"
+
+# Skipping CVE-2023-3022, no affected_versions
+
+# cpe-stable-backport: Backported in 5.10.176
+CVE_CHECK_IGNORE += "CVE-2023-30456"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-30772"
+
+# cpe-stable-backport: Backported in 5.10.181
+CVE_CHECK_IGNORE += "CVE-2023-3090"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2023-3106"
+
+# Skipping CVE-2023-3108, no affected_versions
+
+# CVE-2023-31081 has no known resolution
+
+# CVE-2023-31082 has no known resolution
+
+# CVE-2023-31083 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-31084 needs backporting (fixed from 6.4rc3)
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2023-31085"
+
+# cpe-stable-backport: Backported in 5.10.184
+CVE_CHECK_IGNORE += "CVE-2023-3111"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-3117"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-31248"
+
+# cpe-stable-backport: Backported in 5.10.181
+CVE_CHECK_IGNORE += "CVE-2023-3141"
+
+# cpe-stable-backport: Backported in 5.10.179
+CVE_CHECK_IGNORE += "CVE-2023-31436"
+
+# cpe-stable-backport: Backported in 5.10.115
+CVE_CHECK_IGNORE += "CVE-2023-3159"
+
+# cpe-stable-backport: Backported in 5.10.168
+CVE_CHECK_IGNORE += "CVE-2023-3161"
+
+# cpe-stable-backport: Backported in 5.10.183
+CVE_CHECK_IGNORE += "CVE-2023-3212"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-3220"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-32233"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-32247"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-32248"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-32250"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-32252"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-32254"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-32257"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-32258"
+
+# cpe-stable-backport: Backported in 5.10.168
+CVE_CHECK_IGNORE += "CVE-2023-32269"
+
+# CVE-2023-32629 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-3268"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3269"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3312"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3317"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-33203"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-33250"
+
+# cpe-stable-backport: Backported in 5.10.177
+CVE_CHECK_IGNORE += "CVE-2023-33288"
+
+# cpe-stable-backport: Backported in 5.10.185
+CVE_CHECK_IGNORE += "CVE-2023-3338"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3355"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3357"
+
+# cpe-stable-backport: Backported in 5.10.166
+CVE_CHECK_IGNORE += "CVE-2023-3358"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3359"
+
+# CVE-2023-3389 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-3390"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-33951"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-33952"
+
+# CVE-2023-3397 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.184
+CVE_CHECK_IGNORE += "CVE-2023-34255"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-34256"
+
+# fixed-version: only affects 6.1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-34319"
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2023-34324"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3439"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-35001"
+
+# cpe-stable-backport: Backported in 5.10.168
+CVE_CHECK_IGNORE += "CVE-2023-3567"
+
+# CVE-2023-35693 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.183
+CVE_CHECK_IGNORE += "CVE-2023-35788"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-35823"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-35824"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-35826"
+
+# cpe-stable-backport: Backported in 5.10.199
+CVE_CHECK_IGNORE += "CVE-2023-35827"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-35828"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2023-35829"
+
+# cpe-stable-backport: Backported in 5.10.185
+CVE_CHECK_IGNORE += "CVE-2023-3609"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-3610"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-3611"
+
+# CVE-2023-3640 has no known resolution
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-37453"
+
+# CVE-2023-37454 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.192
+CVE_CHECK_IGNORE += "CVE-2023-3772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3773"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-3776"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-3777"
+
+# cpe-stable-backport: Backported in 5.10.154
+CVE_CHECK_IGNORE += "CVE-2023-3812"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38409"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38426"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38427"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38428"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38429"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38430"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38431"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38432"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-3863"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3865"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3866"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3867"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-39189"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-39191"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-39192"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-39193"
+
+# cpe-stable-backport: Backported in 5.10.192
+CVE_CHECK_IGNORE += "CVE-2023-39194"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-39197"
+
+# cpe-stable-backport: Backported in 5.10.208
+CVE_CHECK_IGNORE += "CVE-2023-39198"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-4004"
+
+# CVE-2023-4010 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-4015"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-40283"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-40791"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-4128"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-4132"
+
+# CVE-2023-4133 needs backporting (fixed from 6.3)
+
+# CVE-2023-4134 needs backporting (fixed from 6.5rc1)
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-4147"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4155"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4194"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-4206"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-4207"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-4208"
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2023-4244"
+
+# cpe-stable-backport: Backported in 5.10.190
+CVE_CHECK_IGNORE += "CVE-2023-4273"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-42752"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-42753"
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2023-42754"
+
+# cpe-stable-backport: Backported in 5.10.197
+CVE_CHECK_IGNORE += "CVE-2023-42755"
+
+# fixed-version: only affects 6.4rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2023-42756"
+
+# cpe-stable-backport: Backported in 5.10.121
+CVE_CHECK_IGNORE += "CVE-2023-4385"
+
+# cpe-stable-backport: Backported in 5.10.118
+CVE_CHECK_IGNORE += "CVE-2023-4387"
+
+# cpe-stable-backport: Backported in 5.10.112
+CVE_CHECK_IGNORE += "CVE-2023-4389"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4394"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-44466"
+
+# cpe-stable-backport: Backported in 5.10.118
+CVE_CHECK_IGNORE += "CVE-2023-4459"
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2023-4563"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4569"
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-45862"
+
+# cpe-stable-backport: Backported in 5.10.200
+CVE_CHECK_IGNORE += "CVE-2023-45863"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-45871"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-45898"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4610"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4611"
+
+# CVE-2023-4622 needs backporting (fixed from 6.5rc1)
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-4623"
+
+# cpe-stable-backport: Backported in 5.10.199
+CVE_CHECK_IGNORE += "CVE-2023-46343"
+
+# cpe-stable-backport: Backported in 5.10.199
+CVE_CHECK_IGNORE += "CVE-2023-46813"
+
+# cpe-stable-backport: Backported in 5.10.209
+CVE_CHECK_IGNORE += "CVE-2023-46838"
+
+# cpe-stable-backport: Backported in 5.10.202
+CVE_CHECK_IGNORE += "CVE-2023-46862"
+
+# CVE-2023-47233 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.53
+CVE_CHECK_IGNORE += "CVE-2023-4732"
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2023-4881"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-4921"
+
+# CVE-2023-50431 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5090"
+
+# cpe-stable-backport: Backported in 5.10.192
+CVE_CHECK_IGNORE += "CVE-2023-51042"
+
+# cpe-stable-backport: Backported in 5.10.188
+CVE_CHECK_IGNORE += "CVE-2023-51043"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5158"
+
+# cpe-stable-backport: Backported in 5.10.206
+CVE_CHECK_IGNORE += "CVE-2023-51779"
+
+# cpe-stable-backport: Backported in 5.10.199
+CVE_CHECK_IGNORE += "CVE-2023-5178"
+
+# cpe-stable-backport: Backported in 5.10.205
+CVE_CHECK_IGNORE += "CVE-2023-51780"
+
+# cpe-stable-backport: Backported in 5.10.205
+CVE_CHECK_IGNORE += "CVE-2023-51781"
+
+# cpe-stable-backport: Backported in 5.10.205
+CVE_CHECK_IGNORE += "CVE-2023-51782"
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2023-5197"
+
+# cpe-stable-backport: Backported in 5.10.208
+CVE_CHECK_IGNORE += "CVE-2023-52340"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5345"
+
+# fixed-version: only affects 6.2 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5633"
+
+# cpe-stable-backport: Backported in 5.10.199
+CVE_CHECK_IGNORE += "CVE-2023-5717"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5972"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6039"
+
+# cpe-stable-backport: Backported in 5.10.208
+CVE_CHECK_IGNORE += "CVE-2023-6040"
+
+# fixed-version: only affects 6.6rc3 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6111"
+
+# cpe-stable-backport: Backported in 5.10.203
+CVE_CHECK_IGNORE += "CVE-2023-6121"
+
+# cpe-stable-backport: Backported in 5.10.195
+CVE_CHECK_IGNORE += "CVE-2023-6176"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6200"
+
+# CVE-2023-6238 has no known resolution
+
+# CVE-2023-6240 has no known resolution
+
+# CVE-2023-6270 has no known resolution
+
+# CVE-2023-6356 has no known resolution
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6531"
+
+# CVE-2023-6535 has no known resolution
+
+# CVE-2023-6536 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.192
+CVE_CHECK_IGNORE += "CVE-2023-6546"
+
+# CVE-2023-6560 needs backporting (fixed from 6.7rc4)
+
+# cpe-stable-backport: Backported in 5.10.206
+CVE_CHECK_IGNORE += "CVE-2023-6606"
+
+# CVE-2023-6610 needs backporting (fixed from 6.7rc7)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6622"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6679"
+
+# cpe-stable-backport: Backported in 5.10.204
+CVE_CHECK_IGNORE += "CVE-2023-6817"
+
+# cpe-stable-backport: Backported in 5.10.209
+CVE_CHECK_IGNORE += "CVE-2023-6915"
+
+# cpe-stable-backport: Backported in 5.10.204
+CVE_CHECK_IGNORE += "CVE-2023-6931"
+
+# cpe-stable-backport: Backported in 5.10.203
+CVE_CHECK_IGNORE += "CVE-2023-6932"
+
+# CVE-2023-7042 has no known resolution
+
+# cpe-stable-backport: Backported in 5.10.173
+CVE_CHECK_IGNORE += "CVE-2023-7192"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0193"
+
+# CVE-2024-0340 needs backporting (fixed from 6.4rc6)
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0443"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0562"
+
+# CVE-2024-0564 has no known resolution
+
+# CVE-2024-0565 needs backporting (fixed from 6.7rc6)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0582"
+
+# cpe-stable-backport: Backported in 5.10.203
+CVE_CHECK_IGNORE += "CVE-2024-0584"
+
+# CVE-2024-0607 needs backporting (fixed from 6.7rc2)
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0639"
+
+# cpe-stable-backport: Backported in 5.10.198
+CVE_CHECK_IGNORE += "CVE-2024-0641"
+
+# cpe-stable-backport: Backported in 5.10.208
+CVE_CHECK_IGNORE += "CVE-2024-0646"
+
+# cpe-stable-backport: Backported in 5.10.180
+CVE_CHECK_IGNORE += "CVE-2024-0775"
+
+# CVE-2024-0841 has no known resolution
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-1085"
+
+# CVE-2024-1086 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-1312 needs backporting (fixed from 6.5rc4)
+
+# CVE-2024-21803 has no known resolution
+
+# CVE-2024-22099 has no known resolution
+
+# CVE-2024-22386 has no known resolution
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-22705"
+
+# CVE-2024-23196 has no known resolution
+
+# CVE-2024-23307 has no known resolution
+
+# CVE-2024-23848 has no known resolution
+
+# CVE-2024-23849 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-23850 has no known resolution
+
+# CVE-2024-23851 has no known resolution
+
+# CVE-2024-24855 has no known resolution
+
+# CVE-2024-24857 has no known resolution
+
+# CVE-2024-24858 has no known resolution
+
+# CVE-2024-24859 has no known resolution
+
+# CVE-2024-24860 has no known resolution
+
+# CVE-2024-24861 has no known resolution
+
+# CVE-2024-24864 has no known resolution
+
diff --git a/meta/recipes-kernel/linux/cve-exclusion_5.15.inc b/meta/recipes-kernel/linux/cve-exclusion_5.15.inc
new file mode 100644
index 0000000000..922d7f457f
--- /dev/null
+++ b/meta/recipes-kernel/linux/cve-exclusion_5.15.inc
@@ -0,0 +1,7753 @@
+
+# Auto-generated CVE metadata, DO NOT EDIT BY HAND.
+# Generated at 2024-03-08 10:36:30.059302 for version 5.15.150
+
+python check_kernel_cve_status_version() {
+ this_version = "5.15.150"
+ kernel_version = d.getVar("LINUX_VERSION")
+ if kernel_version != this_version:
+ bb.warn("Kernel CVE status needs updating: generated for %s but kernel is %s" % (this_version, kernel_version))
+}
+do_cve_check[prefuncs] += "check_kernel_cve_status_version"
+
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_IGNORE += "CVE-2003-1604"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2004-0230"
+
+# CVE-2005-3660 has no known resolution
+
+# fixed-version: Fixed after version 2.6.26rc5
+CVE_CHECK_IGNORE += "CVE-2006-3635"
+
+# fixed-version: Fixed after version 2.6.19rc3
+CVE_CHECK_IGNORE += "CVE-2006-5331"
+
+# fixed-version: Fixed after version 2.6.19rc2
+CVE_CHECK_IGNORE += "CVE-2006-6128"
+
+# CVE-2007-3719 has no known resolution
+
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_IGNORE += "CVE-2007-4774"
+
+# fixed-version: Fixed after version 2.6.24rc6
+CVE_CHECK_IGNORE += "CVE-2007-6761"
+
+# fixed-version: Fixed after version 2.6.20rc5
+CVE_CHECK_IGNORE += "CVE-2007-6762"
+
+# CVE-2008-2544 has no known resolution
+
+# CVE-2008-4609 has no known resolution
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_IGNORE += "CVE-2008-7316"
+
+# fixed-version: Fixed after version 2.6.31rc6
+CVE_CHECK_IGNORE += "CVE-2009-2692"
+
+# fixed-version: Fixed after version 2.6.23rc9
+CVE_CHECK_IGNORE += "CVE-2010-0008"
+
+# fixed-version: Fixed after version 2.6.36rc5
+CVE_CHECK_IGNORE += "CVE-2010-3432"
+
+# CVE-2010-4563 has no known resolution
+
+# fixed-version: Fixed after version 2.6.37rc6
+CVE_CHECK_IGNORE += "CVE-2010-4648"
+
+# fixed-version: Fixed after version 2.6.38rc1
+CVE_CHECK_IGNORE += "CVE-2010-5313"
+
+# CVE-2010-5321 has no known resolution
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_IGNORE += "CVE-2010-5328"
+
+# fixed-version: Fixed after version 2.6.39rc1
+CVE_CHECK_IGNORE += "CVE-2010-5329"
+
+# fixed-version: Fixed after version 2.6.34rc7
+CVE_CHECK_IGNORE += "CVE-2010-5331"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_IGNORE += "CVE-2010-5332"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_IGNORE += "CVE-2011-4098"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_IGNORE += "CVE-2011-4131"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_IGNORE += "CVE-2011-4915"
+
+# CVE-2011-4916 has no known resolution
+
+# CVE-2011-4917 has no known resolution
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_IGNORE += "CVE-2011-5321"
+
+# fixed-version: Fixed after version 3.1rc1
+CVE_CHECK_IGNORE += "CVE-2011-5327"
+
+# fixed-version: Fixed after version 3.7rc2
+CVE_CHECK_IGNORE += "CVE-2012-0957"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-2119"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-2136"
+
+# fixed-version: Fixed after version 3.5rc2
+CVE_CHECK_IGNORE += "CVE-2012-2137"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_IGNORE += "CVE-2012-2313"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_IGNORE += "CVE-2012-2319"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2012-2372"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_IGNORE += "CVE-2012-2375"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-2390"
+
+# fixed-version: Fixed after version 3.5rc4
+CVE_CHECK_IGNORE += "CVE-2012-2669"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_IGNORE += "CVE-2012-2744"
+
+# fixed-version: Fixed after version 3.4rc3
+CVE_CHECK_IGNORE += "CVE-2012-2745"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_IGNORE += "CVE-2012-3364"
+
+# fixed-version: Fixed after version 3.4rc5
+CVE_CHECK_IGNORE += "CVE-2012-3375"
+
+# fixed-version: Fixed after version 3.5rc5
+CVE_CHECK_IGNORE += "CVE-2012-3400"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_IGNORE += "CVE-2012-3412"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-3430"
+
+# fixed-version: Fixed after version 2.6.19rc4
+CVE_CHECK_IGNORE += "CVE-2012-3510"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_IGNORE += "CVE-2012-3511"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-3520"
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_IGNORE += "CVE-2012-3552"
+
+# Skipping CVE-2012-4220, no affected_versions
+
+# Skipping CVE-2012-4221, no affected_versions
+
+# Skipping CVE-2012-4222, no affected_versions
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_IGNORE += "CVE-2012-4398"
+
+# fixed-version: Fixed after version 2.6.36rc4
+CVE_CHECK_IGNORE += "CVE-2012-4444"
+
+# fixed-version: Fixed after version 3.7rc6
+CVE_CHECK_IGNORE += "CVE-2012-4461"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_IGNORE += "CVE-2012-4467"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_IGNORE += "CVE-2012-4508"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2012-4530"
+
+# CVE-2012-4542 has no known resolution
+
+# fixed-version: Fixed after version 3.7rc4
+CVE_CHECK_IGNORE += "CVE-2012-4565"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2012-5374"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2012-5375"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-5517"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2012-6536"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2012-6537"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2012-6538"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6539"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6540"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6541"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6542"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6543"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6544"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6545"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2012-6546"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-6547"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-6548"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_IGNORE += "CVE-2012-6549"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_IGNORE += "CVE-2012-6638"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_IGNORE += "CVE-2012-6647"
+
+# fixed-version: Fixed after version 3.6
+CVE_CHECK_IGNORE += "CVE-2012-6657"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_IGNORE += "CVE-2012-6689"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-6701"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2012-6703"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2012-6704"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_IGNORE += "CVE-2012-6712"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-0160"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-0190"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_IGNORE += "CVE-2013-0216"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_IGNORE += "CVE-2013-0217"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_IGNORE += "CVE-2013-0228"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_IGNORE += "CVE-2013-0231"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_IGNORE += "CVE-2013-0268"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_IGNORE += "CVE-2013-0290"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2013-0309"
+
+# fixed-version: Fixed after version 3.5
+CVE_CHECK_IGNORE += "CVE-2013-0310"
+
+# fixed-version: Fixed after version 3.7rc8
+CVE_CHECK_IGNORE += "CVE-2013-0311"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-0313"
+
+# fixed-version: Fixed after version 3.11rc7
+CVE_CHECK_IGNORE += "CVE-2013-0343"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_IGNORE += "CVE-2013-0349"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-0871"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-0913"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-0914"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-1059"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-1763"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-1767"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_IGNORE += "CVE-2013-1772"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_IGNORE += "CVE-2013-1773"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_IGNORE += "CVE-2013-1774"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1792"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-1796"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-1797"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-1798"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_IGNORE += "CVE-2013-1819"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_IGNORE += "CVE-2013-1826"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_IGNORE += "CVE-2013-1827"
+
+# fixed-version: Fixed after version 3.9rc2
+CVE_CHECK_IGNORE += "CVE-2013-1828"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1848"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1858"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-1860"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_IGNORE += "CVE-2013-1928"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_IGNORE += "CVE-2013-1929"
+
+# Skipping CVE-2013-1935, no affected_versions
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_IGNORE += "CVE-2013-1943"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_IGNORE += "CVE-2013-1956"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_IGNORE += "CVE-2013-1957"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_IGNORE += "CVE-2013-1958"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-1959"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-1979"
+
+# fixed-version: Fixed after version 3.8rc2
+CVE_CHECK_IGNORE += "CVE-2013-2015"
+
+# fixed-version: Fixed after version 2.6.34
+CVE_CHECK_IGNORE += "CVE-2013-2017"
+
+# fixed-version: Fixed after version 3.8rc4
+CVE_CHECK_IGNORE += "CVE-2013-2058"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2094"
+
+# fixed-version: Fixed after version 2.6.34rc4
+CVE_CHECK_IGNORE += "CVE-2013-2128"
+
+# fixed-version: Fixed after version 3.11rc3
+CVE_CHECK_IGNORE += "CVE-2013-2140"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2141"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2146"
+
+# fixed-version: Fixed after version 3.12rc3
+CVE_CHECK_IGNORE += "CVE-2013-2147"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-2148"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-2164"
+
+# Skipping CVE-2013-2188, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_IGNORE += "CVE-2013-2206"
+
+# Skipping CVE-2013-2224, no affected_versions
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_IGNORE += "CVE-2013-2232"
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_IGNORE += "CVE-2013-2234"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_IGNORE += "CVE-2013-2237"
+
+# Skipping CVE-2013-2239, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-2546"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-2547"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_IGNORE += "CVE-2013-2548"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_IGNORE += "CVE-2013-2596"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-2634"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-2635"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_IGNORE += "CVE-2013-2636"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_IGNORE += "CVE-2013-2850"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-2851"
+
+# fixed-version: Fixed after version 3.10rc6
+CVE_CHECK_IGNORE += "CVE-2013-2852"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2888"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2889"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2890"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2891"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2892"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2893"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2894"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2895"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2896"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-2897"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2898"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-2899"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-2929"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-2930"
+
+# fixed-version: Fixed after version 3.9
+CVE_CHECK_IGNORE += "CVE-2013-3076"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3222"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3223"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3224"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3225"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3226"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3227"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3228"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3229"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3230"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3231"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3232"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3233"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3234"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3235"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3236"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3237"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_IGNORE += "CVE-2013-3301"
+
+# fixed-version: Fixed after version 3.8rc3
+CVE_CHECK_IGNORE += "CVE-2013-3302"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4125"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4127"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4129"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4162"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2013-4163"
+
+# fixed-version: Fixed after version 3.11rc5
+CVE_CHECK_IGNORE += "CVE-2013-4205"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_IGNORE += "CVE-2013-4220"
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_IGNORE += "CVE-2013-4247"
+
+# fixed-version: Fixed after version 3.11rc6
+CVE_CHECK_IGNORE += "CVE-2013-4254"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_IGNORE += "CVE-2013-4270"
+
+# fixed-version: Fixed after version 3.12rc6
+CVE_CHECK_IGNORE += "CVE-2013-4299"
+
+# fixed-version: Fixed after version 3.11
+CVE_CHECK_IGNORE += "CVE-2013-4300"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2013-4312"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-4343"
+
+# fixed-version: Fixed after version 3.13rc2
+CVE_CHECK_IGNORE += "CVE-2013-4345"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-4348"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_IGNORE += "CVE-2013-4350"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_IGNORE += "CVE-2013-4387"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2013-4470"
+
+# fixed-version: Fixed after version 3.10rc1
+CVE_CHECK_IGNORE += "CVE-2013-4483"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4511"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4512"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4513"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4514"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4515"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-4516"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-4563"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_IGNORE += "CVE-2013-4579"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-4587"
+
+# fixed-version: Fixed after version 2.6.33rc4
+CVE_CHECK_IGNORE += "CVE-2013-4588"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_IGNORE += "CVE-2013-4591"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2013-4592"
+
+# Skipping CVE-2013-4737, no affected_versions
+
+# Skipping CVE-2013-4738, no affected_versions
+
+# Skipping CVE-2013-4739, no affected_versions
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_IGNORE += "CVE-2013-5634"
+
+# fixed-version: Fixed after version 3.6rc6
+CVE_CHECK_IGNORE += "CVE-2013-6282"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6367"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6368"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6376"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6378"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6380"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6381"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_IGNORE += "CVE-2013-6382"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_IGNORE += "CVE-2013-6383"
+
+# Skipping CVE-2013-6392, no affected_versions
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2013-6431"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-6432"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2013-6885"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7026"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2013-7027"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7263"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7264"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7265"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7266"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7267"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7268"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7269"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7270"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7271"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7281"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_IGNORE += "CVE-2013-7339"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2013-7348"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2013-7421"
+
+# CVE-2013-7445 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2013-7446"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2013-7470"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-0038"
+
+# fixed-version: Fixed after version 3.14rc5
+CVE_CHECK_IGNORE += "CVE-2014-0049"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_IGNORE += "CVE-2014-0055"
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_IGNORE += "CVE-2014-0069"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_IGNORE += "CVE-2014-0077"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_IGNORE += "CVE-2014-0100"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-0101"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-0102"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_IGNORE += "CVE-2014-0131"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-0155"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-0181"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-0196"
+
+# fixed-version: Fixed after version 2.6.33rc5
+CVE_CHECK_IGNORE += "CVE-2014-0203"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_IGNORE += "CVE-2014-0205"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-0206"
+
+# Skipping CVE-2014-0972, no affected_versions
+
+# fixed-version: Fixed after version 3.13
+CVE_CHECK_IGNORE += "CVE-2014-1438"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2014-1444"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_IGNORE += "CVE-2014-1445"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_IGNORE += "CVE-2014-1446"
+
+# fixed-version: Fixed after version 3.13rc8
+CVE_CHECK_IGNORE += "CVE-2014-1690"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-1737"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2014-1738"
+
+# fixed-version: Fixed after version 3.15rc6
+CVE_CHECK_IGNORE += "CVE-2014-1739"
+
+# fixed-version: Fixed after version 3.14rc2
+CVE_CHECK_IGNORE += "CVE-2014-1874"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-2038"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_IGNORE += "CVE-2014-2039"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_IGNORE += "CVE-2014-2309"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-2523"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_IGNORE += "CVE-2014-2568"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-2580"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-2672"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-2673"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-2678"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_IGNORE += "CVE-2014-2706"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-2739"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-2851"
+
+# fixed-version: Fixed after version 3.2rc7
+CVE_CHECK_IGNORE += "CVE-2014-2889"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-3122"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-3144"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_IGNORE += "CVE-2014-3145"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_IGNORE += "CVE-2014-3153"
+
+# fixed-version: Fixed after version 3.17rc4
+CVE_CHECK_IGNORE += "CVE-2014-3180"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_IGNORE += "CVE-2014-3181"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3182"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3183"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3184"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_IGNORE += "CVE-2014-3185"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_IGNORE += "CVE-2014-3186"
+
+# Skipping CVE-2014-3519, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_IGNORE += "CVE-2014-3534"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_IGNORE += "CVE-2014-3535"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-3601"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3610"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3611"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-3631"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2014-3645"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3646"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-3647"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3673"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3687"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3688"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-3690"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-3917"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_IGNORE += "CVE-2014-3940"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-4014"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_IGNORE += "CVE-2014-4027"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-4157"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-4171"
+
+# Skipping CVE-2014-4322, no affected_versions
+
+# Skipping CVE-2014-4323, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-4508"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-4608"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_IGNORE += "CVE-2014-4611"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4652"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4653"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4654"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4655"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_IGNORE += "CVE-2014-4656"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-4667"
+
+# fixed-version: Fixed after version 3.16rc4
+CVE_CHECK_IGNORE += "CVE-2014-4699"
+
+# fixed-version: Fixed after version 3.16rc6
+CVE_CHECK_IGNORE += "CVE-2014-4943"
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_IGNORE += "CVE-2014-5045"
+
+# fixed-version: Fixed after version 3.16
+CVE_CHECK_IGNORE += "CVE-2014-5077"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2014-5206"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2014-5207"
+
+# Skipping CVE-2014-5332, no affected_versions
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-5471"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-5472"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6410"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6416"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6417"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_IGNORE += "CVE-2014-6418"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_IGNORE += "CVE-2014-7145"
+
+# Skipping CVE-2014-7207, no affected_versions
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-7283"
+
+# fixed-version: Fixed after version 3.15rc7
+CVE_CHECK_IGNORE += "CVE-2014-7284"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-7822"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_IGNORE += "CVE-2014-7825"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_IGNORE += "CVE-2014-7826"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_IGNORE += "CVE-2014-7841"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-7842"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_IGNORE += "CVE-2014-7843"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-7970"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-7975"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_IGNORE += "CVE-2014-8086"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8133"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8134"
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_IGNORE += "CVE-2014-8159"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-8160"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_IGNORE += "CVE-2014-8171"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2014-8172"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_IGNORE += "CVE-2014-8173"
+
+# Skipping CVE-2014-8181, no affected_versions
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-8369"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-8480"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-8481"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8559"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_IGNORE += "CVE-2014-8709"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2014-8884"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-8989"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_IGNORE += "CVE-2014-9090"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_IGNORE += "CVE-2014-9322"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9419"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9420"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9428"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_IGNORE += "CVE-2014-9529"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9584"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_IGNORE += "CVE-2014-9585"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9644"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9683"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9710"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_IGNORE += "CVE-2014-9715"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2014-9717"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9728"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9729"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9730"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2014-9731"
+
+# Skipping CVE-2014-9777, no affected_versions
+
+# Skipping CVE-2014-9778, no affected_versions
+
+# Skipping CVE-2014-9779, no affected_versions
+
+# Skipping CVE-2014-9780, no affected_versions
+
+# Skipping CVE-2014-9781, no affected_versions
+
+# Skipping CVE-2014-9782, no affected_versions
+
+# Skipping CVE-2014-9783, no affected_versions
+
+# Skipping CVE-2014-9784, no affected_versions
+
+# Skipping CVE-2014-9785, no affected_versions
+
+# Skipping CVE-2014-9786, no affected_versions
+
+# Skipping CVE-2014-9787, no affected_versions
+
+# Skipping CVE-2014-9788, no affected_versions
+
+# Skipping CVE-2014-9789, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-9803"
+
+# Skipping CVE-2014-9863, no affected_versions
+
+# Skipping CVE-2014-9864, no affected_versions
+
+# Skipping CVE-2014-9865, no affected_versions
+
+# Skipping CVE-2014-9866, no affected_versions
+
+# Skipping CVE-2014-9867, no affected_versions
+
+# Skipping CVE-2014-9868, no affected_versions
+
+# Skipping CVE-2014-9869, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2014-9870"
+
+# Skipping CVE-2014-9871, no affected_versions
+
+# Skipping CVE-2014-9872, no affected_versions
+
+# Skipping CVE-2014-9873, no affected_versions
+
+# Skipping CVE-2014-9874, no affected_versions
+
+# Skipping CVE-2014-9875, no affected_versions
+
+# Skipping CVE-2014-9876, no affected_versions
+
+# Skipping CVE-2014-9877, no affected_versions
+
+# Skipping CVE-2014-9878, no affected_versions
+
+# Skipping CVE-2014-9879, no affected_versions
+
+# Skipping CVE-2014-9880, no affected_versions
+
+# Skipping CVE-2014-9881, no affected_versions
+
+# Skipping CVE-2014-9882, no affected_versions
+
+# Skipping CVE-2014-9883, no affected_versions
+
+# Skipping CVE-2014-9884, no affected_versions
+
+# Skipping CVE-2014-9885, no affected_versions
+
+# Skipping CVE-2014-9886, no affected_versions
+
+# Skipping CVE-2014-9887, no affected_versions
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_IGNORE += "CVE-2014-9888"
+
+# Skipping CVE-2014-9889, no affected_versions
+
+# Skipping CVE-2014-9890, no affected_versions
+
+# Skipping CVE-2014-9891, no affected_versions
+
+# Skipping CVE-2014-9892, no affected_versions
+
+# Skipping CVE-2014-9893, no affected_versions
+
+# Skipping CVE-2014-9894, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2014-9895"
+
+# Skipping CVE-2014-9896, no affected_versions
+
+# Skipping CVE-2014-9897, no affected_versions
+
+# Skipping CVE-2014-9898, no affected_versions
+
+# Skipping CVE-2014-9899, no affected_versions
+
+# Skipping CVE-2014-9900, no affected_versions
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_IGNORE += "CVE-2014-9903"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2014-9904"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2014-9914"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_IGNORE += "CVE-2014-9922"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_IGNORE += "CVE-2014-9940"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_IGNORE += "CVE-2015-0239"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_IGNORE += "CVE-2015-0274"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-0275"
+
+# Skipping CVE-2015-0777, no affected_versions
+
+# Skipping CVE-2015-1328, no affected_versions
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_IGNORE += "CVE-2015-1333"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_IGNORE += "CVE-2015-1339"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2015-1350"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-1420"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-1421"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-1465"
+
+# fixed-version: Fixed after version 3.19rc5
+CVE_CHECK_IGNORE += "CVE-2015-1573"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-1593"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2015-1805"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-2041"
+
+# fixed-version: Fixed after version 3.19
+CVE_CHECK_IGNORE += "CVE-2015-2042"
+
+# fixed-version: Fixed after version 4.0rc4
+CVE_CHECK_IGNORE += "CVE-2015-2150"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-2666"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_IGNORE += "CVE-2015-2672"
+
+# fixed-version: Fixed after version 4.0rc6
+CVE_CHECK_IGNORE += "CVE-2015-2686"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_IGNORE += "CVE-2015-2830"
+
+# CVE-2015-2877 has no known resolution
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_IGNORE += "CVE-2015-2922"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-2925"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-3212"
+
+# fixed-version: Fixed after version 2.6.33rc8
+CVE_CHECK_IGNORE += "CVE-2015-3214"
+
+# fixed-version: Fixed after version 4.2rc2
+CVE_CHECK_IGNORE += "CVE-2015-3288"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_IGNORE += "CVE-2015-3290"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_IGNORE += "CVE-2015-3291"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_IGNORE += "CVE-2015-3331"
+
+# Skipping CVE-2015-3332, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-3339"
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_IGNORE += "CVE-2015-3636"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-4001"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-4002"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-4003"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-4004"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-4036"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-4167"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_IGNORE += "CVE-2015-4170"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-4176"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-4177"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-4178"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-4692"
+
+# fixed-version: Fixed after version 4.1rc6
+CVE_CHECK_IGNORE += "CVE-2015-4700"
+
+# fixed-version: Fixed after version 4.2rc7
+CVE_CHECK_IGNORE += "CVE-2015-5156"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_IGNORE += "CVE-2015-5157"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_IGNORE += "CVE-2015-5257"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_IGNORE += "CVE-2015-5283"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-5307"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-5327"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-5364"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_IGNORE += "CVE-2015-5366"
+
+# fixed-version: Fixed after version 4.2rc6
+CVE_CHECK_IGNORE += "CVE-2015-5697"
+
+# fixed-version: Fixed after version 4.1rc3
+CVE_CHECK_IGNORE += "CVE-2015-5706"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-5707"
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_IGNORE += "CVE-2015-6252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-6526"
+
+# CVE-2015-6619 has no known resolution
+
+# CVE-2015-6646 has no known resolution
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-6937"
+
+# Skipping CVE-2015-7312, no affected_versions
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2015-7509"
+
+# fixed-version: Fixed after version 4.4rc7
+CVE_CHECK_IGNORE += "CVE-2015-7513"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-7515"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_IGNORE += "CVE-2015-7550"
+
+# Skipping CVE-2015-7553, no affected_versions
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_IGNORE += "CVE-2015-7566"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_IGNORE += "CVE-2015-7613"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-7799"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2015-7833"
+
+# Skipping CVE-2015-7837, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_IGNORE += "CVE-2015-7872"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-7884"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-7885"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2015-7990"
+
+# Skipping CVE-2015-8019, no affected_versions
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8104"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_IGNORE += "CVE-2015-8215"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_IGNORE += "CVE-2015-8324"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8374"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_IGNORE += "CVE-2015-8539"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8543"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8550"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8551"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8552"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8553"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8569"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8575"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2015-8660"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2015-8709"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-8746"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_IGNORE += "CVE-2015-8767"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_IGNORE += "CVE-2015-8785"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8787"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8812"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_IGNORE += "CVE-2015-8816"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-8830"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8839"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_IGNORE += "CVE-2015-8844"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_IGNORE += "CVE-2015-8845"
+
+# Skipping CVE-2015-8937, no affected_versions
+
+# Skipping CVE-2015-8938, no affected_versions
+
+# Skipping CVE-2015-8939, no affected_versions
+
+# Skipping CVE-2015-8940, no affected_versions
+
+# Skipping CVE-2015-8941, no affected_versions
+
+# Skipping CVE-2015-8942, no affected_versions
+
+# Skipping CVE-2015-8943, no affected_versions
+
+# Skipping CVE-2015-8944, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_IGNORE += "CVE-2015-8950"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2015-8952"
+
+# fixed-version: Fixed after version 4.3
+CVE_CHECK_IGNORE += "CVE-2015-8953"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2015-8955"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-8956"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8961"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2015-8962"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_IGNORE += "CVE-2015-8963"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8964"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_IGNORE += "CVE-2015-8966"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2015-8967"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2015-8970"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_IGNORE += "CVE-2015-9004"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2015-9016"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2015-9289"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-0617"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_IGNORE += "CVE-2016-0723"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-0728"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-0758"
+
+# Skipping CVE-2016-0774, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2016-0821"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_IGNORE += "CVE-2016-0823"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2016-10044"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10088"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_IGNORE += "CVE-2016-10147"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-10150"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10153"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10154"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-10200"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10208"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-10229"
+
+# fixed-version: Fixed after version 4.8rc6
+CVE_CHECK_IGNORE += "CVE-2016-10318"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2016-10723"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10741"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-10764"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-10905"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_IGNORE += "CVE-2016-10906"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-10907"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_IGNORE += "CVE-2016-1237"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-1575"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-1576"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_IGNORE += "CVE-2016-1583"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2016-2053"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2069"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_IGNORE += "CVE-2016-2070"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2085"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-2117"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_IGNORE += "CVE-2016-2143"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-2184"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-2185"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-2186"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-2187"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2016-2188"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2383"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2384"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2543"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2544"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2545"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2546"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2547"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2548"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2549"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2016-2550"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_IGNORE += "CVE-2016-2782"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2016-2847"
+
+# Skipping CVE-2016-2853, no affected_versions
+
+# Skipping CVE-2016-2854, no affected_versions
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_IGNORE += "CVE-2016-3044"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2016-3070"
+
+# fixed-version: Fixed after version 4.6rc2
+CVE_CHECK_IGNORE += "CVE-2016-3134"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3135"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3136"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3137"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3138"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_IGNORE += "CVE-2016-3139"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3140"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3156"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3157"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3672"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-3689"
+
+# Skipping CVE-2016-3695, no affected_versions
+
+# Skipping CVE-2016-3699, no affected_versions
+
+# Skipping CVE-2016-3707, no affected_versions
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-3713"
+
+# CVE-2016-3775 has no known resolution
+
+# CVE-2016-3802 has no known resolution
+
+# CVE-2016-3803 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_IGNORE += "CVE-2016-3841"
+
+# fixed-version: Fixed after version 4.8rc2
+CVE_CHECK_IGNORE += "CVE-2016-3857"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_IGNORE += "CVE-2016-3951"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_IGNORE += "CVE-2016-3955"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-3961"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4440"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_IGNORE += "CVE-2016-4470"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4482"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4485"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4486"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-4557"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_IGNORE += "CVE-2016-4558"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-4565"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-4568"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4569"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4578"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4580"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_IGNORE += "CVE-2016-4581"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_IGNORE += "CVE-2016-4794"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-4805"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-4913"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4951"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4997"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-4998"
+
+# fixed-version: Fixed after version 4.9rc2
+CVE_CHECK_IGNORE += "CVE-2016-5195"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_IGNORE += "CVE-2016-5243"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_IGNORE += "CVE-2016-5244"
+
+# Skipping CVE-2016-5340, no affected_versions
+
+# Skipping CVE-2016-5342, no affected_versions
+
+# Skipping CVE-2016-5343, no affected_versions
+
+# Skipping CVE-2016-5344, no affected_versions
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_IGNORE += "CVE-2016-5400"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-5412"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_IGNORE += "CVE-2016-5696"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-5728"
+
+# fixed-version: Fixed after version 4.7rc6
+CVE_CHECK_IGNORE += "CVE-2016-5828"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_IGNORE += "CVE-2016-5829"
+
+# CVE-2016-5870 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2016-6130"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-6136"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_IGNORE += "CVE-2016-6156"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_IGNORE += "CVE-2016-6162"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_IGNORE += "CVE-2016-6187"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-6197"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_IGNORE += "CVE-2016-6198"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-6213"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-6327"
+
+# fixed-version: Fixed after version 4.8rc3
+CVE_CHECK_IGNORE += "CVE-2016-6480"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-6516"
+
+# Skipping CVE-2016-6753, no affected_versions
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2016-6786"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_IGNORE += "CVE-2016-6787"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_IGNORE += "CVE-2016-6828"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-7039"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_IGNORE += "CVE-2016-7042"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-7097"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-7117"
+
+# Skipping CVE-2016-7118, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2016-7425"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2016-7910"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_IGNORE += "CVE-2016-7911"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_IGNORE += "CVE-2016-7912"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-7913"
+
+# fixed-version: Fixed after version 4.6rc4
+CVE_CHECK_IGNORE += "CVE-2016-7914"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-7915"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_IGNORE += "CVE-2016-7916"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_IGNORE += "CVE-2016-7917"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_IGNORE += "CVE-2016-8399"
+
+# Skipping CVE-2016-8401, no affected_versions
+
+# Skipping CVE-2016-8402, no affected_versions
+
+# Skipping CVE-2016-8403, no affected_versions
+
+# Skipping CVE-2016-8404, no affected_versions
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_IGNORE += "CVE-2016-8405"
+
+# Skipping CVE-2016-8406, no affected_versions
+
+# Skipping CVE-2016-8407, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-8630"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-8632"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-8633"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2016-8636"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_IGNORE += "CVE-2016-8645"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2016-8646"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-8650"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-8655"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2016-8658"
+
+# CVE-2016-8660 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-8666"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-9083"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-9084"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-9120"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2016-9178"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2016-9191"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_IGNORE += "CVE-2016-9313"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_IGNORE += "CVE-2016-9555"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_IGNORE += "CVE-2016-9576"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_IGNORE += "CVE-2016-9588"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2016-9604"
+
+# Skipping CVE-2016-9644, no affected_versions
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2016-9685"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-9754"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-9755"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-9756"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_IGNORE += "CVE-2016-9777"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-9793"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-9794"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2016-9806"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_IGNORE += "CVE-2016-9919"
+
+# Skipping CVE-2017-0403, no affected_versions
+
+# Skipping CVE-2017-0404, no affected_versions
+
+# Skipping CVE-2017-0426, no affected_versions
+
+# Skipping CVE-2017-0427, no affected_versions
+
+# CVE-2017-0507 has no known resolution
+
+# CVE-2017-0508 has no known resolution
+
+# Skipping CVE-2017-0510, no affected_versions
+
+# Skipping CVE-2017-0528, no affected_versions
+
+# Skipping CVE-2017-0537, no affected_versions
+
+# CVE-2017-0564 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-0605"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-0627"
+
+# CVE-2017-0630 has no known resolution
+
+# CVE-2017-0749 has no known resolution
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2017-0750"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-0786"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-0861"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000111"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000112"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000251"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000253"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000255"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-1000363"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_IGNORE += "CVE-2017-1000364"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-1000365"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000370"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-1000371"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_IGNORE += "CVE-2017-1000379"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-1000380"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2017-1000405"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-1000407"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-1000410"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-10661"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-10662"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-10663"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-10810"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-10911"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-11089"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-11176"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-11472"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_IGNORE += "CVE-2017-11473"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_IGNORE += "CVE-2017-11600"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2017-12134"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-12146"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_IGNORE += "CVE-2017-12153"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-12154"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_IGNORE += "CVE-2017-12168"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-12188"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-12190"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-12192"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-12193"
+
+# fixed-version: Fixed after version 4.13rc4
+CVE_CHECK_IGNORE += "CVE-2017-12762"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-13080"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-13166"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_IGNORE += "CVE-2017-13167"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2017-13168"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_IGNORE += "CVE-2017-13215"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-13216"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2017-13220"
+
+# CVE-2017-13221 has no known resolution
+
+# CVE-2017-13222 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-13305"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_IGNORE += "CVE-2017-13686"
+
+# CVE-2017-13693 has no known resolution
+
+# CVE-2017-13694 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2017-13695"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_IGNORE += "CVE-2017-13715"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-14051"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_IGNORE += "CVE-2017-14106"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2017-14140"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-14156"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-14340"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-14489"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_IGNORE += "CVE-2017-14497"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-14954"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_IGNORE += "CVE-2017-14991"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_IGNORE += "CVE-2017-15102"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-15115"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_IGNORE += "CVE-2017-15116"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-15121"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-15126"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2017-15127"
+
+# fixed-version: Fixed after version 4.14rc8
+CVE_CHECK_IGNORE += "CVE-2017-15128"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-15129"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-15265"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-15274"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-15299"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-15306"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-15537"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-15649"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_IGNORE += "CVE-2017-15868"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-15951"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16525"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16526"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16527"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2017-16528"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16529"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16530"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16531"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16532"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-16533"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2017-16534"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-16535"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16536"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16537"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-16538"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-16643"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-16644"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2017-16645"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16646"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_IGNORE += "CVE-2017-16647"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16648"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_IGNORE += "CVE-2017-16649"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_IGNORE += "CVE-2017-16650"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16911"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16912"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16913"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-16914"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_IGNORE += "CVE-2017-16939"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-16994"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-16995"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-16996"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_IGNORE += "CVE-2017-17052"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_IGNORE += "CVE-2017-17053"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17448"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17449"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17450"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17558"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17712"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17741"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17805"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-17806"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-17807"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17852"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17853"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17854"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17855"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17856"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17857"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-17862"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17863"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_IGNORE += "CVE-2017-17864"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2017-17975"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_IGNORE += "CVE-2017-18017"
+
+# fixed-version: Fixed after version 4.15rc7
+CVE_CHECK_IGNORE += "CVE-2017-18075"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18079"
+
+# CVE-2017-18169 has no known resolution
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2017-18174"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18193"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-18200"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2017-18202"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18203"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18204"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2017-18208"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18216"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18218"
+
+# fixed-version: Fixed after version 4.12rc4
+CVE_CHECK_IGNORE += "CVE-2017-18221"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-18222"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-18224"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-18232"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18241"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-18249"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18255"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18257"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2017-18261"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-18270"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2017-18344"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-18360"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2017-18379"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18509"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18549"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-18550"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2017-18551"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-18552"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_IGNORE += "CVE-2017-18595"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-2583"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-2584"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-2596"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-2618"
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_IGNORE += "CVE-2017-2634"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-2636"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2017-2647"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-2671"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_IGNORE += "CVE-2017-5123"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5546"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_IGNORE += "CVE-2017-5547"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_IGNORE += "CVE-2017-5548"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5549"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5550"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-5551"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_IGNORE += "CVE-2017-5576"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_IGNORE += "CVE-2017-5577"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-5669"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-5715"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2017-5753"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-5754"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-5897"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-5967"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-5970"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_IGNORE += "CVE-2017-5972"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-5986"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-6001"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6074"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-6214"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6345"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6346"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-6347"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_IGNORE += "CVE-2017-6348"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-6353"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-6874"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2017-6951"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_IGNORE += "CVE-2017-7184"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_IGNORE += "CVE-2017-7187"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7261"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_IGNORE += "CVE-2017-7273"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_IGNORE += "CVE-2017-7277"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7294"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7308"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-7346"
+
+# CVE-2017-7369 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_IGNORE += "CVE-2017-7374"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2017-7472"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_IGNORE += "CVE-2017-7477"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-7482"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-7487"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_IGNORE += "CVE-2017-7495"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_IGNORE += "CVE-2017-7518"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-7533"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-7541"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_IGNORE += "CVE-2017-7542"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_IGNORE += "CVE-2017-7558"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_IGNORE += "CVE-2017-7616"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2017-7618"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_IGNORE += "CVE-2017-7645"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_IGNORE += "CVE-2017-7889"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_IGNORE += "CVE-2017-7895"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2017-7979"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_IGNORE += "CVE-2017-8061"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-8062"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8063"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8064"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8066"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2017-8067"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-8068"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-8069"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_IGNORE += "CVE-2017-8070"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_IGNORE += "CVE-2017-8071"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_IGNORE += "CVE-2017-8072"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2017-8106"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_IGNORE += "CVE-2017-8240"
+
+# CVE-2017-8242 has no known resolution
+
+# CVE-2017-8244 has no known resolution
+
+# CVE-2017-8245 has no known resolution
+
+# CVE-2017-8246 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-8797"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2017-8824"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-8831"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-8890"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-8924"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_IGNORE += "CVE-2017-8925"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-9059"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9074"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9075"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9076"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_IGNORE += "CVE-2017-9077"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2017-9150"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_IGNORE += "CVE-2017-9211"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_IGNORE += "CVE-2017-9242"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_IGNORE += "CVE-2017-9605"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_IGNORE += "CVE-2017-9725"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-9984"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2017-9985"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2017-9986"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2018-1000004"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-1000026"
+
+# fixed-version: Fixed after version 4.15
+CVE_CHECK_IGNORE += "CVE-2018-1000028"
+
+# fixed-version: Fixed after version 4.16
+CVE_CHECK_IGNORE += "CVE-2018-1000199"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_IGNORE += "CVE-2018-1000200"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-1000204"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-10021"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-10074"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2018-10087"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2018-10124"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_IGNORE += "CVE-2018-10322"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_IGNORE += "CVE-2018-10323"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2018-1065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2018-1066"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_IGNORE += "CVE-2018-10675"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-1068"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-10840"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-10853"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-1087"
+
+# CVE-2018-10872 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10876"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10877"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10878"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10879"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10880"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10881"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10882"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-10883"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_IGNORE += "CVE-2018-10901"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_IGNORE += "CVE-2018-10902"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_IGNORE += "CVE-2018-1091"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1092"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1093"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_IGNORE += "CVE-2018-10938"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1094"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_IGNORE += "CVE-2018-10940"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-1095"
+
+# fixed-version: Fixed after version 4.17rc2
+CVE_CHECK_IGNORE += "CVE-2018-1108"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-1118"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_IGNORE += "CVE-2018-1120"
+
+# CVE-2018-1121 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2018-11232"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-1128"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-1129"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-1130"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-11412"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-11506"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_IGNORE += "CVE-2018-11508"
+
+# CVE-2018-11987 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2018-12126"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2018-12127"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2018-12130"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2018-12207"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-12232"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_IGNORE += "CVE-2018-12233"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-12633"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_IGNORE += "CVE-2018-12714"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-12896"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-12904"
+
+# CVE-2018-12928 has no known resolution
+
+# CVE-2018-12929 has no known resolution
+
+# CVE-2018-12930 has no known resolution
+
+# CVE-2018-12931 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13053"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-13093"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-13094"
+
+# fixed-version: Fixed after version 4.18rc3
+CVE_CHECK_IGNORE += "CVE-2018-13095"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13096"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13097"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13098"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13099"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-13100"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_IGNORE += "CVE-2018-13405"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-13406"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14609"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14610"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14611"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14612"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14613"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14614"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14615"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14616"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-14617"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_IGNORE += "CVE-2018-14619"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_IGNORE += "CVE-2018-14625"
+
+# fixed-version: Fixed after version 4.19rc6
+CVE_CHECK_IGNORE += "CVE-2018-14633"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2018-14634"
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_IGNORE += "CVE-2018-14641"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-14646"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_IGNORE += "CVE-2018-14656"
+
+# fixed-version: Fixed after version 4.18rc8
+CVE_CHECK_IGNORE += "CVE-2018-14678"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-14734"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-15471"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-15572"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-15594"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_IGNORE += "CVE-2018-16276"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2018-16597"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_IGNORE += "CVE-2018-16658"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-16862"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_IGNORE += "CVE-2018-16871"
+
+# fixed-version: Fixed after version 5.0rc5
+CVE_CHECK_IGNORE += "CVE-2018-16880"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_IGNORE += "CVE-2018-16882"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2018-16884"
+
+# CVE-2018-16885 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_IGNORE += "CVE-2018-17182"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-17972"
+
+# CVE-2018-17977 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-18021"
+
+# fixed-version: Fixed after version 4.19
+CVE_CHECK_IGNORE += "CVE-2018-18281"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_IGNORE += "CVE-2018-18386"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-18397"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2018-18445"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-18559"
+
+# CVE-2018-18653 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_IGNORE += "CVE-2018-18690"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2018-18710"
+
+# fixed-version: Fixed after version 4.20rc2
+CVE_CHECK_IGNORE += "CVE-2018-18955"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-19406"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2018-19407"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_IGNORE += "CVE-2018-19824"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_IGNORE += "CVE-2018-19854"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_IGNORE += "CVE-2018-19985"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_IGNORE += "CVE-2018-20169"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-20449"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2018-20509"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2018-20510"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_IGNORE += "CVE-2018-20511"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2018-20669"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2018-20784"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2018-20836"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2018-20854"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-20855"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-20856"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-20961"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-20976"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2018-21008"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2018-25015"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-25020"
+
+# CVE-2018-3574 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-3620"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_IGNORE += "CVE-2018-3639"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-3646"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_IGNORE += "CVE-2018-3665"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-3693"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-5332"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-5333"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_IGNORE += "CVE-2018-5344"
+
+# fixed-version: Fixed after version 4.18rc7
+CVE_CHECK_IGNORE += "CVE-2018-5390"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-5391"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-5703"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-5750"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-5803"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_IGNORE += "CVE-2018-5814"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-5848"
+
+# Skipping CVE-2018-5856, no affected_versions
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_IGNORE += "CVE-2018-5873"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-5953"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-5995"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-6412"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-6554"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2018-6555"
+
+# CVE-2018-6559 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2018-6927"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_IGNORE += "CVE-2018-7191"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-7273"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_IGNORE += "CVE-2018-7480"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_IGNORE += "CVE-2018-7492"
+
+# fixed-version: Fixed after version 4.16rc2
+CVE_CHECK_IGNORE += "CVE-2018-7566"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-7740"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2018-7754"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_IGNORE += "CVE-2018-7755"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-7757"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2018-7995"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-8043"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2018-8087"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-8781"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-8822"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2018-8897"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2018-9363"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_IGNORE += "CVE-2018-9385"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_IGNORE += "CVE-2018-9415"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_IGNORE += "CVE-2018-9422"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_IGNORE += "CVE-2018-9465"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_IGNORE += "CVE-2018-9516"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_IGNORE += "CVE-2018-9517"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2018-9518"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2018-9568"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-0136"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0145"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0146"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0147"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-0148"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-0149"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-0154"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-0155"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-10124"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-10125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-10126"
+
+# CVE-2019-10140 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-10142"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_IGNORE += "CVE-2019-10207"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-10220"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-10638"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-10639"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2019-11085"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-11091"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-11135"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_IGNORE += "CVE-2019-11190"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-11191"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-1125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-11477"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-11478"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-11479"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-11486"
+
+# fixed-version: Fixed after version 5.1rc5
+CVE_CHECK_IGNORE += "CVE-2019-11487"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-11599"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-11683"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-11810"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-11811"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-11815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-11833"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-11884"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-12378"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12379"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-12380"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-12381"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12382"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12454"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12455"
+
+# CVE-2019-12456 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-12614"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-12615"
+
+# fixed-version: Fixed after version 5.2rc7
+CVE_CHECK_IGNORE += "CVE-2019-12817"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_IGNORE += "CVE-2019-12818"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_IGNORE += "CVE-2019-12819"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2019-12881"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-12984"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-13233"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_IGNORE += "CVE-2019-13272"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-13631"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_IGNORE += "CVE-2019-13648"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-14283"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-14284"
+
+# fixed-version: Fixed after version 5.5rc7
+CVE_CHECK_IGNORE += "CVE-2019-14615"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2019-14763"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14814"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14815"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-14821"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-14835"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-14895"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2019-14896"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2019-14897"
+
+# CVE-2019-14898 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-14901"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_IGNORE += "CVE-2019-15030"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_IGNORE += "CVE-2019-15031"
+
+# fixed-version: Fixed after version 5.2rc2
+CVE_CHECK_IGNORE += "CVE-2019-15090"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-15098"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-15099"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-15117"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-15118"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15211"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15212"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15213"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15214"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15215"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-15216"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15217"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15218"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15219"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15220"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_IGNORE += "CVE-2019-15221"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_IGNORE += "CVE-2019-15222"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15223"
+
+# CVE-2019-15239 has no known resolution
+
+# CVE-2019-15290 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-15291"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-15292"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-15504"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-15505"
+
+# fixed-version: Fixed after version 5.3rc6
+CVE_CHECK_IGNORE += "CVE-2019-15538"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-15666"
+
+# CVE-2019-15791 has no known resolution
+
+# CVE-2019-15792 has no known resolution
+
+# CVE-2019-15793 has no known resolution
+
+# fixed-version: Fixed after version 5.12
+CVE_CHECK_IGNORE += "CVE-2019-15794"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2019-15807"
+
+# CVE-2019-15902 has no known resolution
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-15916"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-15917"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15918"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15919"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-15920"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-15921"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-15922"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-15923"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-15924"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15925"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-15926"
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_IGNORE += "CVE-2019-15927"
+
+# CVE-2019-16089 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-16229"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-16230"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-16231"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-16232"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_IGNORE += "CVE-2019-16233"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-16234"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-16413"
+
+# fixed-version: Fixed after version 5.3rc7
+CVE_CHECK_IGNORE += "CVE-2019-16714"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-16746"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2019-16921"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_IGNORE += "CVE-2019-16994"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-16995"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17052"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17053"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17054"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17055"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-17056"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-17075"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-17133"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-17351"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-17666"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-18198"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-18282"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18660"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_IGNORE += "CVE-2019-18675"
+
+# CVE-2019-18680 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18683"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18786"
+
+# fixed-version: Fixed after version 5.1rc7
+CVE_CHECK_IGNORE += "CVE-2019-18805"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-18806"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-18807"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18808"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-18809"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-18810"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-18811"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-18812"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-18813"
+
+# fixed-version: Fixed after version 5.7rc7
+CVE_CHECK_IGNORE += "CVE-2019-18814"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-18885"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19036"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-19037"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2019-19039"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19043"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19044"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19045"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19046"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19047"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19048"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_IGNORE += "CVE-2019-19049"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19050"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19051"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19052"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19053"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19054"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19055"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19056"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19057"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19058"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19059"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19060"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19061"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19062"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19063"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19064"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19065"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19066"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19067"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19068"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19069"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19070"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19071"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19072"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19073"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19074"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19075"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19076"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19077"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19078"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-19079"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19080"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19081"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19082"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19083"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-19227"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19241"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19252"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19318"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19319"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19332"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19338"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2019-19377"
+
+# CVE-2019-19378 has no known resolution
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19447"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2019-19448"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2019-19449"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2019-19462"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19523"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_IGNORE += "CVE-2019-19524"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_IGNORE += "CVE-2019-19525"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_IGNORE += "CVE-2019-19526"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19527"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_IGNORE += "CVE-2019-19528"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19529"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-19530"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19531"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2019-19532"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19533"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19534"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19535"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_IGNORE += "CVE-2019-19536"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_IGNORE += "CVE-2019-19537"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19543"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19602"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2019-19767"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2019-19768"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2019-19769"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2019-19770"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_IGNORE += "CVE-2019-19807"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19813"
+
+# CVE-2019-19814 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2019-19815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-19922"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-19927"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-19947"
+
+# fixed-version: Fixed after version 5.5rc2
+CVE_CHECK_IGNORE += "CVE-2019-19965"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-19966"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-1999"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-20054"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-20095"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-20096"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2019-2024"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_IGNORE += "CVE-2019-2025"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-20422"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_IGNORE += "CVE-2019-2054"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2019-20636"
+
+# CVE-2019-20794 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-20806"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2019-20810"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_IGNORE += "CVE-2019-20811"
+
+# fixed-version: Fixed after version 5.5rc3
+CVE_CHECK_IGNORE += "CVE-2019-20812"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2019-20908"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_IGNORE += "CVE-2019-20934"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-2101"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-2181"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2019-2182"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-2213"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_IGNORE += "CVE-2019-2214"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2019-2215"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-25044"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_IGNORE += "CVE-2019-25045"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2019-3016"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-3459"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-3460"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2019-3701"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-3819"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_IGNORE += "CVE-2019-3837"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_IGNORE += "CVE-2019-3846"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-3874"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-3882"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_IGNORE += "CVE-2019-3887"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_IGNORE += "CVE-2019-3892"
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_IGNORE += "CVE-2019-3896"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_IGNORE += "CVE-2019-3900"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_IGNORE += "CVE-2019-3901"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_IGNORE += "CVE-2019-5108"
+
+# Skipping CVE-2019-5489, no affected_versions
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_IGNORE += "CVE-2019-6133"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-6974"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-7221"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-7222"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2019-7308"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_IGNORE += "CVE-2019-8912"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_IGNORE += "CVE-2019-8956"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-8980"
+
+# fixed-version: Fixed after version 5.0rc4
+CVE_CHECK_IGNORE += "CVE-2019-9003"
+
+# fixed-version: Fixed after version 5.0rc7
+CVE_CHECK_IGNORE += "CVE-2019-9162"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_IGNORE += "CVE-2019-9213"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2019-9245"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_IGNORE += "CVE-2019-9444"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9445"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2019-9453"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_IGNORE += "CVE-2019-9454"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2019-9455"
+
+# fixed-version: Fixed after version 4.16rc6
+CVE_CHECK_IGNORE += "CVE-2019-9456"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_IGNORE += "CVE-2019-9457"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_IGNORE += "CVE-2019-9458"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9466"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9500"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_IGNORE += "CVE-2019-9503"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_IGNORE += "CVE-2019-9506"
+
+# fixed-version: Fixed after version 5.1rc2
+CVE_CHECK_IGNORE += "CVE-2019-9857"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-0009"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_IGNORE += "CVE-2020-0030"
+
+# fixed-version: Fixed after version 5.5rc2
+CVE_CHECK_IGNORE += "CVE-2020-0041"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_IGNORE += "CVE-2020-0066"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2020-0067"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-0110"
+
+# fixed-version: Fixed after version 5.7rc4
+CVE_CHECK_IGNORE += "CVE-2020-0255"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2020-0305"
+
+# CVE-2020-0347 has no known resolution
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-0404"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-0423"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2020-0427"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2020-0429"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2020-0430"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2020-0431"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-0432"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2020-0433"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_IGNORE += "CVE-2020-0435"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-0444"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-0465"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2020-0466"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-0543"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10135"
+
+# fixed-version: Fixed after version 5.5rc5
+CVE_CHECK_IGNORE += "CVE-2020-10690"
+
+# CVE-2020-10708 has no known resolution
+
+# fixed-version: Fixed after version 5.7rc6
+CVE_CHECK_IGNORE += "CVE-2020-10711"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_IGNORE += "CVE-2020-10720"
+
+# fixed-version: Fixed after version 5.7
+CVE_CHECK_IGNORE += "CVE-2020-10732"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_IGNORE += "CVE-2020-10742"
+
+# fixed-version: Fixed after version 5.7rc4
+CVE_CHECK_IGNORE += "CVE-2020-10751"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10757"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10766"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10767"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-10768"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_IGNORE += "CVE-2020-10769"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_IGNORE += "CVE-2020-10773"
+
+# CVE-2020-10774 has no known resolution
+
+# fixed-version: Fixed after version 5.8rc6
+CVE_CHECK_IGNORE += "CVE-2020-10781"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-10942"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11494"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11565"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11608"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11609"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-11668"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2020-11669"
+
+# CVE-2020-11725 has no known resolution
+
+# fixed-version: Fixed after version 5.7rc4
+CVE_CHECK_IGNORE += "CVE-2020-11884"
+
+# CVE-2020-11935 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_IGNORE += "CVE-2020-12114"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-12351"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-12352"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-12362"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-12363"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-12364"
+
+# fixed-version: Fixed after version 5.7rc3
+CVE_CHECK_IGNORE += "CVE-2020-12464"
+
+# fixed-version: Fixed after version 5.6rc6
+CVE_CHECK_IGNORE += "CVE-2020-12465"
+
+# fixed-version: Fixed after version 5.5rc7
+CVE_CHECK_IGNORE += "CVE-2020-12652"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-12653"
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2020-12654"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-12655"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-12656"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-12657"
+
+# fixed-version: Fixed after version 5.7rc2
+CVE_CHECK_IGNORE += "CVE-2020-12659"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-12768"
+
+# fixed-version: Fixed after version 5.5rc6
+CVE_CHECK_IGNORE += "CVE-2020-12769"
+
+# fixed-version: Fixed after version 5.7rc3
+CVE_CHECK_IGNORE += "CVE-2020-12770"
+
+# fixed-version: Fixed after version 5.8rc2
+CVE_CHECK_IGNORE += "CVE-2020-12771"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-12826"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-12888"
+
+# fixed-version: Fixed after version 5.10rc4
+CVE_CHECK_IGNORE += "CVE-2020-12912"
+
+# fixed-version: Fixed after version 5.7rc6
+CVE_CHECK_IGNORE += "CVE-2020-13143"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-13974"
+
+# CVE-2020-14304 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2020-14305"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2020-14314"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-14331"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-14351"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_IGNORE += "CVE-2020-14353"
+
+# fixed-version: Fixed after version 5.8rc5
+CVE_CHECK_IGNORE += "CVE-2020-14356"
+
+# fixed-version: Fixed after version 5.6rc6
+CVE_CHECK_IGNORE += "CVE-2020-14381"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-14385"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-14386"
+
+# fixed-version: Fixed after version 5.9rc6
+CVE_CHECK_IGNORE += "CVE-2020-14390"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2020-14416"
+
+# fixed-version: Fixed after version 5.8rc3
+CVE_CHECK_IGNORE += "CVE-2020-15393"
+
+# fixed-version: Fixed after version 5.8rc2
+CVE_CHECK_IGNORE += "CVE-2020-15436"
+
+# fixed-version: Fixed after version 5.8rc7
+CVE_CHECK_IGNORE += "CVE-2020-15437"
+
+# fixed-version: Fixed after version 5.8rc3
+CVE_CHECK_IGNORE += "CVE-2020-15780"
+
+# CVE-2020-15802 has no known resolution
+
+# fixed-version: Fixed after version 5.8rc6
+CVE_CHECK_IGNORE += "CVE-2020-15852"
+
+# fixed-version: Fixed after version 5.15rc2
+CVE_CHECK_IGNORE += "CVE-2020-16119"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-16120"
+
+# fixed-version: Fixed after version 5.8
+CVE_CHECK_IGNORE += "CVE-2020-16166"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2020-1749"
+
+# fixed-version: Fixed after version 5.8rc4
+CVE_CHECK_IGNORE += "CVE-2020-24394"
+
+# fixed-version: Fixed after version 5.8
+CVE_CHECK_IGNORE += "CVE-2020-24490"
+
+# CVE-2020-24502 has no known resolution
+
+# CVE-2020-24503 has no known resolution
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2020-24504"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2020-24586"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2020-24587"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2020-24588"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2020-25211"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-25212"
+
+# CVE-2020-25220 has no known resolution
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-25221"
+
+# fixed-version: Fixed after version 5.9rc5
+CVE_CHECK_IGNORE += "CVE-2020-25284"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-25285"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2020-25639"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2020-25641"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2020-25643"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2020-25645"
+
+# fixed-version: Fixed after version 5.10rc2
+CVE_CHECK_IGNORE += "CVE-2020-25656"
+
+# CVE-2020-25661 has no known resolution
+
+# CVE-2020-25662 has no known resolution
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-25668"
+
+# fixed-version: Fixed after version 5.10rc5
+CVE_CHECK_IGNORE += "CVE-2020-25669"
+
+# fixed-version: Fixed after version 5.12rc7
+CVE_CHECK_IGNORE += "CVE-2020-25670"
+
+# fixed-version: Fixed after version 5.12rc7
+CVE_CHECK_IGNORE += "CVE-2020-25671"
+
+# fixed-version: Fixed after version 5.12rc7
+CVE_CHECK_IGNORE += "CVE-2020-25672"
+
+# fixed-version: Fixed after version 5.12rc7
+CVE_CHECK_IGNORE += "CVE-2020-25673"
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-25704"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-25705"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-26088"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2020-26139"
+
+# CVE-2020-26140 has no known resolution
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2020-26141"
+
+# CVE-2020-26142 has no known resolution
+
+# CVE-2020-26143 has no known resolution
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2020-26145"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2020-26147"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2020-26541"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2020-26555"
+
+# CVE-2020-26556 has no known resolution
+
+# CVE-2020-26557 has no known resolution
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2020-26558"
+
+# CVE-2020-26559 has no known resolution
+
+# CVE-2020-26560 has no known resolution
+
+# fixed-version: Fixed after version 5.6
+CVE_CHECK_IGNORE += "CVE-2020-27066"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_IGNORE += "CVE-2020-27067"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-27068"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27152"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2020-27170"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2020-27171"
+
+# fixed-version: Fixed after version 5.9
+CVE_CHECK_IGNORE += "CVE-2020-27194"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-2732"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2020-27418"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27673"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27675"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27777"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27784"
+
+# fixed-version: Fixed after version 5.7rc6
+CVE_CHECK_IGNORE += "CVE-2020-27786"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-27815"
+
+# cpe-stable-backport: Backported in 5.15.5
+CVE_CHECK_IGNORE += "CVE-2020-27820"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-27825"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-27830"
+
+# fixed-version: Fixed after version 5.10rc6
+CVE_CHECK_IGNORE += "CVE-2020-27835"
+
+# fixed-version: Fixed after version 5.9rc6
+CVE_CHECK_IGNORE += "CVE-2020-28097"
+
+# fixed-version: Fixed after version 5.11rc4
+CVE_CHECK_IGNORE += "CVE-2020-28374"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-28588"
+
+# fixed-version: Fixed after version 5.9
+CVE_CHECK_IGNORE += "CVE-2020-28915"
+
+# fixed-version: Fixed after version 5.10rc5
+CVE_CHECK_IGNORE += "CVE-2020-28941"
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-28974"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-29368"
+
+# fixed-version: Fixed after version 5.8rc7
+CVE_CHECK_IGNORE += "CVE-2020-29369"
+
+# fixed-version: Fixed after version 5.6rc7
+CVE_CHECK_IGNORE += "CVE-2020-29370"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2020-29371"
+
+# fixed-version: Fixed after version 5.7rc3
+CVE_CHECK_IGNORE += "CVE-2020-29372"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-29373"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-29374"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-29534"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-29568"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-29569"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-29660"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-29661"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-35499"
+
+# CVE-2020-35501 has no known resolution
+
+# fixed-version: Fixed after version 5.10rc3
+CVE_CHECK_IGNORE += "CVE-2020-35508"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_IGNORE += "CVE-2020-35513"
+
+# fixed-version: Fixed after version 5.10rc7
+CVE_CHECK_IGNORE += "CVE-2020-35519"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-36158"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-36310"
+
+# fixed-version: Fixed after version 5.9rc5
+CVE_CHECK_IGNORE += "CVE-2020-36311"
+
+# fixed-version: Fixed after version 5.9rc5
+CVE_CHECK_IGNORE += "CVE-2020-36312"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-36313"
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2020-36322"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2020-36385"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-36386"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-36387"
+
+# cpe-stable-backport: Backported in 5.15.19
+CVE_CHECK_IGNORE += "CVE-2020-36516"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-36557"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-36558"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2020-36691"
+
+# fixed-version: Fixed after version 5.10
+CVE_CHECK_IGNORE += "CVE-2020-36694"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2020-36766"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2020-3702"
+
+# fixed-version: Fixed after version 5.10rc5
+CVE_CHECK_IGNORE += "CVE-2020-4788"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_IGNORE += "CVE-2020-7053"
+
+# fixed-version: Fixed after version 5.5
+CVE_CHECK_IGNORE += "CVE-2020-8428"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2020-8647"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-8648"
+
+# fixed-version: Fixed after version 5.6rc5
+CVE_CHECK_IGNORE += "CVE-2020-8649"
+
+# fixed-version: Fixed after version 5.10rc4
+CVE_CHECK_IGNORE += "CVE-2020-8694"
+
+# CVE-2020-8832 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_IGNORE += "CVE-2020-8834"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2020-8835"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2020-8992"
+
+# fixed-version: Fixed after version 5.6rc4
+CVE_CHECK_IGNORE += "CVE-2020-9383"
+
+# fixed-version: Fixed after version 5.6rc3
+CVE_CHECK_IGNORE += "CVE-2020-9391"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-0129"
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2021-0342"
+
+# CVE-2021-0399 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-0447"
+
+# fixed-version: Fixed after version 5.9rc7
+CVE_CHECK_IGNORE += "CVE-2021-0448"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-0512"
+
+# fixed-version: Fixed after version 5.8
+CVE_CHECK_IGNORE += "CVE-2021-0605"
+
+# CVE-2021-0606 has no known resolution
+
+# CVE-2021-0695 has no known resolution
+
+# fixed-version: Fixed after version 5.11rc3
+CVE_CHECK_IGNORE += "CVE-2021-0707"
+
+# fixed-version: Fixed after version 5.14rc4
+CVE_CHECK_IGNORE += "CVE-2021-0920"
+
+# CVE-2021-0924 has no known resolution
+
+# fixed-version: Fixed after version 5.6rc1
+CVE_CHECK_IGNORE += "CVE-2021-0929"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_IGNORE += "CVE-2021-0935"
+
+# CVE-2021-0936 has no known resolution
+
+# fixed-version: Fixed after version 5.12rc8
+CVE_CHECK_IGNORE += "CVE-2021-0937"
+
+# fixed-version: Fixed after version 5.10rc4
+CVE_CHECK_IGNORE += "CVE-2021-0938"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-0941"
+
+# CVE-2021-0961 has no known resolution
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2021-1048"
+
+# fixed-version: Fixed after version 5.5rc1
+CVE_CHECK_IGNORE += "CVE-2021-20177"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2021-20194"
+
+# CVE-2021-20219 has no known resolution
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2021-20226"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2021-20239"
+
+# fixed-version: Fixed after version 4.5rc5
+CVE_CHECK_IGNORE += "CVE-2021-20261"
+
+# fixed-version: Fixed after version 4.5rc3
+CVE_CHECK_IGNORE += "CVE-2021-20265"
+
+# fixed-version: Fixed after version 5.11rc5
+CVE_CHECK_IGNORE += "CVE-2021-20268"
+
+# fixed-version: Fixed after version 5.9rc1
+CVE_CHECK_IGNORE += "CVE-2021-20292"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2021-20317"
+
+# fixed-version: Fixed after version 5.15rc3
+CVE_CHECK_IGNORE += "CVE-2021-20320"
+
+# fixed-version: Fixed after version 5.15rc5
+CVE_CHECK_IGNORE += "CVE-2021-20321"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-20322"
+
+# fixed-version: Fixed after version 5.11rc7
+CVE_CHECK_IGNORE += "CVE-2021-21781"
+
+# fixed-version: Fixed after version 5.13
+CVE_CHECK_IGNORE += "CVE-2021-22543"
+
+# fixed-version: Fixed after version 5.12rc8
+CVE_CHECK_IGNORE += "CVE-2021-22555"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-22600"
+
+# fixed-version: Fixed after version 5.12rc8
+CVE_CHECK_IGNORE += "CVE-2021-23133"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-23134"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2021-26401"
+
+# fixed-version: Fixed after version 5.11rc7
+CVE_CHECK_IGNORE += "CVE-2021-26708"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-26930"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-26931"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-26932"
+
+# CVE-2021-26934 has no known resolution
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2021-27363"
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2021-27364"
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2021-27365"
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2021-28038"
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2021-28039"
+
+# fixed-version: Fixed after version 5.12rc3
+CVE_CHECK_IGNORE += "CVE-2021-28375"
+
+# fixed-version: Fixed after version 5.12rc3
+CVE_CHECK_IGNORE += "CVE-2021-28660"
+
+# fixed-version: Fixed after version 5.12rc6
+CVE_CHECK_IGNORE += "CVE-2021-28688"
+
+# fixed-version: Fixed after version 5.13rc6
+CVE_CHECK_IGNORE += "CVE-2021-28691"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-28711"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-28712"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-28713"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-28714"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-28715"
+
+# fixed-version: Fixed after version 5.12rc4
+CVE_CHECK_IGNORE += "CVE-2021-28950"
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2021-28951"
+
+# fixed-version: Fixed after version 5.12rc4
+CVE_CHECK_IGNORE += "CVE-2021-28952"
+
+# fixed-version: Fixed after version 5.12rc4
+CVE_CHECK_IGNORE += "CVE-2021-28964"
+
+# fixed-version: Fixed after version 5.12rc4
+CVE_CHECK_IGNORE += "CVE-2021-28971"
+
+# fixed-version: Fixed after version 5.12rc4
+CVE_CHECK_IGNORE += "CVE-2021-28972"
+
+# fixed-version: Fixed after version 5.12rc7
+CVE_CHECK_IGNORE += "CVE-2021-29154"
+
+# fixed-version: Fixed after version 5.12rc8
+CVE_CHECK_IGNORE += "CVE-2021-29155"
+
+# fixed-version: Fixed after version 5.12rc3
+CVE_CHECK_IGNORE += "CVE-2021-29264"
+
+# fixed-version: Fixed after version 5.12rc3
+CVE_CHECK_IGNORE += "CVE-2021-29265"
+
+# fixed-version: Fixed after version 5.12rc4
+CVE_CHECK_IGNORE += "CVE-2021-29266"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2021-29646"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2021-29647"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2021-29648"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2021-29649"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2021-29650"
+
+# fixed-version: Fixed after version 5.12rc6
+CVE_CHECK_IGNORE += "CVE-2021-29657"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-30002"
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2021-30178"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-31440"
+
+# fixed-version: Fixed after version 5.11rc5
+CVE_CHECK_IGNORE += "CVE-2021-3178"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-31829"
+
+# fixed-version: Fixed after version 5.12rc5
+CVE_CHECK_IGNORE += "CVE-2021-31916"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-32078"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-32399"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2021-32606"
+
+# fixed-version: Fixed after version 5.12rc3
+CVE_CHECK_IGNORE += "CVE-2021-33033"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-33034"
+
+# CVE-2021-33061 needs backporting (fixed from 5.18rc1)
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2021-33098"
+
+# cpe-stable-backport: Backported in 5.15.29
+CVE_CHECK_IGNORE += "CVE-2021-33135"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2021-33200"
+
+# fixed-version: Fixed after version 5.11rc6
+CVE_CHECK_IGNORE += "CVE-2021-3347"
+
+# fixed-version: Fixed after version 5.11rc6
+CVE_CHECK_IGNORE += "CVE-2021-3348"
+
+# fixed-version: Fixed after version 5.13rc7
+CVE_CHECK_IGNORE += "CVE-2021-33624"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_IGNORE += "CVE-2021-33630"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2021-33631"
+
+# cpe-stable-backport: Backported in 5.15.54
+CVE_CHECK_IGNORE += "CVE-2021-33655"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-33656"
+
+# fixed-version: Fixed after version 5.14rc3
+CVE_CHECK_IGNORE += "CVE-2021-33909"
+
+# fixed-version: Fixed after version 5.10
+CVE_CHECK_IGNORE += "CVE-2021-3411"
+
+# fixed-version: Fixed after version 5.9rc2
+CVE_CHECK_IGNORE += "CVE-2021-3428"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-3444"
+
+# fixed-version: Fixed after version 5.14rc4
+CVE_CHECK_IGNORE += "CVE-2021-34556"
+
+# fixed-version: Fixed after version 5.13rc7
+CVE_CHECK_IGNORE += "CVE-2021-34693"
+
+# fixed-version: Fixed after version 5.12rc6
+CVE_CHECK_IGNORE += "CVE-2021-3483"
+
+# fixed-version: Fixed after version 5.14
+CVE_CHECK_IGNORE += "CVE-2021-34866"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2021-3489"
+
+# fixed-version: Fixed after version 5.13rc4
+CVE_CHECK_IGNORE += "CVE-2021-3490"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-3491"
+
+# CVE-2021-3492 has no known resolution
+
+# fixed-version: Fixed after version 5.11rc1
+CVE_CHECK_IGNORE += "CVE-2021-3493"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-34981"
+
+# fixed-version: Fixed after version 5.12rc8
+CVE_CHECK_IGNORE += "CVE-2021-3501"
+
+# fixed-version: Fixed after version 5.13
+CVE_CHECK_IGNORE += "CVE-2021-35039"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-3506"
+
+# CVE-2021-3542 has no known resolution
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-3543"
+
+# fixed-version: Fixed after version 5.14rc4
+CVE_CHECK_IGNORE += "CVE-2021-35477"
+
+# fixed-version: Fixed after version 5.13rc5
+CVE_CHECK_IGNORE += "CVE-2021-3564"
+
+# fixed-version: Fixed after version 5.13rc5
+CVE_CHECK_IGNORE += "CVE-2021-3573"
+
+# fixed-version: Fixed after version 5.13rc5
+CVE_CHECK_IGNORE += "CVE-2021-3587"
+
+# fixed-version: Fixed after version 5.11
+CVE_CHECK_IGNORE += "CVE-2021-3600"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-3609"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-3612"
+
+# fixed-version: Fixed after version 5.5rc7
+CVE_CHECK_IGNORE += "CVE-2021-3635"
+
+# cpe-stable-backport: Backported in 5.15.3
+CVE_CHECK_IGNORE += "CVE-2021-3640"
+
+# fixed-version: Fixed after version 5.14rc7
+CVE_CHECK_IGNORE += "CVE-2021-3653"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-3655"
+
+# fixed-version: Fixed after version 5.14rc7
+CVE_CHECK_IGNORE += "CVE-2021-3656"
+
+# fixed-version: Fixed after version 5.12rc7
+CVE_CHECK_IGNORE += "CVE-2021-3659"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-3669"
+
+# fixed-version: Fixed after version 5.14rc3
+CVE_CHECK_IGNORE += "CVE-2021-3679"
+
+# CVE-2021-3714 has no known resolution
+
+# fixed-version: Fixed after version 5.6
+CVE_CHECK_IGNORE += "CVE-2021-3715"
+
+# fixed-version: Fixed after version 5.14rc3
+CVE_CHECK_IGNORE += "CVE-2021-37159"
+
+# fixed-version: Fixed after version 5.14rc6
+CVE_CHECK_IGNORE += "CVE-2021-3732"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-3736"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-3739"
+
+# fixed-version: Fixed after version 5.13rc7
+CVE_CHECK_IGNORE += "CVE-2021-3743"
+
+# fixed-version: Fixed after version 5.15rc4
+CVE_CHECK_IGNORE += "CVE-2021-3744"
+
+# cpe-stable-backport: Backported in 5.15.3
+CVE_CHECK_IGNORE += "CVE-2021-3752"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-3753"
+
+# fixed-version: Fixed after version 5.14rc3
+CVE_CHECK_IGNORE += "CVE-2021-37576"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-3759"
+
+# fixed-version: Fixed after version 5.15rc6
+CVE_CHECK_IGNORE += "CVE-2021-3760"
+
+# fixed-version: Fixed after version 5.15rc4
+CVE_CHECK_IGNORE += "CVE-2021-3764"
+
+# fixed-version: Fixed after version 5.15
+CVE_CHECK_IGNORE += "CVE-2021-3772"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-38160"
+
+# fixed-version: Fixed after version 5.14rc6
+CVE_CHECK_IGNORE += "CVE-2021-38166"
+
+# fixed-version: Fixed after version 5.13rc6
+CVE_CHECK_IGNORE += "CVE-2021-38198"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-38199"
+
+# fixed-version: Fixed after version 5.13rc7
+CVE_CHECK_IGNORE += "CVE-2021-38200"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-38201"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-38202"
+
+# fixed-version: Fixed after version 5.14rc2
+CVE_CHECK_IGNORE += "CVE-2021-38203"
+
+# fixed-version: Fixed after version 5.14rc3
+CVE_CHECK_IGNORE += "CVE-2021-38204"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-38205"
+
+# fixed-version: Fixed after version 5.13rc7
+CVE_CHECK_IGNORE += "CVE-2021-38206"
+
+# fixed-version: Fixed after version 5.13rc7
+CVE_CHECK_IGNORE += "CVE-2021-38207"
+
+# fixed-version: Fixed after version 5.13rc5
+CVE_CHECK_IGNORE += "CVE-2021-38208"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-38209"
+
+# fixed-version: Fixed after version 5.15rc4
+CVE_CHECK_IGNORE += "CVE-2021-38300"
+
+# CVE-2021-3847 has no known resolution
+
+# CVE-2021-3864 has no known resolution
+
+# CVE-2021-3892 has no known resolution
+
+# fixed-version: Fixed after version 5.15rc6
+CVE_CHECK_IGNORE += "CVE-2021-3894"
+
+# fixed-version: Fixed after version 5.15rc6
+CVE_CHECK_IGNORE += "CVE-2021-3896"
+
+# cpe-stable-backport: Backported in 5.15.14
+CVE_CHECK_IGNORE += "CVE-2021-3923"
+
+# fixed-version: Fixed after version 5.14
+CVE_CHECK_IGNORE += "CVE-2021-39633"
+
+# fixed-version: Fixed after version 5.9rc8
+CVE_CHECK_IGNORE += "CVE-2021-39634"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_IGNORE += "CVE-2021-39636"
+
+# fixed-version: Fixed after version 5.11rc3
+CVE_CHECK_IGNORE += "CVE-2021-39648"
+
+# fixed-version: Fixed after version 5.12rc3
+CVE_CHECK_IGNORE += "CVE-2021-39656"
+
+# fixed-version: Fixed after version 5.11rc4
+CVE_CHECK_IGNORE += "CVE-2021-39657"
+
+# cpe-stable-backport: Backported in 5.15.8
+CVE_CHECK_IGNORE += "CVE-2021-39685"
+
+# cpe-stable-backport: Backported in 5.15.2
+CVE_CHECK_IGNORE += "CVE-2021-39686"
+
+# cpe-stable-backport: Backported in 5.15.8
+CVE_CHECK_IGNORE += "CVE-2021-39698"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_IGNORE += "CVE-2021-39711"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2021-39713"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-39714"
+
+# CVE-2021-39800 has no known resolution
+
+# CVE-2021-39801 has no known resolution
+
+# CVE-2021-39802 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.5
+CVE_CHECK_IGNORE += "CVE-2021-4001"
+
+# cpe-stable-backport: Backported in 5.15.5
+CVE_CHECK_IGNORE += "CVE-2021-4002"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-4023"
+
+# fixed-version: Fixed after version 5.15rc4
+CVE_CHECK_IGNORE += "CVE-2021-4028"
+
+# fixed-version: Fixed after version 5.15rc7
+CVE_CHECK_IGNORE += "CVE-2021-4032"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2021-4037"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-40490"
+
+# cpe-stable-backport: Backported in 5.15.7
+CVE_CHECK_IGNORE += "CVE-2021-4083"
+
+# cpe-stable-backport: Backported in 5.15.5
+CVE_CHECK_IGNORE += "CVE-2021-4090"
+
+# fixed-version: Fixed after version 5.15rc7
+CVE_CHECK_IGNORE += "CVE-2021-4093"
+
+# CVE-2021-4095 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: Fixed after version 5.15rc2
+CVE_CHECK_IGNORE += "CVE-2021-41073"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-4135"
+
+# fixed-version: Fixed after version 5.15
+CVE_CHECK_IGNORE += "CVE-2021-4148"
+
+# fixed-version: Fixed after version 5.15rc6
+CVE_CHECK_IGNORE += "CVE-2021-4149"
+
+# fixed-version: Fixed after version 5.15rc7
+CVE_CHECK_IGNORE += "CVE-2021-4150"
+
+# fixed-version: Fixed after version 5.14rc2
+CVE_CHECK_IGNORE += "CVE-2021-4154"
+
+# cpe-stable-backport: Backported in 5.15.14
+CVE_CHECK_IGNORE += "CVE-2021-4155"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-4157"
+
+# fixed-version: Fixed after version 5.7rc1
+CVE_CHECK_IGNORE += "CVE-2021-4159"
+
+# fixed-version: Fixed after version 5.15rc5
+CVE_CHECK_IGNORE += "CVE-2021-41864"
+
+# cpe-stable-backport: Backported in 5.15.14
+CVE_CHECK_IGNORE += "CVE-2021-4197"
+
+# fixed-version: Fixed after version 5.14rc7
+CVE_CHECK_IGNORE += "CVE-2021-42008"
+
+# cpe-stable-backport: Backported in 5.15.5
+CVE_CHECK_IGNORE += "CVE-2021-4202"
+
+# fixed-version: Fixed after version 5.15rc4
+CVE_CHECK_IGNORE += "CVE-2021-4203"
+
+# CVE-2021-4204 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: Fixed after version 5.8rc1
+CVE_CHECK_IGNORE += "CVE-2021-4218"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2021-42252"
+
+# fixed-version: Fixed after version 5.15
+CVE_CHECK_IGNORE += "CVE-2021-42327"
+
+# cpe-stable-backport: Backported in 5.15.1
+CVE_CHECK_IGNORE += "CVE-2021-42739"
+
+# fixed-version: Fixed after version 5.15rc6
+CVE_CHECK_IGNORE += "CVE-2021-43056"
+
+# fixed-version: Fixed after version 5.15rc3
+CVE_CHECK_IGNORE += "CVE-2021-43057"
+
+# fixed-version: Fixed after version 5.15
+CVE_CHECK_IGNORE += "CVE-2021-43267"
+
+# fixed-version: Fixed after version 5.15rc6
+CVE_CHECK_IGNORE += "CVE-2021-43389"
+
+# cpe-stable-backport: Backported in 5.15.7
+CVE_CHECK_IGNORE += "CVE-2021-43975"
+
+# cpe-stable-backport: Backported in 5.15.17
+CVE_CHECK_IGNORE += "CVE-2021-43976"
+
+# cpe-stable-backport: Backported in 5.15.12
+CVE_CHECK_IGNORE += "CVE-2021-44733"
+
+# cpe-stable-backport: Backported in 5.15.17
+CVE_CHECK_IGNORE += "CVE-2021-44879"
+
+# cpe-stable-backport: Backported in 5.15.14
+CVE_CHECK_IGNORE += "CVE-2021-45095"
+
+# cpe-stable-backport: Backported in 5.15.12
+CVE_CHECK_IGNORE += "CVE-2021-45100"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-45402"
+
+# cpe-stable-backport: Backported in 5.15.12
+CVE_CHECK_IGNORE += "CVE-2021-45469"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2021-45480"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2021-45485"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2021-45486"
+
+# cpe-stable-backport: Backported in 5.15.3
+CVE_CHECK_IGNORE += "CVE-2021-45868"
+
+# fixed-version: Fixed after version 5.13rc7
+CVE_CHECK_IGNORE += "CVE-2021-46283"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-0001"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-0002"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-0168"
+
+# cpe-stable-backport: Backported in 5.15.70
+CVE_CHECK_IGNORE += "CVE-2022-0171"
+
+# cpe-stable-backport: Backported in 5.15.16
+CVE_CHECK_IGNORE += "CVE-2022-0185"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2022-0264"
+
+# fixed-version: Fixed after version 5.14rc2
+CVE_CHECK_IGNORE += "CVE-2022-0286"
+
+# fixed-version: Fixed after version 5.15rc6
+CVE_CHECK_IGNORE += "CVE-2022-0322"
+
+# cpe-stable-backport: Backported in 5.15.18
+CVE_CHECK_IGNORE += "CVE-2022-0330"
+
+# cpe-stable-backport: Backported in 5.15.14
+CVE_CHECK_IGNORE += "CVE-2022-0382"
+
+# CVE-2022-0400 has no known resolution
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0433"
+
+# cpe-stable-backport: Backported in 5.15.23
+CVE_CHECK_IGNORE += "CVE-2022-0435"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2022-0480"
+
+# cpe-stable-backport: Backported in 5.15.23
+CVE_CHECK_IGNORE += "CVE-2022-0487"
+
+# cpe-stable-backport: Backported in 5.15.20
+CVE_CHECK_IGNORE += "CVE-2022-0492"
+
+# cpe-stable-backport: Backported in 5.15.27
+CVE_CHECK_IGNORE += "CVE-2022-0494"
+
+# cpe-stable-backport: Backported in 5.15.37
+CVE_CHECK_IGNORE += "CVE-2022-0500"
+
+# cpe-stable-backport: Backported in 5.15.23
+CVE_CHECK_IGNORE += "CVE-2022-0516"
+
+# cpe-stable-backport: Backported in 5.15.19
+CVE_CHECK_IGNORE += "CVE-2022-0617"
+
+# fixed-version: Fixed after version 5.15rc7
+CVE_CHECK_IGNORE += "CVE-2022-0644"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0646"
+
+# cpe-stable-backport: Backported in 5.15.27
+CVE_CHECK_IGNORE += "CVE-2022-0742"
+
+# fixed-version: Fixed after version 5.8rc6
+CVE_CHECK_IGNORE += "CVE-2022-0812"
+
+# cpe-stable-backport: Backported in 5.15.25
+CVE_CHECK_IGNORE += "CVE-2022-0847"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2022-0850"
+
+# fixed-version: only affects 5.17rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2022-0854"
+
+# cpe-stable-backport: Backported in 5.15.29
+CVE_CHECK_IGNORE += "CVE-2022-0995"
+
+# CVE-2022-0998 needs backporting (fixed from 5.17rc1)
+
+# cpe-stable-backport: Backported in 5.15.29
+CVE_CHECK_IGNORE += "CVE-2022-1011"
+
+# cpe-stable-backport: Backported in 5.15.41
+CVE_CHECK_IGNORE += "CVE-2022-1012"
+
+# cpe-stable-backport: Backported in 5.15.32
+CVE_CHECK_IGNORE += "CVE-2022-1015"
+
+# cpe-stable-backport: Backported in 5.15.32
+CVE_CHECK_IGNORE += "CVE-2022-1016"
+
+# fixed-version: Fixed after version 5.14rc7
+CVE_CHECK_IGNORE += "CVE-2022-1043"
+
+# cpe-stable-backport: Backported in 5.15.32
+CVE_CHECK_IGNORE += "CVE-2022-1048"
+
+# cpe-stable-backport: Backported in 5.15.20
+CVE_CHECK_IGNORE += "CVE-2022-1055"
+
+# CVE-2022-1116 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-1158"
+
+# cpe-stable-backport: Backported in 5.15.46
+CVE_CHECK_IGNORE += "CVE-2022-1184"
+
+# cpe-stable-backport: Backported in 5.15.12
+CVE_CHECK_IGNORE += "CVE-2022-1195"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-1198"
+
+# cpe-stable-backport: Backported in 5.15.29
+CVE_CHECK_IGNORE += "CVE-2022-1199"
+
+# cpe-stable-backport: Backported in 5.15.35
+CVE_CHECK_IGNORE += "CVE-2022-1204"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1205"
+
+# CVE-2022-1247 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.34
+CVE_CHECK_IGNORE += "CVE-2022-1263"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2022-1280"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-1353"
+
+# fixed-version: Fixed after version 5.6rc2
+CVE_CHECK_IGNORE += "CVE-2022-1419"
+
+# cpe-stable-backport: Backported in 5.15.58
+CVE_CHECK_IGNORE += "CVE-2022-1462"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2022-1508"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-1516"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-1651"
+
+# cpe-stable-backport: Backported in 5.15.42
+CVE_CHECK_IGNORE += "CVE-2022-1652"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-1671"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_IGNORE += "CVE-2022-1678"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-1679"
+
+# cpe-stable-backport: Backported in 5.15.42
+CVE_CHECK_IGNORE += "CVE-2022-1729"
+
+# cpe-stable-backport: Backported in 5.15.39
+CVE_CHECK_IGNORE += "CVE-2022-1734"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2022-1786"
+
+# cpe-stable-backport: Backported in 5.15.44
+CVE_CHECK_IGNORE += "CVE-2022-1789"
+
+# cpe-stable-backport: Backported in 5.15.37
+CVE_CHECK_IGNORE += "CVE-2022-1836"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-1852"
+
+# fixed-version: only affects 5.17rc8 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1882"
+
+# cpe-stable-backport: Backported in 5.15.40
+CVE_CHECK_IGNORE += "CVE-2022-1943"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-1966"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-1972"
+
+# cpe-stable-backport: Backported in 5.15.46
+CVE_CHECK_IGNORE += "CVE-2022-1973"
+
+# cpe-stable-backport: Backported in 5.15.39
+CVE_CHECK_IGNORE += "CVE-2022-1974"
+
+# cpe-stable-backport: Backported in 5.15.39
+CVE_CHECK_IGNORE += "CVE-2022-1975"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_IGNORE += "CVE-2022-1976"
+
+# cpe-stable-backport: Backported in 5.15.20
+CVE_CHECK_IGNORE += "CVE-2022-1998"
+
+# cpe-stable-backport: Backported in 5.15.25
+CVE_CHECK_IGNORE += "CVE-2022-20008"
+
+# cpe-stable-backport: Backported in 5.15.8
+CVE_CHECK_IGNORE += "CVE-2022-20132"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2022-20141"
+
+# cpe-stable-backport: Backported in 5.15.3
+CVE_CHECK_IGNORE += "CVE-2022-20148"
+
+# fixed-version: Fixed after version 5.13rc1
+CVE_CHECK_IGNORE += "CVE-2022-20153"
+
+# cpe-stable-backport: Backported in 5.15.13
+CVE_CHECK_IGNORE += "CVE-2022-20154"
+
+# cpe-stable-backport: Backported in 5.15.31
+CVE_CHECK_IGNORE += "CVE-2022-20158"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2022-20166"
+
+# cpe-stable-backport: Backported in 5.15.31
+CVE_CHECK_IGNORE += "CVE-2022-20368"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-20369"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2022-20409"
+
+# cpe-stable-backport: Backported in 5.15.66
+CVE_CHECK_IGNORE += "CVE-2022-20421"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-20422"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_IGNORE += "CVE-2022-20423"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2022-20424"
+
+# fixed-version: Fixed after version 5.9rc4
+CVE_CHECK_IGNORE += "CVE-2022-20565"
+
+# cpe-stable-backport: Backported in 5.15.59
+CVE_CHECK_IGNORE += "CVE-2022-20566"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_IGNORE += "CVE-2022-20567"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2022-20568"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-20572"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-2078"
+
+# cpe-stable-backport: Backported in 5.15.48
+CVE_CHECK_IGNORE += "CVE-2022-21123"
+
+# cpe-stable-backport: Backported in 5.15.48
+CVE_CHECK_IGNORE += "CVE-2022-21125"
+
+# cpe-stable-backport: Backported in 5.15.48
+CVE_CHECK_IGNORE += "CVE-2022-21166"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_IGNORE += "CVE-2022-21385"
+
+# cpe-stable-backport: Backported in 5.15.42
+CVE_CHECK_IGNORE += "CVE-2022-21499"
+
+# cpe-stable-backport: Backported in 5.15.58
+CVE_CHECK_IGNORE += "CVE-2022-21505"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-2153"
+
+# cpe-stable-backport: Backported in 5.15.96
+CVE_CHECK_IGNORE += "CVE-2022-2196"
+
+# CVE-2022-2209 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.18
+CVE_CHECK_IGNORE += "CVE-2022-22942"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23036"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23037"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23038"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23039"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23040"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23041"
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23042"
+
+# cpe-stable-backport: Backported in 5.15.72
+CVE_CHECK_IGNORE += "CVE-2022-2308"
+
+# cpe-stable-backport: Backported in 5.15.53
+CVE_CHECK_IGNORE += "CVE-2022-2318"
+
+# cpe-stable-backport: Backported in 5.15.37
+CVE_CHECK_IGNORE += "CVE-2022-23222"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2022-2327"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-2380"
+
+# cpe-stable-backport: Backported in 5.15.57
+CVE_CHECK_IGNORE += "CVE-2022-23816"
+
+# CVE-2022-23825 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.28
+CVE_CHECK_IGNORE += "CVE-2022-23960"
+
+# cpe-stable-backport: Backported in 5.15.19
+CVE_CHECK_IGNORE += "CVE-2022-24122"
+
+# cpe-stable-backport: Backported in 5.15.19
+CVE_CHECK_IGNORE += "CVE-2022-24448"
+
+# cpe-stable-backport: Backported in 5.15.27
+CVE_CHECK_IGNORE += "CVE-2022-24958"
+
+# cpe-stable-backport: Backported in 5.15.19
+CVE_CHECK_IGNORE += "CVE-2022-24959"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-2503"
+
+# cpe-stable-backport: Backported in 5.15.24
+CVE_CHECK_IGNORE += "CVE-2022-25258"
+
+# CVE-2022-25265 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.24
+CVE_CHECK_IGNORE += "CVE-2022-25375"
+
+# cpe-stable-backport: Backported in 5.15.26
+CVE_CHECK_IGNORE += "CVE-2022-25636"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-2585"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-2586"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-2588"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-2590"
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-2602"
+
+# cpe-stable-backport: Backported in 5.15.53
+CVE_CHECK_IGNORE += "CVE-2022-26365"
+
+# cpe-stable-backport: Backported in 5.15.60
+CVE_CHECK_IGNORE += "CVE-2022-26373"
+
+# cpe-stable-backport: Backported in 5.15.36
+CVE_CHECK_IGNORE += "CVE-2022-2639"
+
+# cpe-stable-backport: Backported in 5.15.32
+CVE_CHECK_IGNORE += "CVE-2022-26490"
+
+# cpe-stable-backport: Backported in 5.15.68
+CVE_CHECK_IGNORE += "CVE-2022-2663"
+
+# CVE-2022-26878 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.26
+CVE_CHECK_IGNORE += "CVE-2022-26966"
+
+# cpe-stable-backport: Backported in 5.15.26
+CVE_CHECK_IGNORE += "CVE-2022-27223"
+
+# cpe-stable-backport: Backported in 5.15.29
+CVE_CHECK_IGNORE += "CVE-2022-27666"
+
+# cpe-stable-backport: Backported in 5.15.94
+CVE_CHECK_IGNORE += "CVE-2022-27672"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-2785"
+
+# cpe-stable-backport: Backported in 5.15.25
+CVE_CHECK_IGNORE += "CVE-2022-27950"
+
+# cpe-stable-backport: Backported in 5.15.32
+CVE_CHECK_IGNORE += "CVE-2022-28356"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-28388"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-28389"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-28390"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-2873"
+
+# fixed-version: only affects 5.17rc3 onwards
+CVE_CHECK_IGNORE += "CVE-2022-28796"
+
+# cpe-stable-backport: Backported in 5.15.41
+CVE_CHECK_IGNORE += "CVE-2022-28893"
+
+# cpe-stable-backport: Backported in 5.15.64
+CVE_CHECK_IGNORE += "CVE-2022-2905"
+
+# cpe-stable-backport: Backported in 5.15.26
+CVE_CHECK_IGNORE += "CVE-2022-29156"
+
+# cpe-stable-backport: Backported in 5.15.19
+CVE_CHECK_IGNORE += "CVE-2022-2938"
+
+# cpe-stable-backport: Backported in 5.15.36
+CVE_CHECK_IGNORE += "CVE-2022-29581"
+
+# cpe-stable-backport: Backported in 5.15.34
+CVE_CHECK_IGNORE += "CVE-2022-29582"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-2959"
+
+# CVE-2022-2961 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.24
+CVE_CHECK_IGNORE += "CVE-2022-2964"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-2977"
+
+# cpe-stable-backport: Backported in 5.15.73
+CVE_CHECK_IGNORE += "CVE-2022-2978"
+
+# cpe-stable-backport: Backported in 5.15.57
+CVE_CHECK_IGNORE += "CVE-2022-29900"
+
+# cpe-stable-backport: Backported in 5.15.57
+CVE_CHECK_IGNORE += "CVE-2022-29901"
+
+# fixed-version: Fixed after version 5.15rc1
+CVE_CHECK_IGNORE += "CVE-2022-2991"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-29968"
+
+# cpe-stable-backport: Backported in 5.15.64
+CVE_CHECK_IGNORE += "CVE-2022-3028"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-30594"
+
+# cpe-stable-backport: Backported in 5.15.70
+CVE_CHECK_IGNORE += "CVE-2022-3061"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-3077"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-3078"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3103"
+
+# cpe-stable-backport: Backported in 5.15.47
+CVE_CHECK_IGNORE += "CVE-2022-3104"
+
+# cpe-stable-backport: Backported in 5.15.14
+CVE_CHECK_IGNORE += "CVE-2022-3105"
+
+# cpe-stable-backport: Backported in 5.15.11
+CVE_CHECK_IGNORE += "CVE-2022-3106"
+
+# cpe-stable-backport: Backported in 5.15.31
+CVE_CHECK_IGNORE += "CVE-2022-3107"
+
+# cpe-stable-backport: Backported in 5.15.27
+CVE_CHECK_IGNORE += "CVE-2022-3108"
+
+# cpe-stable-backport: Backported in 5.15.47
+CVE_CHECK_IGNORE += "CVE-2022-3110"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-3111"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-3112"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-3113"
+
+# CVE-2022-3114 needs backporting (fixed from 5.19rc1)
+
+# cpe-stable-backport: Backported in 5.15.46
+CVE_CHECK_IGNORE += "CVE-2022-3115"
+
+# cpe-stable-backport: Backported in 5.15.80
+CVE_CHECK_IGNORE += "CVE-2022-3169"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3170"
+
+# cpe-stable-backport: Backported in 5.15.65
+CVE_CHECK_IGNORE += "CVE-2022-3176"
+
+# cpe-stable-backport: Backported in 5.15.34
+CVE_CHECK_IGNORE += "CVE-2022-3202"
+
+# cpe-stable-backport: Backported in 5.15.45
+CVE_CHECK_IGNORE += "CVE-2022-32250"
+
+# cpe-stable-backport: Backported in 5.15.41
+CVE_CHECK_IGNORE += "CVE-2022-32296"
+
+# CVE-2022-3238 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2022-3239"
+
+# cpe-stable-backport: Backported in 5.15.47
+CVE_CHECK_IGNORE += "CVE-2022-32981"
+
+# cpe-stable-backport: Backported in 5.15.68
+CVE_CHECK_IGNORE += "CVE-2022-3303"
+
+# cpe-stable-backport: Backported in 5.15.81
+CVE_CHECK_IGNORE += "CVE-2022-3344"
+
+# cpe-stable-backport: Backported in 5.15.53
+CVE_CHECK_IGNORE += "CVE-2022-33740"
+
+# cpe-stable-backport: Backported in 5.15.53
+CVE_CHECK_IGNORE += "CVE-2022-33741"
+
+# cpe-stable-backport: Backported in 5.15.53
+CVE_CHECK_IGNORE += "CVE-2022-33742"
+
+# cpe-stable-backport: Backported in 5.15.53
+CVE_CHECK_IGNORE += "CVE-2022-33743"
+
+# cpe-stable-backport: Backported in 5.15.53
+CVE_CHECK_IGNORE += "CVE-2022-33744"
+
+# cpe-stable-backport: Backported in 5.15.37
+CVE_CHECK_IGNORE += "CVE-2022-33981"
+
+# cpe-stable-backport: Backported in 5.15.86
+CVE_CHECK_IGNORE += "CVE-2022-3424"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3435"
+
+# cpe-stable-backport: Backported in 5.15.47
+CVE_CHECK_IGNORE += "CVE-2022-34494"
+
+# cpe-stable-backport: Backported in 5.15.47
+CVE_CHECK_IGNORE += "CVE-2022-34495"
+
+# cpe-stable-backport: Backported in 5.15.54
+CVE_CHECK_IGNORE += "CVE-2022-34918"
+
+# cpe-stable-backport: Backported in 5.15.80
+CVE_CHECK_IGNORE += "CVE-2022-3521"
+
+# CVE-2022-3522 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3523 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.15.77
+CVE_CHECK_IGNORE += "CVE-2022-3524"
+
+# cpe-stable-backport: Backported in 5.15.35
+CVE_CHECK_IGNORE += "CVE-2022-3526"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3531"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3532"
+
+# CVE-2022-3533 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.86
+CVE_CHECK_IGNORE += "CVE-2022-3534"
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-3535"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3541"
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-3542"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2022-3543"
+
+# CVE-2022-3544 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.84
+CVE_CHECK_IGNORE += "CVE-2022-3545"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2022-3564"
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-3565"
+
+# CVE-2022-3566 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3567 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.15.46
+CVE_CHECK_IGNORE += "CVE-2022-3577"
+
+# cpe-stable-backport: Backported in 5.15.68
+CVE_CHECK_IGNORE += "CVE-2022-3586"
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-3594"
+
+# CVE-2022-3595 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3606 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.56
+CVE_CHECK_IGNORE += "CVE-2022-36123"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2022-3619"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-3621"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2022-3623"
+
+# CVE-2022-3624 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.15.63
+CVE_CHECK_IGNORE += "CVE-2022-3625"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2022-3628"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2022-36280"
+
+# cpe-stable-backport: Backported in 5.15.63
+CVE_CHECK_IGNORE += "CVE-2022-3629"
+
+# fixed-version: only affects 5.19rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3630"
+
+# cpe-stable-backport: Backported in 5.15.63
+CVE_CHECK_IGNORE += "CVE-2022-3633"
+
+# cpe-stable-backport: Backported in 5.15.63
+CVE_CHECK_IGNORE += "CVE-2022-3635"
+
+# CVE-2022-3636 needs backporting (fixed from 5.19rc1)
+
+# fixed-version: only affects 5.19 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3640"
+
+# cpe-stable-backport: Backported in 5.15.129
+CVE_CHECK_IGNORE += "CVE-2022-36402"
+
+# CVE-2022-3642 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.83
+CVE_CHECK_IGNORE += "CVE-2022-3643"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-3646"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-3649"
+
+# cpe-stable-backport: Backported in 5.15.58
+CVE_CHECK_IGNORE += "CVE-2022-36879"
+
+# cpe-stable-backport: Backported in 5.15.59
+CVE_CHECK_IGNORE += "CVE-2022-36946"
+
+# cpe-stable-backport: Backported in 5.15.96
+CVE_CHECK_IGNORE += "CVE-2022-3707"
+
+# CVE-2022-38096 has no known resolution
+
+# CVE-2022-38457 needs backporting (fixed from 6.2rc4)
+
+# CVE-2022-3903 needs backporting (fixed from 6.1rc2)
+
+# fixed-version: only affects 5.18 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3910"
+
+# CVE-2022-39188 needs backporting (fixed from 5.19rc8)
+
+# cpe-stable-backport: Backported in 5.15.60
+CVE_CHECK_IGNORE += "CVE-2022-39189"
+
+# cpe-stable-backport: Backported in 5.15.64
+CVE_CHECK_IGNORE += "CVE-2022-39190"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-3977"
+
+# cpe-stable-backport: Backported in 5.15.70
+CVE_CHECK_IGNORE += "CVE-2022-39842"
+
+# CVE-2022-40133 needs backporting (fixed from 6.2rc4)
+
+# cpe-stable-backport: Backported in 5.15.68
+CVE_CHECK_IGNORE += "CVE-2022-40307"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-40476"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-40768"
+
+# cpe-stable-backport: Backported in 5.15.66
+CVE_CHECK_IGNORE += "CVE-2022-4095"
+
+# cpe-stable-backport: Backported in 5.15.125
+CVE_CHECK_IGNORE += "CVE-2022-40982"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2022-41218"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2022-41222"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-4127"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2022-4128"
+
+# cpe-stable-backport: Backported in 5.15.91
+CVE_CHECK_IGNORE += "CVE-2022-4129"
+
+# fixed-version: only affects 5.17rc2 onwards
+CVE_CHECK_IGNORE += "CVE-2022-4139"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-41674"
+
+# CVE-2022-41848 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-41849"
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-41850"
+
+# cpe-stable-backport: Backported in 5.15.35
+CVE_CHECK_IGNORE += "CVE-2022-41858"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2022-42328"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2022-42329"
+
+# cpe-stable-backport: Backported in 5.15.71
+CVE_CHECK_IGNORE += "CVE-2022-42432"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2022-4269"
+
+# cpe-stable-backport: Backported in 5.15.65
+CVE_CHECK_IGNORE += "CVE-2022-42703"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-42719"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-42720"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-42721"
+
+# cpe-stable-backport: Backported in 5.15.74
+CVE_CHECK_IGNORE += "CVE-2022-42722"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2022-42895"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2022-42896"
+
+# cpe-stable-backport: Backported in 5.15.73
+CVE_CHECK_IGNORE += "CVE-2022-43750"
+
+# cpe-stable-backport: Backported in 5.15.82
+CVE_CHECK_IGNORE += "CVE-2022-4378"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2022-4379"
+
+# cpe-stable-backport: Backported in 5.15.90
+CVE_CHECK_IGNORE += "CVE-2022-4382"
+
+# cpe-stable-backport: Backported in 5.15.75
+CVE_CHECK_IGNORE += "CVE-2022-43945"
+
+# CVE-2022-44032 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44033 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44034 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-4543 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.82
+CVE_CHECK_IGNORE += "CVE-2022-45869"
+
+# CVE-2022-45884 has no known resolution
+
+# CVE-2022-45885 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.116
+CVE_CHECK_IGNORE += "CVE-2022-45886"
+
+# cpe-stable-backport: Backported in 5.15.116
+CVE_CHECK_IGNORE += "CVE-2022-45887"
+
+# CVE-2022-45888 needs backporting (fixed from 6.2rc1)
+
+# cpe-stable-backport: Backported in 5.15.116
+CVE_CHECK_IGNORE += "CVE-2022-45919"
+
+# cpe-stable-backport: Backported in 5.15.85
+CVE_CHECK_IGNORE += "CVE-2022-45934"
+
+# cpe-stable-backport: Backported in 5.15.66
+CVE_CHECK_IGNORE += "CVE-2022-4662"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2022-4696"
+
+# cpe-stable-backport: Backported in 5.15.12
+CVE_CHECK_IGNORE += "CVE-2022-4744"
+
+# cpe-stable-backport: Backported in 5.15.81
+CVE_CHECK_IGNORE += "CVE-2022-47518"
+
+# cpe-stable-backport: Backported in 5.15.81
+CVE_CHECK_IGNORE += "CVE-2022-47519"
+
+# cpe-stable-backport: Backported in 5.15.81
+CVE_CHECK_IGNORE += "CVE-2022-47520"
+
+# cpe-stable-backport: Backported in 5.15.81
+CVE_CHECK_IGNORE += "CVE-2022-47521"
+
+# cpe-stable-backport: Backported in 5.15.88
+CVE_CHECK_IGNORE += "CVE-2022-47929"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-47938"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-47939"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2022-47940"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2022-47941"
+
+# cpe-stable-backport: Backported in 5.15.62
+CVE_CHECK_IGNORE += "CVE-2022-47942"
+
+# cpe-stable-backport: Backported in 5.15.62
+CVE_CHECK_IGNORE += "CVE-2022-47943"
+
+# fixed-version: Fixed after version 5.12rc2
+CVE_CHECK_IGNORE += "CVE-2022-47946"
+
+# cpe-stable-backport: Backported in 5.15.90
+CVE_CHECK_IGNORE += "CVE-2022-4842"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2022-48423"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2022-48424"
+
+# cpe-stable-backport: Backported in 5.15.113
+CVE_CHECK_IGNORE += "CVE-2022-48425"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2022-48502"
+
+# cpe-stable-backport: Backported in 5.15.42
+CVE_CHECK_IGNORE += "CVE-2022-48619"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_IGNORE += "CVE-2023-0030"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2023-0045"
+
+# cpe-stable-backport: Backported in 5.15.3
+CVE_CHECK_IGNORE += "CVE-2023-0047"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-0122"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-0160"
+
+# cpe-stable-backport: Backported in 5.15.89
+CVE_CHECK_IGNORE += "CVE-2023-0179"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2023-0210"
+
+# fixed-version: Fixed after version 5.10rc1
+CVE_CHECK_IGNORE += "CVE-2023-0240"
+
+# cpe-stable-backport: Backported in 5.15.88
+CVE_CHECK_IGNORE += "CVE-2023-0266"
+
+# cpe-stable-backport: Backported in 5.15.91
+CVE_CHECK_IGNORE += "CVE-2023-0386"
+
+# cpe-stable-backport: Backported in 5.15.89
+CVE_CHECK_IGNORE += "CVE-2023-0394"
+
+# cpe-stable-backport: Backported in 5.15.90
+CVE_CHECK_IGNORE += "CVE-2023-0458"
+
+# cpe-stable-backport: Backported in 5.15.96
+CVE_CHECK_IGNORE += "CVE-2023-0459"
+
+# cpe-stable-backport: Backported in 5.15.88
+CVE_CHECK_IGNORE += "CVE-2023-0461"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-0468"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-0469"
+
+# cpe-stable-backport: Backported in 5.15.76
+CVE_CHECK_IGNORE += "CVE-2023-0590"
+
+# CVE-2023-0597 needs backporting (fixed from 6.2rc1)
+
+# cpe-stable-backport: Backported in 5.15.77
+CVE_CHECK_IGNORE += "CVE-2023-0615"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1032"
+
+# cpe-stable-backport: Backported in 5.15.91
+CVE_CHECK_IGNORE += "CVE-2023-1073"
+
+# cpe-stable-backport: Backported in 5.15.91
+CVE_CHECK_IGNORE += "CVE-2023-1074"
+
+# CVE-2023-1075 needs backporting (fixed from 6.2rc7)
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-1076"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-1077"
+
+# cpe-stable-backport: Backported in 5.15.94
+CVE_CHECK_IGNORE += "CVE-2023-1078"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-1079"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2023-1095"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-1118"
+
+# cpe-stable-backport: Backported in 5.15.113
+CVE_CHECK_IGNORE += "CVE-2023-1192"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-1193"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-1194"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1195"
+
+# cpe-stable-backport: Backported in 5.15.124
+CVE_CHECK_IGNORE += "CVE-2023-1206"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2023-1249"
+
+# cpe-stable-backport: Backported in 5.15.3
+CVE_CHECK_IGNORE += "CVE-2023-1252"
+
+# cpe-stable-backport: Backported in 5.15.95
+CVE_CHECK_IGNORE += "CVE-2023-1281"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2023-1295"
+
+# cpe-stable-backport: Backported in 5.15.110
+CVE_CHECK_IGNORE += "CVE-2023-1380"
+
+# cpe-stable-backport: Backported in 5.15.81
+CVE_CHECK_IGNORE += "CVE-2023-1382"
+
+# fixed-version: Fixed after version 5.11rc4
+CVE_CHECK_IGNORE += "CVE-2023-1390"
+
+# CVE-2023-1476 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.95
+CVE_CHECK_IGNORE += "CVE-2023-1513"
+
+# cpe-stable-backport: Backported in 5.15.25
+CVE_CHECK_IGNORE += "CVE-2023-1582"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1583"
+
+# cpe-stable-backport: Backported in 5.15.106
+CVE_CHECK_IGNORE += "CVE-2023-1611"
+
+# cpe-stable-backport: Backported in 5.15.34
+CVE_CHECK_IGNORE += "CVE-2023-1637"
+
+# cpe-stable-backport: Backported in 5.15.91
+CVE_CHECK_IGNORE += "CVE-2023-1652"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-1670"
+
+# cpe-stable-backport: Backported in 5.15.100
+CVE_CHECK_IGNORE += "CVE-2023-1829"
+
+# cpe-stable-backport: Backported in 5.15.42
+CVE_CHECK_IGNORE += "CVE-2023-1838"
+
+# cpe-stable-backport: Backported in 5.15.104
+CVE_CHECK_IGNORE += "CVE-2023-1855"
+
+# cpe-stable-backport: Backported in 5.15.108
+CVE_CHECK_IGNORE += "CVE-2023-1859"
+
+# CVE-2023-1872 needs backporting (fixed from 5.18rc2)
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-1989"
+
+# cpe-stable-backport: Backported in 5.15.104
+CVE_CHECK_IGNORE += "CVE-2023-1990"
+
+# fixed-version: only affects 5.19rc7 onwards
+CVE_CHECK_IGNORE += "CVE-2023-1998"
+
+# cpe-stable-backport: Backported in 5.15.110
+CVE_CHECK_IGNORE += "CVE-2023-2002"
+
+# cpe-stable-backport: Backported in 5.15.81
+CVE_CHECK_IGNORE += "CVE-2023-2006"
+
+# CVE-2023-2007 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.15.51
+CVE_CHECK_IGNORE += "CVE-2023-2008"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2023-2019"
+
+# cpe-stable-backport: Backported in 5.15.125
+CVE_CHECK_IGNORE += "CVE-2023-20569"
+
+# cpe-stable-backport: Backported in 5.15.126
+CVE_CHECK_IGNORE += "CVE-2023-20588"
+
+# cpe-stable-backport: Backported in 5.15.122
+CVE_CHECK_IGNORE += "CVE-2023-20593"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2023-20928"
+
+# CVE-2023-20937 has no known resolution
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-20938"
+
+# CVE-2023-20941 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.90
+CVE_CHECK_IGNORE += "CVE-2023-21102"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-21106"
+
+# cpe-stable-backport: Backported in 5.15.117
+CVE_CHECK_IGNORE += "CVE-2023-2124"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-21255"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-21264"
+
+# CVE-2023-21400 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.109
+CVE_CHECK_IGNORE += "CVE-2023-2156"
+
+# cpe-stable-backport: Backported in 5.15.93
+CVE_CHECK_IGNORE += "CVE-2023-2162"
+
+# cpe-stable-backport: Backported in 5.15.109
+CVE_CHECK_IGNORE += "CVE-2023-2163"
+
+# cpe-stable-backport: Backported in 5.15.83
+CVE_CHECK_IGNORE += "CVE-2023-2166"
+
+# CVE-2023-2176 needs backporting (fixed from 6.3rc1)
+
+# cpe-stable-backport: Backported in 5.15.59
+CVE_CHECK_IGNORE += "CVE-2023-2177"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-2194"
+
+# cpe-stable-backport: Backported in 5.15.104
+CVE_CHECK_IGNORE += "CVE-2023-2235"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2236"
+
+# cpe-stable-backport: Backported in 5.15.109
+CVE_CHECK_IGNORE += "CVE-2023-2248"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-2269"
+
+# CVE-2023-22995 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-22996"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-22997"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2023-22998"
+
+# cpe-stable-backport: Backported in 5.15.17
+CVE_CHECK_IGNORE += "CVE-2023-22999"
+
+# CVE-2023-23000 needs backporting (fixed from 5.17rc1)
+
+# cpe-stable-backport: Backported in 5.15.17
+CVE_CHECK_IGNORE += "CVE-2023-23001"
+
+# cpe-stable-backport: Backported in 5.15.17
+CVE_CHECK_IGNORE += "CVE-2023-23002"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-23003"
+
+# cpe-stable-backport: Backported in 5.15.100
+CVE_CHECK_IGNORE += "CVE-2023-23004"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-23005"
+
+# cpe-stable-backport: Backported in 5.15.13
+CVE_CHECK_IGNORE += "CVE-2023-23006"
+
+# CVE-2023-23039 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2023-23454"
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2023-23455"
+
+# cpe-stable-backport: Backported in 5.15.91
+CVE_CHECK_IGNORE += "CVE-2023-23559"
+
+# fixed-version: Fixed after version 5.12rc1
+CVE_CHECK_IGNORE += "CVE-2023-23586"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2430"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-2483"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-25012"
+
+# cpe-stable-backport: Backported in 5.15.61
+CVE_CHECK_IGNORE += "CVE-2023-2513"
+
+# cpe-stable-backport: Backported in 5.15.144
+CVE_CHECK_IGNORE += "CVE-2023-25775"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-2598"
+
+# CVE-2023-26242 has no known resolution
+
+# CVE-2023-2640 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.87
+CVE_CHECK_IGNORE += "CVE-2023-26544"
+
+# cpe-stable-backport: Backported in 5.15.95
+CVE_CHECK_IGNORE += "CVE-2023-26545"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-26605"
+
+# cpe-stable-backport: Backported in 5.15.86
+CVE_CHECK_IGNORE += "CVE-2023-26606"
+
+# cpe-stable-backport: Backported in 5.15.80
+CVE_CHECK_IGNORE += "CVE-2023-26607"
+
+# cpe-stable-backport: Backported in 5.15.83
+CVE_CHECK_IGNORE += "CVE-2023-28327"
+
+# cpe-stable-backport: Backported in 5.15.86
+CVE_CHECK_IGNORE += "CVE-2023-28328"
+
+# cpe-stable-backport: Backported in 5.15.33
+CVE_CHECK_IGNORE += "CVE-2023-28410"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-28464"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-28466"
+
+# cpe-stable-backport: Backported in 5.15.68
+CVE_CHECK_IGNORE += "CVE-2023-2860"
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2023-28772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-28866"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-2898"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-2985"
+
+# cpe-stable-backport: Backported in 5.15.77
+CVE_CHECK_IGNORE += "CVE-2023-3006"
+
+# Skipping CVE-2023-3022, no affected_versions
+
+# cpe-stable-backport: Backported in 5.15.104
+CVE_CHECK_IGNORE += "CVE-2023-30456"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-30772"
+
+# cpe-stable-backport: Backported in 5.15.113
+CVE_CHECK_IGNORE += "CVE-2023-3090"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_IGNORE += "CVE-2023-3106"
+
+# Skipping CVE-2023-3108, no affected_versions
+
+# CVE-2023-31081 has no known resolution
+
+# CVE-2023-31082 has no known resolution
+
+# CVE-2023-31083 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-31084 needs backporting (fixed from 6.4rc3)
+
+# cpe-stable-backport: Backported in 5.15.135
+CVE_CHECK_IGNORE += "CVE-2023-31085"
+
+# cpe-stable-backport: Backported in 5.15.63
+CVE_CHECK_IGNORE += "CVE-2023-3111"
+
+# cpe-stable-backport: Backported in 5.15.118
+CVE_CHECK_IGNORE += "CVE-2023-3117"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-31248"
+
+# cpe-stable-backport: Backported in 5.15.113
+CVE_CHECK_IGNORE += "CVE-2023-3141"
+
+# cpe-stable-backport: Backported in 5.15.109
+CVE_CHECK_IGNORE += "CVE-2023-31436"
+
+# cpe-stable-backport: Backported in 5.15.39
+CVE_CHECK_IGNORE += "CVE-2023-3159"
+
+# cpe-stable-backport: Backported in 5.15.93
+CVE_CHECK_IGNORE += "CVE-2023-3161"
+
+# cpe-stable-backport: Backported in 5.15.116
+CVE_CHECK_IGNORE += "CVE-2023-3212"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-3220"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-32233"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-32247"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-32248"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-32250"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-32252"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-32254"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-32257"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-32258"
+
+# cpe-stable-backport: Backported in 5.15.93
+CVE_CHECK_IGNORE += "CVE-2023-32269"
+
+# CVE-2023-32629 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-3268"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3269"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3312"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3317"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-33203"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-33250"
+
+# cpe-stable-backport: Backported in 5.15.105
+CVE_CHECK_IGNORE += "CVE-2023-33288"
+
+# cpe-stable-backport: Backported in 5.15.118
+CVE_CHECK_IGNORE += "CVE-2023-3338"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-3355"
+
+# cpe-stable-backport: Backported in 5.15.86
+CVE_CHECK_IGNORE += "CVE-2023-3357"
+
+# cpe-stable-backport: Backported in 5.15.91
+CVE_CHECK_IGNORE += "CVE-2023-3358"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3359"
+
+# CVE-2023-3389 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.15.118
+CVE_CHECK_IGNORE += "CVE-2023-3390"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-33951"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-33952"
+
+# CVE-2023-3397 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.117
+CVE_CHECK_IGNORE += "CVE-2023-34255"
+
+# cpe-stable-backport: Backported in 5.15.112
+CVE_CHECK_IGNORE += "CVE-2023-34256"
+
+# fixed-version: only affects 6.1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-34319"
+
+# cpe-stable-backport: Backported in 5.15.135
+CVE_CHECK_IGNORE += "CVE-2023-34324"
+
+# CVE-2023-3439 needs backporting (fixed from 5.18rc5)
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-35001"
+
+# cpe-stable-backport: Backported in 5.15.93
+CVE_CHECK_IGNORE += "CVE-2023-3567"
+
+# CVE-2023-35693 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.116
+CVE_CHECK_IGNORE += "CVE-2023-35788"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-35823"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-35824"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-35826"
+
+# cpe-stable-backport: Backported in 5.15.136
+CVE_CHECK_IGNORE += "CVE-2023-35827"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-35828"
+
+# cpe-stable-backport: Backported in 5.15.111
+CVE_CHECK_IGNORE += "CVE-2023-35829"
+
+# cpe-stable-backport: Backported in 5.15.118
+CVE_CHECK_IGNORE += "CVE-2023-3609"
+
+# cpe-stable-backport: Backported in 5.15.119
+CVE_CHECK_IGNORE += "CVE-2023-3610"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-3611"
+
+# CVE-2023-3640 has no known resolution
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-37453"
+
+# CVE-2023-37454 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.128
+CVE_CHECK_IGNORE += "CVE-2023-3772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-3773"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-3776"
+
+# cpe-stable-backport: Backported in 5.15.123
+CVE_CHECK_IGNORE += "CVE-2023-3777"
+
+# cpe-stable-backport: Backported in 5.15.78
+CVE_CHECK_IGNORE += "CVE-2023-3812"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-38409"
+
+# cpe-stable-backport: Backported in 5.15.113
+CVE_CHECK_IGNORE += "CVE-2023-38426"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-38427"
+
+# cpe-stable-backport: Backported in 5.15.113
+CVE_CHECK_IGNORE += "CVE-2023-38428"
+
+# cpe-stable-backport: Backported in 5.15.113
+CVE_CHECK_IGNORE += "CVE-2023-38429"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-38430"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-38431"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-38432"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-3863"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-3865"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-3866"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-3867"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-39189"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-39191"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-39192"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-39193"
+
+# cpe-stable-backport: Backported in 5.15.128
+CVE_CHECK_IGNORE += "CVE-2023-39194"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-39197"
+
+# cpe-stable-backport: Backported in 5.15.128
+CVE_CHECK_IGNORE += "CVE-2023-39198"
+
+# cpe-stable-backport: Backported in 5.15.123
+CVE_CHECK_IGNORE += "CVE-2023-4004"
+
+# CVE-2023-4010 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.124
+CVE_CHECK_IGNORE += "CVE-2023-4015"
+
+# cpe-stable-backport: Backported in 5.15.126
+CVE_CHECK_IGNORE += "CVE-2023-40283"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-40791"
+
+# cpe-stable-backport: Backported in 5.15.126
+CVE_CHECK_IGNORE += "CVE-2023-4128"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-4132"
+
+# CVE-2023-4133 needs backporting (fixed from 6.3)
+
+# CVE-2023-4134 needs backporting (fixed from 6.5rc1)
+
+# cpe-stable-backport: Backported in 5.15.124
+CVE_CHECK_IGNORE += "CVE-2023-4147"
+
+# CVE-2023-4155 needs backporting (fixed from 6.5rc6)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4194"
+
+# cpe-stable-backport: Backported in 5.15.126
+CVE_CHECK_IGNORE += "CVE-2023-4206"
+
+# cpe-stable-backport: Backported in 5.15.126
+CVE_CHECK_IGNORE += "CVE-2023-4207"
+
+# cpe-stable-backport: Backported in 5.15.126
+CVE_CHECK_IGNORE += "CVE-2023-4208"
+
+# cpe-stable-backport: Backported in 5.15.134
+CVE_CHECK_IGNORE += "CVE-2023-4244"
+
+# cpe-stable-backport: Backported in 5.15.128
+CVE_CHECK_IGNORE += "CVE-2023-4273"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-42752"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-42753"
+
+# cpe-stable-backport: Backported in 5.15.134
+CVE_CHECK_IGNORE += "CVE-2023-42754"
+
+# cpe-stable-backport: Backported in 5.15.133
+CVE_CHECK_IGNORE += "CVE-2023-42755"
+
+# fixed-version: only affects 6.4rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2023-42756"
+
+# cpe-stable-backport: Backported in 5.15.46
+CVE_CHECK_IGNORE += "CVE-2023-4385"
+
+# cpe-stable-backport: Backported in 5.15.42
+CVE_CHECK_IGNORE += "CVE-2023-4387"
+
+# cpe-stable-backport: Backported in 5.15.35
+CVE_CHECK_IGNORE += "CVE-2023-4389"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4394"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-44466"
+
+# cpe-stable-backport: Backported in 5.15.42
+CVE_CHECK_IGNORE += "CVE-2023-4459"
+
+# cpe-stable-backport: Backported in 5.15.134
+CVE_CHECK_IGNORE += "CVE-2023-4563"
+
+# cpe-stable-backport: Backported in 5.15.128
+CVE_CHECK_IGNORE += "CVE-2023-4569"
+
+# cpe-stable-backport: Backported in 5.15.100
+CVE_CHECK_IGNORE += "CVE-2023-45862"
+
+# cpe-stable-backport: Backported in 5.15.99
+CVE_CHECK_IGNORE += "CVE-2023-45863"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-45871"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-45898"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4610"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-4611"
+
+# CVE-2023-4622 needs backporting (fixed from 6.5rc1)
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-4623"
+
+# cpe-stable-backport: Backported in 5.15.137
+CVE_CHECK_IGNORE += "CVE-2023-46343"
+
+# cpe-stable-backport: Backported in 5.15.137
+CVE_CHECK_IGNORE += "CVE-2023-46813"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-46838"
+
+# cpe-stable-backport: Backported in 5.15.140
+CVE_CHECK_IGNORE += "CVE-2023-46862"
+
+# CVE-2023-47233 has no known resolution
+
+# fixed-version: Fixed after version 5.14rc1
+CVE_CHECK_IGNORE += "CVE-2023-4732"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-4881"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-4921"
+
+# CVE-2023-50431 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5090"
+
+# cpe-stable-backport: Backported in 5.15.128
+CVE_CHECK_IGNORE += "CVE-2023-51042"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2023-51043"
+
+# cpe-stable-backport: Backported in 5.15.135
+CVE_CHECK_IGNORE += "CVE-2023-5158"
+
+# cpe-stable-backport: Backported in 5.15.146
+CVE_CHECK_IGNORE += "CVE-2023-51779"
+
+# cpe-stable-backport: Backported in 5.15.137
+CVE_CHECK_IGNORE += "CVE-2023-5178"
+
+# cpe-stable-backport: Backported in 5.15.144
+CVE_CHECK_IGNORE += "CVE-2023-51780"
+
+# cpe-stable-backport: Backported in 5.15.144
+CVE_CHECK_IGNORE += "CVE-2023-51781"
+
+# cpe-stable-backport: Backported in 5.15.144
+CVE_CHECK_IGNORE += "CVE-2023-51782"
+
+# cpe-stable-backport: Backported in 5.15.134
+CVE_CHECK_IGNORE += "CVE-2023-5197"
+
+# cpe-stable-backport: Backported in 5.15.147
+CVE_CHECK_IGNORE += "CVE-2023-52340"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2023-52429"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52433"
+
+# CVE-2023-52434 needs backporting (fixed from 6.7rc6)
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2023-52435"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52436"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52438"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52439"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52440"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-52441"
+
+# cpe-stable-backport: Backported in 5.15.145
+CVE_CHECK_IGNORE += "CVE-2023-52442"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52443"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52444"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52445"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52446"
+
+# CVE-2023-52447 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52448"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52449"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52450"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52451"
+
+# CVE-2023-52452 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52453"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52454"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52455"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52456"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52457"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52458"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52459"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52460"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52461"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-52462"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52463"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-52464"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5345"
+
+# fixed-version: only affects 6.2 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5633"
+
+# cpe-stable-backport: Backported in 5.15.137
+CVE_CHECK_IGNORE += "CVE-2023-5717"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-5972"
+
+# CVE-2023-6039 needs backporting (fixed from 6.5rc5)
+
+# cpe-stable-backport: Backported in 5.15.147
+CVE_CHECK_IGNORE += "CVE-2023-6040"
+
+# fixed-version: only affects 6.6rc3 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6111"
+
+# cpe-stable-backport: Backported in 5.15.141
+CVE_CHECK_IGNORE += "CVE-2023-6121"
+
+# cpe-stable-backport: Backported in 5.15.132
+CVE_CHECK_IGNORE += "CVE-2023-6176"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6200"
+
+# CVE-2023-6238 has no known resolution
+
+# CVE-2023-6240 has no known resolution
+
+# CVE-2023-6270 has no known resolution
+
+# CVE-2023-6356 has no known resolution
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6531"
+
+# CVE-2023-6535 has no known resolution
+
+# CVE-2023-6536 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.128
+CVE_CHECK_IGNORE += "CVE-2023-6546"
+
+# CVE-2023-6560 needs backporting (fixed from 6.7rc4)
+
+# cpe-stable-backport: Backported in 5.15.146
+CVE_CHECK_IGNORE += "CVE-2023-6606"
+
+# CVE-2023-6610 needs backporting (fixed from 6.7rc7)
+
+# cpe-stable-backport: Backported in 5.15.143
+CVE_CHECK_IGNORE += "CVE-2023-6622"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2023-6679"
+
+# cpe-stable-backport: Backported in 5.15.143
+CVE_CHECK_IGNORE += "CVE-2023-6817"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2023-6915"
+
+# cpe-stable-backport: Backported in 5.15.143
+CVE_CHECK_IGNORE += "CVE-2023-6931"
+
+# cpe-stable-backport: Backported in 5.15.142
+CVE_CHECK_IGNORE += "CVE-2023-6932"
+
+# CVE-2023-7042 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.100
+CVE_CHECK_IGNORE += "CVE-2023-7192"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0193"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-0340"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0443"
+
+# cpe-stable-backport: Backported in 5.15.64
+CVE_CHECK_IGNORE += "CVE-2024-0562"
+
+# CVE-2024-0564 has no known resolution
+
+# CVE-2024-0565 needs backporting (fixed from 6.7rc6)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-0582"
+
+# cpe-stable-backport: Backported in 5.15.142
+CVE_CHECK_IGNORE += "CVE-2024-0584"
+
+# cpe-stable-backport: Backported in 5.15.140
+CVE_CHECK_IGNORE += "CVE-2024-0607"
+
+# cpe-stable-backport: Backported in 5.15.121
+CVE_CHECK_IGNORE += "CVE-2024-0639"
+
+# cpe-stable-backport: Backported in 5.15.135
+CVE_CHECK_IGNORE += "CVE-2024-0641"
+
+# cpe-stable-backport: Backported in 5.15.147
+CVE_CHECK_IGNORE += "CVE-2024-0646"
+
+# cpe-stable-backport: Backported in 5.15.112
+CVE_CHECK_IGNORE += "CVE-2024-0775"
+
+# CVE-2024-0841 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2024-1085"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-1086"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-1151"
+
+# CVE-2024-1312 needs backporting (fixed from 6.5rc4)
+
+# CVE-2024-21803 has no known resolution
+
+# CVE-2024-22099 has no known resolution
+
+# CVE-2024-22386 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.146
+CVE_CHECK_IGNORE += "CVE-2024-22705"
+
+# CVE-2024-23196 has no known resolution
+
+# CVE-2024-23307 has no known resolution
+
+# CVE-2024-23848 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-23849"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-23850"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-23851"
+
+# CVE-2024-24855 needs backporting (fixed from 6.5rc2)
+
+# CVE-2024-24857 has no known resolution
+
+# CVE-2024-24858 has no known resolution
+
+# CVE-2024-24859 has no known resolution
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2024-24860"
+
+# CVE-2024-24861 has no known resolution
+
+# CVE-2024-24864 has no known resolution
+
+# CVE-2024-25739 has no known resolution
+
+# CVE-2024-25740 has no known resolution
+
+# CVE-2024-25741 has no known resolution
+
+# CVE-2024-25744 needs backporting (fixed from 6.7rc5)
+
+# fixed-version: only affects 6.5rc4 onwards
+CVE_CHECK_IGNORE += "CVE-2024-26581"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-26582"
+
+# CVE-2024-26583 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26584 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26585 needs backporting (fixed from 6.8rc5)
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2024-26586"
+
+# CVE-2024-26587 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26588 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2024-26589"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-26590"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2024-26591"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-26592"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-26593"
+
+# cpe-stable-backport: Backported in 5.15.149
+CVE_CHECK_IGNORE += "CVE-2024-26594"
+
+# CVE-2024-26595 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-26596"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2024-26597"
+
+# cpe-stable-backport: Backported in 5.15.148
+CVE_CHECK_IGNORE += "CVE-2024-26598"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_IGNORE += "CVE-2024-26599"
+
diff --git a/meta/recipes-kernel/linux/generate-cve-exclusions.py b/meta/recipes-kernel/linux/generate-cve-exclusions.py
new file mode 100755
index 0000000000..b9b87f245d
--- /dev/null
+++ b/meta/recipes-kernel/linux/generate-cve-exclusions.py
@@ -0,0 +1,101 @@
+#! /usr/bin/env python3
+
+# Generate granular CVE status metadata for a specific version of the kernel
+# using data from linuxkernelcves.com.
+#
+# SPDX-License-Identifier: GPL-2.0-only
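+#
+# The script reads kernel_cves.json and stream_fixes.json from the given data
+# clone and writes the generated metadata to stdout. A typical invocation
+# (paths and output file name here are illustrative) might look like:
+#   ./generate-cve-exclusions.py ~/linux_kernel_cves 5.15.150 > cve-exclusion_5.15.inc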
+
+import argparse
+import datetime
+import json
+import pathlib
+import re
+
+from packaging.version import Version
+
+
+def parse_version(s):
+ """
+ Parse the version string and either return a packaging.version.Version, or
+ None if the string was unset or "unk".
+ """
+ if s and s != "unk":
+ # packaging.version.Version doesn't approve of versions like v5.12-rc1-dontuse
+ s = s.replace("-dontuse", "")
+ return Version(s)
+ return None
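+ # For example: "5.15.150" parses to Version("5.15.150"), while "" or "unk"
+ # (i.e. no fixed version recorded) yields None.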
+
+
+def main(argp=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("datadir", type=pathlib.Path, help="Path to a clone of https://github.com/nluedtke/linux_kernel_cves")
+ parser.add_argument("version", type=Version, help="Kernel version number to generate data for, such as 6.1.38")
+
+ args = parser.parse_args(argp)
+ datadir = args.datadir
+ version = args.version
+ base_version = f"{version.major}.{version.minor}"
+
+ with open(datadir / "data" / "kernel_cves.json", "r") as f:
+ cve_data = json.load(f)
+
+ with open(datadir / "data" / "stream_fixes.json", "r") as f:
+ stream_data = json.load(f)
+
+ print(f"""
+# Auto-generated CVE metadata, DO NOT EDIT BY HAND.
+# Generated at {datetime.datetime.now()} for version {version}
+
+python check_kernel_cve_status_version() {{
+ this_version = "{version}"
+ kernel_version = d.getVar("LINUX_VERSION")
+ if kernel_version != this_version:
+ bb.warn("Kernel CVE status needs updating: generated for %s but kernel is %s" % (this_version, kernel_version))
+}}
+do_cve_check[prefuncs] += "check_kernel_cve_status_version"
+""")
+
+ for cve, data in cve_data.items():
+ if "affected_versions" not in data:
+ print(f"# Skipping {cve}, no affected_versions")
+ print()
+ continue
+
+ affected = data["affected_versions"]
+ first_affected, last_affected = re.search(r"(.+) to (.+)", affected).groups()
+ first_affected = parse_version(first_affected)
+ last_affected = parse_version(last_affected)
+
+ handled = False
+ if not last_affected:
+ print(f"# {cve} has no known resolution")
+ elif first_affected and version < first_affected:
+ print(f"# fixed-version: only affects {first_affected} onwards")
+ handled = True
+ elif last_affected < version:
+ print(f"# fixed-version: Fixed after version {last_affected}")
+ handled = True
+ else:
+ if cve in stream_data:
+ backport_data = stream_data[cve]
+ if base_version in backport_data:
+ backport_ver = Version(backport_data[base_version]["fixed_version"])
+ if backport_ver <= version:
+ print(f"# cpe-stable-backport: Backported in {backport_ver}")
+ handled = True
+ else:
+ # TODO print a note that the kernel needs bumping
+ print(f"# {cve} needs backporting (fixed from {backport_ver})")
+ else:
+ print(f"# {cve} needs backporting (fixed from {last_affected})")
+ else:
+ print(f"# {cve} needs backporting (fixed from {last_affected})")
+
+ if handled:
+ print(f'CVE_CHECK_IGNORE += "{cve}"')
+
+ print()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/meta/recipes-kernel/linux/kernel-devsrc.bb b/meta/recipes-kernel/linux/kernel-devsrc.bb
index 9654488a54..ed9746f837 100644
--- a/meta/recipes-kernel/linux/kernel-devsrc.bb
+++ b/meta/recipes-kernel/linux/kernel-devsrc.bb
@@ -128,8 +128,12 @@ do_install() {
# breaks workflows.
cp -a --parents include/generated/autoconf.h $kerneldir/build 2>/dev/null || :
- if [ -e $kerneldir/include/generated/.vdso-offsets.h.cmd ]; then
- rm $kerneldir/include/generated/.vdso-offsets.h.cmd
+ if [ -e $kerneldir/include/generated/.vdso-offsets.h.cmd ] ||
+ [ -e $kerneldir/build/include/generated/.vdso-offsets.h.cmd ] ||
+ [ -e $kerneldir/build/include/generated/.vdso32-offsets.h.cmd ] ; then
+ rm -f $kerneldir/include/generated/.vdso-offsets.h.cmd
+ rm -f $kerneldir/build/include/generated/.vdso-offsets.h.cmd
+ rm -f $kerneldir/build/include/generated/.vdso32-offsets.h.cmd
fi
)
@@ -330,7 +334,7 @@ do_install[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
FILES:${PN} = "${KERNEL_BUILD_ROOT} ${KERNEL_SRC_PATH}"
FILES:${PN}-dbg += "${KERNEL_BUILD_ROOT}*/build/scripts/*/.debug/*"
-RDEPENDS:${PN} = "bc python3 flex bison ${TCLIBC}-utils"
+RDEPENDS:${PN} = "bc python3-core flex bison ${TCLIBC}-utils"
# 4.15+ needs these next two RDEPENDS
RDEPENDS:${PN} += "openssl-dev util-linux"
# and x86 needs a bit more for 4.15+
diff --git a/meta/recipes-kernel/linux/linux-yocto-dev.bb b/meta/recipes-kernel/linux/linux-yocto-dev.bb
index d35632071b..94800aeaca 100644
--- a/meta/recipes-kernel/linux/linux-yocto-dev.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-dev.bb
@@ -10,8 +10,6 @@
inherit kernel
require recipes-kernel/linux/linux-yocto.inc
-# for ncurses tests
-inherit pkgconfig
# provide this .inc to set specific revisions
include recipes-kernel/linux/linux-yocto-dev-revisions.inc
@@ -50,7 +48,7 @@ PACKAGECONFIG[dt-validation] = ",,python3-dtschema-native"
# we need the wrappers if validation isn't in the packageconfig
DEPENDS += "${@bb.utils.contains('PACKAGECONFIG', 'dt-validation', '', 'python3-dtschema-wrapper-native', d)}"
-COMPATIBLE_MACHINE = "(qemuarm|qemux86|qemuppc|qemumips|qemumips64|qemux86-64|qemuriscv32|qemuriscv64)"
+COMPATIBLE_MACHINE = "^(qemuarm|qemuarm64|qemux86|qemuppc|qemumips|qemumips64|qemux86-64|qemuriscv32|qemuriscv64)$"
KERNEL_DEVICETREE:qemuarmv5 = "versatile-pb.dtb"
diff --git a/meta/recipes-kernel/linux/linux-yocto-rt_5.10.bb b/meta/recipes-kernel/linux/linux-yocto-rt_5.10.bb
index ce903a7372..95aa8a29b2 100644
--- a/meta/recipes-kernel/linux/linux-yocto-rt_5.10.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-rt_5.10.bb
@@ -11,13 +11,13 @@ python () {
raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it")
}
-SRCREV_machine ?= "5629d552de7759e4bb9821e210f3673c14900dd0"
-SRCREV_meta ?= "2f6fa8da5f84c343e6ea57c76829eaca1cc6a840"
+SRCREV_machine ?= "d4e1591e2a700e5317e604af9c46dc9f92d87527"
+SRCREV_meta ?= "b890cbbdcbc8498d1c84ec782bb5de8a51eb7d6d"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.10;destsuffix=${KMETA}"
-LINUX_VERSION ?= "5.10.119"
+LINUX_VERSION ?= "5.10.210"
LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46"
@@ -31,7 +31,7 @@ KCONF_BSP_AUDIT_LEVEL = "1"
LINUX_KERNEL_TYPE = "preempt-rt"
-COMPATIBLE_MACHINE = "(qemux86|qemux86-64|qemuarm|qemuarmv5|qemuarm64|qemuppc|qemumips)"
+COMPATIBLE_MACHINE = "^(qemux86|qemux86-64|qemuarm|qemuarmv5|qemuarm64|qemuppc|qemumips)$"
KERNEL_DEVICETREE:qemuarmv5 = "versatile-pb.dtb"
diff --git a/meta/recipes-kernel/linux/linux-yocto-rt_5.15.bb b/meta/recipes-kernel/linux/linux-yocto-rt_5.15.bb
index b052db8a15..00c03411b1 100644
--- a/meta/recipes-kernel/linux/linux-yocto-rt_5.15.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-rt_5.15.bb
@@ -11,13 +11,13 @@ python () {
raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it")
}
-SRCREV_machine ?= "83bec9a458bfdfb46c5dc62342bafae2806043d4"
-SRCREV_meta ?= "947149960e1426ace478e4b52c28a28ef8d6e74b"
+SRCREV_machine ?= "da32201bc41d994b0300c6b4738505f4875dc190"
+SRCREV_meta ?= "bef59dc5a78b4d101d1be23d4b36a73fd849241a"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.15;destsuffix=${KMETA}"
-LINUX_VERSION ?= "5.15.44"
+LINUX_VERSION ?= "5.15.150"
LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46"
@@ -31,7 +31,7 @@ KCONF_BSP_AUDIT_LEVEL = "1"
LINUX_KERNEL_TYPE = "preempt-rt"
-COMPATIBLE_MACHINE = "(qemux86|qemux86-64|qemuarm|qemuarmv5|qemuarm64|qemuppc|qemumips)"
+COMPATIBLE_MACHINE = "^(qemux86|qemux86-64|qemuarm|qemuarmv5|qemuarm64|qemuppc|qemumips)$"
KERNEL_DEVICETREE:qemuarmv5 = "versatile-pb.dtb"
diff --git a/meta/recipes-kernel/linux/linux-yocto-tiny_5.10.bb b/meta/recipes-kernel/linux/linux-yocto-tiny_5.10.bb
index 625cdfd085..832d030031 100644
--- a/meta/recipes-kernel/linux/linux-yocto-tiny_5.10.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-tiny_5.10.bb
@@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "5.10.119"
+LINUX_VERSION ?= "5.10.210"
LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
@@ -15,16 +15,16 @@ DEPENDS += "openssl-native util-linux-native"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine:qemuarm ?= "50342ca6fc7dd66bfde9dfd1d6c336f99c55e0ab"
-SRCREV_machine ?= "d2cfd8b4499710877d54129a9137351a9de166fb"
-SRCREV_meta ?= "2f6fa8da5f84c343e6ea57c76829eaca1cc6a840"
+SRCREV_machine:qemuarm ?= "0f8e37aa2c623070ac74f73e6d41ed51fab54b4c"
+SRCREV_machine ?= "2c2439238ab602a10ad6d7aa6b210562ee1df595"
+SRCREV_meta ?= "b890cbbdcbc8498d1c84ec782bb5de8a51eb7d6d"
PV = "${LINUX_VERSION}+git${SRCPV}"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.10;destsuffix=${KMETA}"
-COMPATIBLE_MACHINE = "qemux86|qemux86-64|qemuarm|qemuarmv5"
+COMPATIBLE_MACHINE = "^(qemux86|qemux86-64|qemuarm|qemuarmv5)$"
# Functionality flags
KERNEL_FEATURES = ""
diff --git a/meta/recipes-kernel/linux/linux-yocto-tiny_5.15.bb b/meta/recipes-kernel/linux/linux-yocto-tiny_5.15.bb
index f6110354a0..2051d1c0a1 100644
--- a/meta/recipes-kernel/linux/linux-yocto-tiny_5.15.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-tiny_5.15.bb
@@ -5,7 +5,7 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "5.15.44"
+LINUX_VERSION ?= "5.15.150"
LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
@@ -14,15 +14,15 @@ DEPENDS += "openssl-native util-linux-native"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine ?= "768cd295dfc0a311d2e53c1b05e19a246e184d0e"
-SRCREV_meta ?= "947149960e1426ace478e4b52c28a28ef8d6e74b"
+SRCREV_machine ?= "540fc92dd7359025bb09962431565b5a9627536b"
+SRCREV_meta ?= "bef59dc5a78b4d101d1be23d4b36a73fd849241a"
PV = "${LINUX_VERSION}+git${SRCPV}"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.15;destsuffix=${KMETA}"
-COMPATIBLE_MACHINE = "qemux86|qemux86-64|qemuarm64|qemuarm|qemuarmv5"
+COMPATIBLE_MACHINE = "^(qemux86|qemux86-64|qemuarm64|qemuarm|qemuarmv5)$"
# Functionality flags
KERNEL_FEATURES = ""
diff --git a/meta/recipes-kernel/linux/linux-yocto.inc b/meta/recipes-kernel/linux/linux-yocto.inc
index cabc8f4975..4943d5ab57 100644
--- a/meta/recipes-kernel/linux/linux-yocto.inc
+++ b/meta/recipes-kernel/linux/linux-yocto.inc
@@ -46,7 +46,6 @@ LINUX_VERSION_EXTENSION ??= "-yocto-${LINUX_KERNEL_TYPE}"
# Pick up shared functions
inherit kernel
inherit kernel-yocto
-inherit pkgconfig
B = "${WORKDIR}/linux-${PACKAGE_ARCH}-${LINUX_KERNEL_TYPE}-build"
@@ -60,7 +59,7 @@ do_install:append(){
KERNEL_FEATURES:append:qemuall=" features/kernel-sample/kernel-sample.scc"
KERNEL_DEBUG_OPTIONS ?= "stack"
-KERNEL_EXTRA_ARGS:append:x86-64 = "${@bb.utils.contains('KERNEL_DEBUG_OPTIONS', 'stack', 'HOST_LIBELF_LIBS="-L${RECIPE_SYSROOT_NATIVE}/usr/lib/pkgconfig/../../../usr/lib/ -lelf"', '', d)}"
+KERNEL_EXTRA_ARGS:append:x86-64 = " ${@bb.utils.contains('KERNEL_DEBUG_OPTIONS', 'stack', 'HOST_LIBELF_LIBS="-L${RECIPE_SYSROOT_NATIVE}/usr/lib/pkgconfig/../../../usr/lib/ -lelf"', '', d)}"
do_devshell:prepend() {
# setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overriden to pkg-config-native)
@@ -70,3 +69,6 @@ do_devshell:prepend() {
d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
}
+
+# CVE exclusion
+include recipes-kernel/linux/cve-exclusion.inc
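The KERNEL_EXTRA_ARGS hunk above gains a leading space in the appended value because the :append operator concatenates literally, with no separator, so without it the conditional HOST_LIBELF_LIBS fragment would run straight into whatever precedes it. A minimal sketch of that concatenation behaviour, with placeholder values:

    # Placeholder values, for illustration only.
    existing = "V=1"
    fragment = 'HOST_LIBELF_LIBS="-lelf"'

    print(existing + fragment)        # V=1HOST_LIBELF_LIBS="-lelf"
    print(existing + " " + fragment)  # V=1 HOST_LIBELF_LIBS="-lelf"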
diff --git a/meta/recipes-kernel/linux/linux-yocto_5.10.bb b/meta/recipes-kernel/linux/linux-yocto_5.10.bb
index fcb7b83cec..48fb4868fb 100644
--- a/meta/recipes-kernel/linux/linux-yocto_5.10.bb
+++ b/meta/recipes-kernel/linux/linux-yocto_5.10.bb
@@ -1,6 +1,7 @@
KBRANCH ?= "v5.10/standard/base"
require recipes-kernel/linux/linux-yocto.inc
+include cve-exclusion_5.10.inc
# board specific branches
KBRANCH:qemuarm ?= "v5.10/standard/arm-versatile-926ejs"
@@ -13,23 +14,23 @@ KBRANCH:qemux86 ?= "v5.10/standard/base"
KBRANCH:qemux86-64 ?= "v5.10/standard/base"
KBRANCH:qemumips64 ?= "v5.10/standard/mti-malta64"
-SRCREV_machine:qemuarm ?= "68264cfbddebea663543a7c4ad5131c6cf63d3d2"
-SRCREV_machine:qemuarm64 ?= "b95be3e8c15d939ce402775de98ab80eda493b11"
-SRCREV_machine:qemumips ?= "c6a8eaf00384dcce14bb9e28f2d68b5004e8c6f3"
-SRCREV_machine:qemuppc ?= "6c1e46f34c6b9ababf8c6fcb4c01274099bb034f"
-SRCREV_machine:qemuriscv64 ?= "f844c3765c3270321f0b3347992565cfdb938c99"
-SRCREV_machine:qemuriscv32 ?= "f844c3765c3270321f0b3347992565cfdb938c99"
-SRCREV_machine:qemux86 ?= "f844c3765c3270321f0b3347992565cfdb938c99"
-SRCREV_machine:qemux86-64 ?= "f844c3765c3270321f0b3347992565cfdb938c99"
-SRCREV_machine:qemumips64 ?= "8d66b3ad7fbc8554ba2248cfbe755f8d24cb5a1a"
-SRCREV_machine ?= "f844c3765c3270321f0b3347992565cfdb938c99"
-SRCREV_meta ?= "2f6fa8da5f84c343e6ea57c76829eaca1cc6a840"
+SRCREV_machine:qemuarm ?= "b1aa548003c2c76a8aa7d7ad83c7070cac5a4f21"
+SRCREV_machine:qemuarm64 ?= "32d0647cd0dfa7361a5dfdde34d39192f179f6bf"
+SRCREV_machine:qemumips ?= "8780d18791f93bacc3a7d0529fdeb4e31adacafa"
+SRCREV_machine:qemuppc ?= "9353fbbd955b1f29e56876aa332473e1029e9e96"
+SRCREV_machine:qemuriscv64 ?= "ee0d8dfb898ca2dc199437e79efaa02723ff9378"
+SRCREV_machine:qemuriscv32 ?= "ee0d8dfb898ca2dc199437e79efaa02723ff9378"
+SRCREV_machine:qemux86 ?= "ee0d8dfb898ca2dc199437e79efaa02723ff9378"
+SRCREV_machine:qemux86-64 ?= "ee0d8dfb898ca2dc199437e79efaa02723ff9378"
+SRCREV_machine:qemumips64 ?= "136942e72021d05bd1d93389d94a20f763dd8336"
+SRCREV_machine ?= "ee0d8dfb898ca2dc199437e79efaa02723ff9378"
+SRCREV_meta ?= "b890cbbdcbc8498d1c84ec782bb5de8a51eb7d6d"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRANCH}; \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.10;destsuffix=${KMETA}"
LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46"
-LINUX_VERSION ?= "5.10.119"
+LINUX_VERSION ?= "5.10.210"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
DEPENDS += "openssl-native util-linux-native"
@@ -42,7 +43,7 @@ KCONF_BSP_AUDIT_LEVEL = "1"
KERNEL_DEVICETREE:qemuarmv5 = "versatile-pb.dtb"
-COMPATIBLE_MACHINE = "qemuarm|qemuarmv5|qemuarm64|qemux86|qemuppc|qemuppc64|qemumips|qemumips64|qemux86-64|qemuriscv64|qemuriscv32"
+COMPATIBLE_MACHINE = "^(qemuarm|qemuarmv5|qemuarm64|qemux86|qemuppc|qemuppc64|qemumips|qemumips64|qemux86-64|qemuriscv64|qemuriscv32)$"
# Functionality flags
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc"
diff --git a/meta/recipes-kernel/linux/linux-yocto_5.15.bb b/meta/recipes-kernel/linux/linux-yocto_5.15.bb
index 3cb5f93a21..101aceb3dc 100644
--- a/meta/recipes-kernel/linux/linux-yocto_5.15.bb
+++ b/meta/recipes-kernel/linux/linux-yocto_5.15.bb
@@ -1,6 +1,7 @@
KBRANCH ?= "v5.15/standard/base"
require recipes-kernel/linux/linux-yocto.inc
+include cve-exclusion_5.15.inc
# board specific branches
KBRANCH:qemuarm ?= "v5.15/standard/arm-versatile-926ejs"
@@ -13,24 +14,24 @@ KBRANCH:qemux86 ?= "v5.15/standard/base"
KBRANCH:qemux86-64 ?= "v5.15/standard/base"
KBRANCH:qemumips64 ?= "v5.15/standard/mti-malta64"
-SRCREV_machine:qemuarm ?= "1585df4dbb1e353cc1472748186504a377de5dce"
-SRCREV_machine:qemuarm64 ?= "363482e730cbf7f772430c99c83f60ddc01d7518"
-SRCREV_machine:qemumips ?= "f534cfa2119dc7cf07d546d9fed2b822af1bc838"
-SRCREV_machine:qemuppc ?= "c019857d9af595336bb272c981150952b87d2639"
-SRCREV_machine:qemuriscv64 ?= "eb3df10e3f3146911fc3235c458c8ef1660ae9df"
-SRCREV_machine:qemuriscv32 ?= "eb3df10e3f3146911fc3235c458c8ef1660ae9df"
-SRCREV_machine:qemux86 ?= "eb3df10e3f3146911fc3235c458c8ef1660ae9df"
-SRCREV_machine:qemux86-64 ?= "eb3df10e3f3146911fc3235c458c8ef1660ae9df"
-SRCREV_machine:qemumips64 ?= "a26ad766d7372a98d4d1f11d50953c45782f3a37"
-SRCREV_machine ?= "eb3df10e3f3146911fc3235c458c8ef1660ae9df"
-SRCREV_meta ?= "947149960e1426ace478e4b52c28a28ef8d6e74b"
+SRCREV_machine:qemuarm ?= "f7ce03f6b5de6a323b165e8adbaa3caae8646c20"
+SRCREV_machine:qemuarm64 ?= "db39986a84e0bcfe5a488ab8dca114ed27e469ce"
+SRCREV_machine:qemumips ?= "695cf3a24eaedc1e40393947afc22f8dc8324b47"
+SRCREV_machine:qemuppc ?= "3dfb435f3fb9ec38c60d1eeeeebf63b6a88308d3"
+SRCREV_machine:qemuriscv64 ?= "7c82dac028864e8a608e70d3ac2dbc05b3cd1e14"
+SRCREV_machine:qemuriscv32 ?= "7c82dac028864e8a608e70d3ac2dbc05b3cd1e14"
+SRCREV_machine:qemux86 ?= "7c82dac028864e8a608e70d3ac2dbc05b3cd1e14"
+SRCREV_machine:qemux86-64 ?= "7c82dac028864e8a608e70d3ac2dbc05b3cd1e14"
+SRCREV_machine:qemumips64 ?= "2f830b0a13ad4dbe738960d9e7d255ac411064b4"
+SRCREV_machine ?= "7c82dac028864e8a608e70d3ac2dbc05b3cd1e14"
+SRCREV_meta ?= "bef59dc5a78b4d101d1be23d4b36a73fd849241a"
# set your preferred provider of linux-yocto to 'linux-yocto-upstream', and you'll
# get the <version>/base branch, which is pure upstream -stable, and the same
# meta SRCREV as the linux-yocto-standard builds. Select your version using the
# normal PREFERRED_VERSION settings.
BBCLASSEXTEND = "devupstream:target"
-SRCREV_machine:class-devupstream ?= "4e67be407725b1d8b829ed2075987037abec98ec"
+SRCREV_machine:class-devupstream ?= "80efc6265290d34b75921bf7294e0d9c5a8749dc"
PN:class-devupstream = "linux-yocto-upstream"
KBRANCH:class-devupstream = "v5.15/base"
@@ -38,7 +39,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.15;destsuffix=${KMETA}"
LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46"
-LINUX_VERSION ?= "5.15.44"
+LINUX_VERSION ?= "5.15.150"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
DEPENDS += "openssl-native util-linux-native"
@@ -51,7 +52,7 @@ KCONF_BSP_AUDIT_LEVEL = "1"
KERNEL_DEVICETREE:qemuarmv5 = "versatile-pb.dtb"
-COMPATIBLE_MACHINE = "qemuarm|qemuarmv5|qemuarm64|qemux86|qemuppc|qemuppc64|qemumips|qemumips64|qemux86-64|qemuriscv64|qemuriscv32"
+COMPATIBLE_MACHINE = "^(qemuarm|qemuarmv5|qemuarm64|qemux86|qemuppc|qemuppc64|qemumips|qemumips64|qemux86-64|qemuriscv64|qemuriscv32)$"
# Functionality flags
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc"
diff --git a/meta/recipes-kernel/lttng/babeltrace2_2.0.4.bb b/meta/recipes-kernel/lttng/babeltrace2_2.0.5.bb
index b48f07ea0d..7ece3140f7 100644
--- a/meta/recipes-kernel/lttng/babeltrace2_2.0.4.bb
+++ b/meta/recipes-kernel/lttng/babeltrace2_2.0.5.bb
@@ -12,7 +12,7 @@ SRC_URI = "git://git.efficios.com/babeltrace.git;branch=stable-2.0 \
file://0001-tests-do-not-run-test-applications-from-.libs.patch \
file://0001-Make-manpages-multilib-identical.patch \
"
-SRCREV = "23e8cf4e6fdc1d0b230e964dafac08a57e6228e6"
+SRCREV = "66e76d1ea601705928899138f02730a3a2a3153d"
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>2(\.\d+)+)$"
S = "${WORKDIR}/git"
@@ -28,6 +28,7 @@ FILES:${PN}-staticdev += "${libdir}/babeltrace2/plugins/*.a"
FILES:${PN} += "${libdir}/babeltrace2/plugins/*.so"
ASNEEDED = ""
+LDFLAGS:append = "${@bb.utils.contains('DISTRO_FEATURES', 'ld-is-lld ptest', ' -fuse-ld=bfd ', '', d)}"
RDEPENDS:${PN}-ptest += "bash gawk python3"
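The LDFLAGS:append line added above passes "ld-is-lld ptest" as the check set of bb.utils.contains(), which only returns the true-value when every listed item is present in DISTRO_FEATURES, so -fuse-ld=bfd is injected only for ptest-enabled builds whose default linker is lld. A simplified stand-in for that check (the real helper also takes the datastore as an argument):

    def contains(value, checkvalues, truevalue, falsevalue):
        # True-value only when *all* checkvalues appear in the space-separated value.
        return truevalue if set(checkvalues.split()).issubset(value.split()) else falsevalue

    print(repr(contains("systemd ld-is-lld ptest", "ld-is-lld ptest", " -fuse-ld=bfd ", "")))  # ' -fuse-ld=bfd '
    print(repr(contains("systemd ptest", "ld-is-lld ptest", " -fuse-ld=bfd ", "")))            # ''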
diff --git a/meta/recipes-kernel/lttng/babeltrace_1.5.8.bb b/meta/recipes-kernel/lttng/babeltrace_1.5.11.bb
index 19601e7d1b..8e2fe4164d 100644
--- a/meta/recipes-kernel/lttng/babeltrace_1.5.8.bb
+++ b/meta/recipes-kernel/lttng/babeltrace_1.5.11.bb
@@ -10,7 +10,7 @@ DEPENDS = "glib-2.0 util-linux popt bison-native flex-native"
SRC_URI = "git://git.efficios.com/babeltrace.git;branch=stable-1.5 \
file://run-ptest \
"
-SRCREV = "054a54ae10b01a271afc4f19496c041b10fb414c"
+SRCREV = "91c00f70884887ff5c4849a8e3d47e311a22ba9d"
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>1(\.\d+)+)$"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-compaction-migratepages-event-name.patch b/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-compaction-migratepages-event-name.patch
deleted file mode 100644
index e988f7a3d5..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-compaction-migratepages-event-name.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From c312bda00d2dc10ce5f6c1189acbefee5c6c8c6c Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Tue, 29 Mar 2022 16:34:07 -0400
-Subject: [PATCH 01/10] Fix: compaction migratepages event name
-
-The commit "fix: mm: compaction: fix the migration stats in trace_mm_compaction_migratepages() (v5.17)"
-
-Triggers this warning:
-
- LTTng: event provider mismatch: The event name needs to start with provider name + _ + one or more letter, provider: compaction, event name: mm_compaction_migratepages
-
-Upstream-Status: Backport
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: I01c7485af765084dafb33bf33ae392e60bfbf1e7
----
- include/instrumentation/events/compaction.h | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/include/instrumentation/events/compaction.h b/include/instrumentation/events/compaction.h
-index 340e41f5..15964537 100644
---- a/include/instrumentation/events/compaction.h
-+++ b/include/instrumentation/events/compaction.h
-@@ -98,7 +98,9 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(compaction_isolate_template,
- #endif /* #else #if LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,0,0) */
-
- #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,17,0))
--LTTNG_TRACEPOINT_EVENT(mm_compaction_migratepages,
-+LTTNG_TRACEPOINT_EVENT_MAP(mm_compaction_migratepages,
-+
-+ compaction_migratepages,
-
- TP_PROTO(unsigned long nr_all,
- unsigned int nr_succeeded),
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0001-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch b/meta/recipes-kernel/lttng/lttng-modules/0001-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch
deleted file mode 100644
index d27cbc314f..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0001-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 13e4c978d45237b8780f1de6d404812b3af26a49 Mon Sep 17 00:00:00 2001
-From: He Zhe <zhe.he@windriver.com>
-Date: Thu, 2 Jun 2022 06:36:08 +0000
-Subject: [PATCH] fix: random: remove unused tracepoints (v5.10, v5.15)
-
-The following kernel commit has been back ported to v5.10.119 and v5.15.44.
-
-commit 14c174633f349cb41ea90c2c0aaddac157012f74
-Author: Jason A. Donenfeld <Jason@zx2c4.com>
-Date: Thu Feb 10 16:40:44 2022 +0100
-
- random: remove unused tracepoints
-
- These explicit tracepoints aren't really used and show sign of aging.
- It's work to keep these up to date, and before I attempted to keep them
- up to date, they weren't up to date, which indicates that they're not
- really used. These days there are better ways of introspecting anyway.
-
-Upstream-Status: Pending
-
-Signed-off-by: He Zhe <zhe.he@windriver.com>
----
- src/probes/Kbuild | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/src/probes/Kbuild b/src/probes/Kbuild
-index 5478447..31e0ee8 100644
---- a/src/probes/Kbuild
-+++ b/src/probes/Kbuild
-@@ -204,7 +204,10 @@ endif
-
- # Introduced in v3.6, remove in v5.18
- obj-$(CONFIG_LTTNG) += $(shell \
-- if [ \( ! \( $(VERSION) -ge 6 -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \) \) \
-+ if [ \( ! \( $(VERSION) -ge 6 \
-+ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \
-+ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 15 -a $(SUBLEVEL) -ge 44 \) \
-+ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 10 -a $(SUBLEVEL) -ge 119 \) \) \) \
- -a \
- $(VERSION) -ge 4 \
- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
---
-2.32.0
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0001-fix-sched-tracing-Append-prev_state-to-tp-args-inste.patch b/meta/recipes-kernel/lttng/lttng-modules/0001-fix-sched-tracing-Append-prev_state-to-tp-args-inste.patch
deleted file mode 100644
index b41053b6bc..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0001-fix-sched-tracing-Append-prev_state-to-tp-args-inste.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From 9c5b8de32b5745f3ff31079c02da64595e101bee Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Tue, 17 May 2022 11:46:29 -0400
-Subject: [PATCH] fix: sched/tracing: Append prev_state to tp args instead
- (v5.18)
-
-See upstream commit :
-
- commit 9c2136be0878c88c53dea26943ce40bb03ad8d8d
- Author: Delyan Kratunov <delyank@fb.com>
- Date: Wed May 11 18:28:36 2022 +0000
-
- sched/tracing: Append prev_state to tp args instead
-
- Commit fa2c3254d7cf (sched/tracing: Don't re-read p->state when emitting
- sched_switch event, 2022-01-20) added a new prev_state argument to the
- sched_switch tracepoint, before the prev task_struct pointer.
-
- This reordering of arguments broke BPF programs that use the raw
- tracepoint (e.g. tp_btf programs). The type of the second argument has
- changed and existing programs that assume a task_struct* argument
- (e.g. for bpf_task_storage access) will now fail to verify.
-
- If we instead append the new argument to the end, all existing programs
- would continue to work and can conditionally extract the prev_state
- argument on supported kernel versions.
-
-
-Upstream-Status: Backport
-
-Change-Id: Ife2ec88a8bea2743562590cbd357068d7773863f
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- include/instrumentation/events/sched.h | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/include/instrumentation/events/sched.h b/include/instrumentation/events/sched.h
-index 339bec94..c1c3df15 100644
---- a/include/instrumentation/events/sched.h
-+++ b/include/instrumentation/events/sched.h
-@@ -356,11 +356,11 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
- LTTNG_TRACEPOINT_EVENT(sched_switch,
-
- TP_PROTO(bool preempt,
-- unsigned int prev_state,
- struct task_struct *prev,
-- struct task_struct *next),
-+ struct task_struct *next,
-+ unsigned int prev_state),
-
-- TP_ARGS(preempt, prev_state, prev, next),
-+ TP_ARGS(preempt, prev, next, prev_state),
-
- TP_FIELDS(
- ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0002-Fix-tracepoint-event-allow-same-provider-and-event-n.patch b/meta/recipes-kernel/lttng/lttng-modules/0002-Fix-tracepoint-event-allow-same-provider-and-event-n.patch
deleted file mode 100644
index 00367eebf8..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0002-Fix-tracepoint-event-allow-same-provider-and-event-n.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From a7eb2e3d0a4beb1ee80b132927641dd05ef2d542 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 4 Apr 2022 15:49:32 -0400
-Subject: [PATCH 02/10] Fix: tracepoint event: allow same provider and event
- name
-
-Using the same name for the provider (TRACE_SYSTEM) and event name
-causes a compilation error because the same identifiers are emitted
-twice.
-
-Fix this by prefixing the provider identifier with
-"__provider_event_desc___".
-
-Upstream-Status: Backport
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: I8cdf8f859e35b8bd5c19737860d12f1ed546dfc2
----
- include/lttng/tracepoint-event-impl.h | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/include/lttng/tracepoint-event-impl.h b/include/lttng/tracepoint-event-impl.h
-index 38b1dc43..dcb22247 100644
---- a/include/lttng/tracepoint-event-impl.h
-+++ b/include/lttng/tracepoint-event-impl.h
-@@ -1255,7 +1255,7 @@ static const struct lttng_kernel_event_desc __event_desc___##_map = { \
- #define TP_ID1(_token, _system) _token##_system
- #define TP_ID(_token, _system) TP_ID1(_token, _system)
-
--static const struct lttng_kernel_event_desc * const TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
-+static const struct lttng_kernel_event_desc * const TP_ID(__provider_event_desc___, TRACE_SYSTEM)[] = {
- #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
- };
-
-@@ -1274,8 +1274,8 @@ static const struct lttng_kernel_event_desc * const TP_ID(__event_desc___, TRACE
- /* non-const because list head will be modified when registered. */
- static __used struct lttng_kernel_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
- .provider_name = __stringify(TRACE_SYSTEM),
-- .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
-- .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
-+ .event_desc = TP_ID(__provider_event_desc___, TRACE_SYSTEM),
-+ .nr_events = ARRAY_SIZE(TP_ID(__provider_event_desc___, TRACE_SYSTEM)),
- .head = { NULL, NULL },
- .lazy_init_head = { NULL, NULL },
- .lazy = 0,
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0003-fix-sched-tracing-Don-t-re-read-p-state-when-emittin.patch b/meta/recipes-kernel/lttng/lttng-modules/0003-fix-sched-tracing-Don-t-re-read-p-state-when-emittin.patch
deleted file mode 100644
index afe514de82..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0003-fix-sched-tracing-Don-t-re-read-p-state-when-emittin.patch
+++ /dev/null
@@ -1,183 +0,0 @@
-From 8e52fd71e693619f7a58de2692e59f0c826e9988 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 4 Apr 2022 13:52:57 -0400
-Subject: [PATCH 03/10] fix: sched/tracing: Don't re-read p->state when
- emitting sched_switch event (v5.18)
-
-See upstream commit :
-
- commit fa2c3254d7cfff5f7a916ab928a562d1165f17bb
- Author: Valentin Schneider <valentin.schneider@arm.com>
- Date: Thu Jan 20 16:25:19 2022 +0000
-
- sched/tracing: Don't re-read p->state when emitting sched_switch event
-
- As of commit
-
- c6e7bd7afaeb ("sched/core: Optimize ttwu() spinning on p->on_cpu")
-
- the following sequence becomes possible:
-
- p->__state = TASK_INTERRUPTIBLE;
- __schedule()
- deactivate_task(p);
- ttwu()
- READ !p->on_rq
- p->__state=TASK_WAKING
- trace_sched_switch()
- __trace_sched_switch_state()
- task_state_index()
- return 0;
-
- TASK_WAKING isn't in TASK_REPORT, so the task appears as TASK_RUNNING in
- the trace event.
-
- Prevent this by pushing the value read from __schedule() down the trace
- event.
-
-Upstream-Status: Backport
-
-Change-Id: I46743cd006be4b4d573cae2d77df7d6d16744d04
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- include/instrumentation/events/sched.h | 88 +++++++++++++++++++++++---
- 1 file changed, 78 insertions(+), 10 deletions(-)
-
-diff --git a/include/instrumentation/events/sched.h b/include/instrumentation/events/sched.h
-index 91953a6f..339bec94 100644
---- a/include/instrumentation/events/sched.h
-+++ b/include/instrumentation/events/sched.h
-@@ -20,7 +20,37 @@
- #ifndef _TRACE_SCHED_DEF_
- #define _TRACE_SCHED_DEF_
-
--#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))
-+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
-+
-+static inline long __trace_sched_switch_state(bool preempt,
-+ unsigned int prev_state,
-+ struct task_struct *p)
-+{
-+ unsigned int state;
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+ BUG_ON(p != current);
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+ /*
-+ * Preemption ignores task state, therefore preempted tasks are always
-+ * RUNNING (we will not have dequeued if state != RUNNING).
-+ */
-+ if (preempt)
-+ return TASK_REPORT_MAX;
-+
-+ /*
-+ * task_state_index() uses fls() and returns a value from 0-8 range.
-+ * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
-+ * it for left shift operation to get the correct task->state
-+ * mapping.
-+ */
-+ state = __task_state_index(prev_state, p->exit_state);
-+
-+ return state ? (1 << (state - 1)) : state;
-+}
-+
-+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))
-
- static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
- {
-@@ -321,43 +351,81 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
- /*
- * Tracepoint for task switches, performed by the scheduler:
- */
-+
-+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
- LTTNG_TRACEPOINT_EVENT(sched_switch,
-
--#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
- TP_PROTO(bool preempt,
-- struct task_struct *prev,
-- struct task_struct *next),
-+ unsigned int prev_state,
-+ struct task_struct *prev,
-+ struct task_struct *next),
-
-- TP_ARGS(preempt, prev, next),
-+ TP_ARGS(preempt, prev_state, prev, next),
-+
-+ TP_FIELDS(
-+ ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
-+ ctf_integer(pid_t, prev_tid, prev->pid)
-+ ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
-+#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
-+ ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
- #else
-- TP_PROTO(struct task_struct *prev,
-+ ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
-+#endif
-+ ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
-+ ctf_integer(pid_t, next_tid, next->pid)
-+ ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
-+ )
-+)
-+
-+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
-+
-+LTTNG_TRACEPOINT_EVENT(sched_switch,
-+
-+ TP_PROTO(bool preempt,
-+ struct task_struct *prev,
- struct task_struct *next),
-
-- TP_ARGS(prev, next),
--#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0)) */
-+ TP_ARGS(preempt, prev, next),
-
- TP_FIELDS(
- ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
- ctf_integer(pid_t, prev_tid, prev->pid)
- ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
--#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
- #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
- ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev))
- #else
- ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
- #endif
-+ ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
-+ ctf_integer(pid_t, next_tid, next->pid)
-+ ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
-+ )
-+)
-+
- #else
-+
-+LTTNG_TRACEPOINT_EVENT(sched_switch,
-+
-+ TP_PROTO(struct task_struct *prev,
-+ struct task_struct *next),
-+
-+ TP_ARGS(prev, next),
-+
-+ TP_FIELDS(
-+ ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
-+ ctf_integer(pid_t, prev_tid, prev->pid)
-+ ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
- #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
- ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(prev))
- #else
- ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
--#endif
- #endif
- ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
- ctf_integer(pid_t, next_tid, next->pid)
- ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
- )
- )
-+#endif
-
- /*
- * Tracepoint for a task being migrated:
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0004-fix-block-remove-genhd.h-v5.18.patch b/meta/recipes-kernel/lttng/lttng-modules/0004-fix-block-remove-genhd.h-v5.18.patch
deleted file mode 100644
index 9248ffe4ff..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0004-fix-block-remove-genhd.h-v5.18.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From 868e0b6db59159197c2cec3550fa4ad5e6572bc5 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 4 Apr 2022 13:54:59 -0400
-Subject: [PATCH 04/10] fix: block: remove genhd.h (v5.18)
-
-See upstream commit :
-
- commit 322cbb50de711814c42fb088f6d31901502c711a
- Author: Christoph Hellwig <hch@lst.de>
- Date: Mon Jan 24 10:39:13 2022 +0100
-
- block: remove genhd.h
-
- There is no good reason to keep genhd.h separate from the main blkdev.h
- header that includes it. So fold the contents of genhd.h into blkdev.h
- and remove genhd.h entirely.
-
-Upstream-Status: Backport
-
-Change-Id: I7cf2aaa3a4c133320b95f2edde49f790f9515dbd
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- include/wrapper/genhd.h | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/include/wrapper/genhd.h b/include/wrapper/genhd.h
-index 3c6dbcbe..4a59b68e 100644
---- a/include/wrapper/genhd.h
-+++ b/include/wrapper/genhd.h
-@@ -12,7 +12,11 @@
- #ifndef _LTTNG_WRAPPER_GENHD_H
- #define _LTTNG_WRAPPER_GENHD_H
-
-+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
-+#include <linux/blkdev.h>
-+#else
- #include <linux/genhd.h>
-+#endif
-
- #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,17,0))
- #define LTTNG_GENHD_FL_HIDDEN GENHD_FL_HIDDEN
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0005-fix-scsi-block-Remove-REQ_OP_WRITE_SAME-support-v5.1.patch b/meta/recipes-kernel/lttng/lttng-modules/0005-fix-scsi-block-Remove-REQ_OP_WRITE_SAME-support-v5.1.patch
deleted file mode 100644
index 0751827613..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0005-fix-scsi-block-Remove-REQ_OP_WRITE_SAME-support-v5.1.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From 2bc7cb7193124d20aa4e1b5dbad0410bfb97a470 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 4 Apr 2022 14:12:13 -0400
-Subject: [PATCH 05/10] fix: scsi: block: Remove REQ_OP_WRITE_SAME support
- (v5.18)
-
-See upstream commit :
-
- commit 73bd66d9c834220579c881a3eb020fd8917075d8
- Author: Christoph Hellwig <hch@lst.de>
- Date: Wed Feb 9 09:28:28 2022 +0100
-
- scsi: block: Remove REQ_OP_WRITE_SAME support
-
- No more users of REQ_OP_WRITE_SAME or drivers implementing it are left,
- so remove the infrastructure.
-
-Upstream-Status: Backport
-
-Change-Id: Ifbff71f79f8b590436fc7cb79f82d90c6e033d84
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- include/instrumentation/events/block.h | 32 ++++++++++++++++++++++++++
- 1 file changed, 32 insertions(+)
-
-diff --git a/include/instrumentation/events/block.h b/include/instrumentation/events/block.h
-index 3e1104d7..050a59a2 100644
---- a/include/instrumentation/events/block.h
-+++ b/include/instrumentation/events/block.h
-@@ -66,6 +66,37 @@ LTTNG_TRACEPOINT_ENUM(block_rq_type,
- #define lttng_bio_op(bio) bio_op(bio)
- #define lttng_bio_rw(bio) ((bio)->bi_opf)
-
-+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
-+#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
-+#define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
-+ ctf_enum(block_rq_type, type, rwbs, \
-+ ( (op) == REQ_OP_WRITE ? RWBS_FLAG_WRITE : \
-+ ( (op) == REQ_OP_DISCARD ? RWBS_FLAG_DISCARD : \
-+ ( (op) == REQ_OP_SECURE_ERASE ? (RWBS_FLAG_DISCARD | RWBS_FLAG_SECURE) : \
-+ ( (op) == REQ_OP_FLUSH ? RWBS_FLAG_FLUSH : \
-+ ( (op) == REQ_OP_READ ? RWBS_FLAG_READ : \
-+ ( 0 )))))) \
-+ | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
-+ | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
-+ | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
-+ | ((rw) & REQ_PREFLUSH ? RWBS_FLAG_PREFLUSH : 0) \
-+ | ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))
-+#else
-+#define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
-+ ctf_integer(type, rwbs, \
-+ ( (op) == REQ_OP_WRITE ? RWBS_FLAG_WRITE : \
-+ ( (op) == REQ_OP_DISCARD ? RWBS_FLAG_DISCARD : \
-+ ( (op) == REQ_OP_SECURE_ERASE ? (RWBS_FLAG_DISCARD | RWBS_FLAG_SECURE) : \
-+ ( (op) == REQ_OP_FLUSH ? RWBS_FLAG_FLUSH : \
-+ ( (op) == REQ_OP_READ ? RWBS_FLAG_READ : \
-+ ( 0 )))))) \
-+ | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
-+ | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
-+ | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
-+ | ((rw) & REQ_PREFLUSH ? RWBS_FLAG_PREFLUSH : 0) \
-+ | ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))
-+#endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
-+#else
- #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
- #define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
- ctf_enum(block_rq_type, type, rwbs, \
-@@ -95,6 +126,7 @@ LTTNG_TRACEPOINT_ENUM(block_rq_type,
- | ((rw) & REQ_PREFLUSH ? RWBS_FLAG_PREFLUSH : 0) \
- | ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))
- #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
-+#endif
-
- #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
-
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0006-fix-random-remove-unused-tracepoints-v5.18.patch b/meta/recipes-kernel/lttng/lttng-modules/0006-fix-random-remove-unused-tracepoints-v5.18.patch
deleted file mode 100644
index 9c2f70d4af..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0006-fix-random-remove-unused-tracepoints-v5.18.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From 369d82bb1746447514c877088d7c5fd0f39140f8 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 4 Apr 2022 14:33:42 -0400
-Subject: [PATCH 06/10] fix: random: remove unused tracepoints (v5.18)
-
-See upstream commit :
-
- commit 14c174633f349cb41ea90c2c0aaddac157012f74
- Author: Jason A. Donenfeld <Jason@zx2c4.com>
- Date: Thu Feb 10 16:40:44 2022 +0100
-
- random: remove unused tracepoints
-
- These explicit tracepoints aren't really used and show sign of aging.
- It's work to keep these up to date, and before I attempted to keep them
- up to date, they weren't up to date, which indicates that they're not
- really used. These days there are better ways of introspecting anyway.
-
-Upstream-Status: Backport
-
-Change-Id: I3b8c3e2732e7efdd76ce63204ac53a48784d0df6
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- src/probes/Kbuild | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/src/probes/Kbuild b/src/probes/Kbuild
-index e26b4359..8d6ff0f2 100644
---- a/src/probes/Kbuild
-+++ b/src/probes/Kbuild
-@@ -187,8 +187,11 @@ ifneq ($(CONFIG_FRAME_WARN),0)
- CFLAGS_lttng-probe-printk.o += -Wframe-larger-than=2200
- endif
-
-+# Introduced in v3.6, remove in v5.18
- obj-$(CONFIG_LTTNG) += $(shell \
-- if [ $(VERSION) -ge 4 \
-+ if [ \( ! \( $(VERSION) -ge 6 -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \) \) \
-+ -a \
-+ $(VERSION) -ge 4 \
- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0007-fix-kprobes-Use-rethook-for-kretprobe-if-possible-v5.patch b/meta/recipes-kernel/lttng/lttng-modules/0007-fix-kprobes-Use-rethook-for-kretprobe-if-possible-v5.patch
deleted file mode 100644
index effd37ffe1..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0007-fix-kprobes-Use-rethook-for-kretprobe-if-possible-v5.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From 3c46ddc134621dba65030263aa321dd6bdae3ba3 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 4 Apr 2022 15:02:10 -0400
-Subject: [PATCH 07/10] fix: kprobes: Use rethook for kretprobe if possible
- (v5.18)
-
-See upstream commit :
-
- commit 73f9b911faa74ac5107879de05c9489c419f41bb
- Author: Masami Hiramatsu <mhiramat@kernel.org>
- Date: Sat Mar 26 11:27:05 2022 +0900
-
- kprobes: Use rethook for kretprobe if possible
-
- Use rethook for kretprobe function return hooking if the arch sets
- CONFIG_HAVE_RETHOOK=y. In this case, CONFIG_KRETPROBE_ON_RETHOOK is
- set to 'y' automatically, and the kretprobe internal data fields
- switches to use rethook. If not, it continues to use kretprobe
- specific function return hooks.
-
-Upstream-Status: Backport
-
-Change-Id: I2b7670dc04e4769c1e3c372582ad2f555f6d7a66
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- include/wrapper/kprobes.h | 17 +++++++++++++++++
- src/probes/lttng-kretprobes.c | 2 +-
- 2 files changed, 18 insertions(+), 1 deletion(-)
-
-diff --git a/include/wrapper/kprobes.h b/include/wrapper/kprobes.h
-index b546d615..51d32b7c 100644
---- a/include/wrapper/kprobes.h
-+++ b/include/wrapper/kprobes.h
-@@ -29,4 +29,21 @@ struct kretprobe *lttng_get_kretprobe(struct kretprobe_instance *ri)
-
- #endif /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,11,0) */
-
-+
-+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
-+static inline
-+unsigned long lttng_get_kretprobe_retaddr(struct kretprobe_instance *ri)
-+{
-+ return get_kretprobe_retaddr(ri);
-+}
-+
-+#else
-+
-+static inline
-+unsigned long lttng_get_kretprobe_retaddr(struct kretprobe_instance *ri)
-+{
-+ return (unsigned long) ri->ret_addr;
-+}
-+#endif
-+
- #endif /* _LTTNG_WRAPPER_KPROBES_H */
-diff --git a/src/probes/lttng-kretprobes.c b/src/probes/lttng-kretprobes.c
-index 5cb2e953..565df739 100644
---- a/src/probes/lttng-kretprobes.c
-+++ b/src/probes/lttng-kretprobes.c
-@@ -81,7 +81,7 @@ int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
- int ret;
-
- payload.ip = (unsigned long) lttng_get_kretprobe(krpi)->kp.addr;
-- payload.parent_ip = (unsigned long) krpi->ret_addr;
-+ payload.parent_ip = lttng_get_kretprobe_retaddr(krpi);
-
- lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(payload),
- lttng_alignof(payload), &lttng_probe_ctx);
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0008-fix-scsi-core-Remove-scsi-scsi_request.h-v5.18.patch b/meta/recipes-kernel/lttng/lttng-modules/0008-fix-scsi-core-Remove-scsi-scsi_request.h-v5.18.patch
deleted file mode 100644
index 13c504b859..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0008-fix-scsi-core-Remove-scsi-scsi_request.h-v5.18.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From e8d2f286b5b208ac8870d0a9c167b170e96169b3 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 4 Apr 2022 15:08:48 -0400
-Subject: [PATCH 08/10] fix: scsi: core: Remove <scsi/scsi_request.h> (v5.18)
-
-See upstream commit :
-
- commit 26440303310591e29121964ede0048583cb3126d
- Author: Christoph Hellwig <hch@lst.de>
- Date: Thu Feb 24 18:55:52 2022 +0100
-
- scsi: core: Remove <scsi/scsi_request.h>
-
- This header is empty now except for an include of <linux/blk-mq.h>, so
- remove it.
-
-Upstream-Status: Backport
-
-Change-Id: Ic8ee3352f1e8bddfcd44c31be9b788db82f183aa
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- include/instrumentation/events/block.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/include/instrumentation/events/block.h b/include/instrumentation/events/block.h
-index 050a59a2..882e6e08 100644
---- a/include/instrumentation/events/block.h
-+++ b/include/instrumentation/events/block.h
-@@ -11,9 +11,9 @@
- #include <linux/trace_seq.h>
- #include <lttng/kernel-version.h>
-
--#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,11,0))
-+#if LTTNG_KERNEL_RANGE(4,11,0, 5,18,0)
- #include <scsi/scsi_request.h>
--#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,11,0)) */
-+#endif /* LTTNG_KERNEL_RANGE(4,11,0, 5,18,0) */
-
- #ifndef _TRACE_BLOCK_DEF_
- #define _TRACE_BLOCK_DEF_
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0010-fix-mm-compaction-cleanup-the-compaction-trace-event.patch b/meta/recipes-kernel/lttng/lttng-modules/0010-fix-mm-compaction-cleanup-the-compaction-trace-event.patch
deleted file mode 100644
index 892d3f0d23..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0010-fix-mm-compaction-cleanup-the-compaction-trace-event.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-From f9208dc00756dfa0a2f191799722030bdf3f793d Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 4 Apr 2022 15:14:01 -0400
-Subject: [PATCH 10/10] fix: mm: compaction: cleanup the compaction trace
- events (v5.18)
-
-See upstream commit :
-
- commit abd4349ff9b8d242376b67711254221f64f447c7
- Author: Baolin Wang <baolin.wang@linux.alibaba.com>
- Date: Tue Mar 22 14:45:56 2022 -0700
-
- mm: compaction: cleanup the compaction trace events
-
- As Steven suggested [1], we should access the pointers from the trace
- event to avoid dereferencing them to the tracepoint function when the
- tracepoint is disabled.
-
- [1] https://lkml.org/lkml/2021/11/3/409
-
-Upstream-Status: Backport
-
-Change-Id: I6c08250df8596e8dbc76780ae5d95c899c12e6fe
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- include/instrumentation/events/compaction.h | 17 ++++++++++++++++-
- src/probes/Kbuild | 17 ++++++++++++++++-
- src/probes/lttng-probe-compaction.c | 5 +++++
- 3 files changed, 37 insertions(+), 2 deletions(-)
-
-diff --git a/include/instrumentation/events/compaction.h b/include/instrumentation/events/compaction.h
-index 15964537..ecae39a8 100644
---- a/include/instrumentation/events/compaction.h
-+++ b/include/instrumentation/events/compaction.h
-@@ -97,7 +97,22 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(compaction_isolate_template,
-
- #endif /* #else #if LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,0,0) */
-
--#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,17,0))
-+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
-+LTTNG_TRACEPOINT_EVENT_MAP(mm_compaction_migratepages,
-+
-+ compaction_migratepages,
-+
-+ TP_PROTO(struct compact_control *cc,
-+ unsigned int nr_succeeded),
-+
-+ TP_ARGS(cc, nr_succeeded),
-+
-+ TP_FIELDS(
-+ ctf_integer(unsigned long, nr_migrated, nr_succeeded)
-+ ctf_integer(unsigned long, nr_failed, cc->nr_migratepages - nr_succeeded)
-+ )
-+)
-+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,17,0))
- LTTNG_TRACEPOINT_EVENT_MAP(mm_compaction_migratepages,
-
- compaction_migratepages,
-diff --git a/src/probes/Kbuild b/src/probes/Kbuild
-index 8d6ff0f2..54784477 100644
---- a/src/probes/Kbuild
-+++ b/src/probes/Kbuild
-@@ -167,7 +167,22 @@ ifneq ($(CONFIG_BTRFS_FS),)
- endif # $(wildcard $(btrfs_dep))
- endif # CONFIG_BTRFS_FS
-
--obj-$(CONFIG_LTTNG) += lttng-probe-compaction.o
-+# A dependency on internal header 'mm/internal.h' was introduced in v5.18
-+compaction_dep = $(srctree)/mm/internal.h
-+compaction_dep_wildcard = $(wildcard $(compaction_dep))
-+compaction_dep_check = $(shell \
-+if [ \( $(VERSION) -ge 6 \
-+ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \) -a \
-+ -z "$(compaction_dep_wildcard)" ] ; then \
-+ echo "warn" ; \
-+else \
-+ echo "ok" ; \
-+fi ;)
-+ifeq ($(compaction_dep_check),ok)
-+ obj-$(CONFIG_LTTNG) += lttng-probe-compaction.o
-+else
-+ $(warning Files $(compaction_dep) not found. Probe "compaction" is disabled. Use full kernel source tree to enable it.)
-+endif # $(wildcard $(compaction_dep))
-
- ifneq ($(CONFIG_EXT4_FS),)
- ext4_dep = $(srctree)/fs/ext4/*.h
-diff --git a/src/probes/lttng-probe-compaction.c b/src/probes/lttng-probe-compaction.c
-index f8ddf384..ffaf45f0 100644
---- a/src/probes/lttng-probe-compaction.c
-+++ b/src/probes/lttng-probe-compaction.c
-@@ -10,6 +10,11 @@
-
- #include <linux/module.h>
- #include <lttng/tracer.h>
-+#include <lttng/kernel-version.h>
-+
-+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
-+#include "../mm/internal.h"
-+#endif
-
- /*
- * Create the tracepoint static inlines from the kernel to validate that our
---
-2.19.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules_2.13.3.bb b/meta/recipes-kernel/lttng/lttng-modules_2.13.9.bb
index e049bdc6d2..a08386b053 100644
--- a/meta/recipes-kernel/lttng/lttng-modules_2.13.3.bb
+++ b/meta/recipes-kernel/lttng/lttng-modules_2.13.9.bb
@@ -10,24 +10,13 @@ inherit module
include lttng-platforms.inc
SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \
- file://0001-Fix-compaction-migratepages-event-name.patch \
- file://0002-Fix-tracepoint-event-allow-same-provider-and-event-n.patch \
- file://0003-fix-sched-tracing-Don-t-re-read-p-state-when-emittin.patch \
- file://0004-fix-block-remove-genhd.h-v5.18.patch \
- file://0005-fix-scsi-block-Remove-REQ_OP_WRITE_SAME-support-v5.1.patch \
- file://0006-fix-random-remove-unused-tracepoints-v5.18.patch \
- file://0007-fix-kprobes-Use-rethook-for-kretprobe-if-possible-v5.patch \
- file://0008-fix-scsi-core-Remove-scsi-scsi_request.h-v5.18.patch \
file://0009-Rename-genhd-wrapper-to-blkdev.patch \
- file://0010-fix-mm-compaction-cleanup-the-compaction-trace-event.patch \
- file://0001-fix-sched-tracing-Append-prev_state-to-tp-args-inste.patch \
- file://0001-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch \
- "
+ "
# Use :append here so that the patch is applied also when using devupstream
SRC_URI:append = " file://0001-src-Kbuild-change-missing-CONFIG_TRACEPOINTS-to-warn.patch"
-SRC_URI[sha256sum] = "7cf1acbb50b84116acc9b4281b81dcc2643d6018bbd1e8514ad1270239896c4b"
+SRC_URI[sha256sum] = "bf808b113544287cfe837a6382887fa66354ef5cc8216460cebbef3d27dc3581"
export INSTALL_MOD_DIR="kernel/lttng-modules"
diff --git a/meta/recipes-kernel/lttng/lttng-platforms.inc b/meta/recipes-kernel/lttng/lttng-platforms.inc
index 933c65d85d..900e36df82 100644
--- a/meta/recipes-kernel/lttng/lttng-platforms.inc
+++ b/meta/recipes-kernel/lttng/lttng-platforms.inc
@@ -15,3 +15,7 @@ LTTNGUST:arc = ""
COMPATIBLE_HOST:arc:pn-lttng-ust = "null"
+# Whether the platform supports lttng-tools
+# lttng-tools requires SYS_ppoll and SYS_pselect6 which are not supported on riscv32.
+# It's also turned off for riscv32 in meta-riscv. See https://github.com/riscv/meta-riscv/blob/master/conf/layer.conf
+COMPATIBLE_HOST:riscv32:pn-lttng-tools = "null"
diff --git a/meta/recipes-kernel/lttng/lttng-tools/determinism.patch b/meta/recipes-kernel/lttng/lttng-tools/determinism.patch
deleted file mode 100644
index b2ab880bd6..0000000000
--- a/meta/recipes-kernel/lttng/lttng-tools/determinism.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-This is a bit ugly. Specifing abs_builddir as an RPATH is plain wrong when
-cross compiling. Sadly, removing the rpath makes libtool/automake do
-weird things and breaks the build as shared libs are no longer generated.
-
-We already try and delete the RPATH at do_install with chrpath however
-that does leave the path in the string table so it doesn't help us
-with reproducibility.
-
-Instead, hack in a bogus but harmless path, then delete it later in
-our do_install. Ultimately we may want to pass a specific path to use
-to configure if we really do need to set an RPATH at all. It is unclear
-to me whether the tests need that or not.
-
-Fixes reproducibility issues for lttng-tools.
-
-Upstream-Status: Pending [needs discussion with upstream about the correct solution]
-RP 2021/3/1
-
-Index: lttng-tools-2.12.2/tests/regression/ust/ust-dl/Makefile.am
-===================================================================
---- lttng-tools-2.12.2.orig/tests/regression/ust/ust-dl/Makefile.am
-+++ lttng-tools-2.12.2/tests/regression/ust/ust-dl/Makefile.am
-@@ -27,16 +27,16 @@ noinst_LTLIBRARIES = libzzz.la libbar.la
-
- libzzz_la_SOURCES = libzzz.c libzzz.h
- libzzz_la_LDFLAGS = -module -shared -avoid-version \
-- -rpath $(abs_builddir)
-+ -rpath /usr/lib
-
- libbar_la_SOURCES = libbar.c libbar.h
- libbar_la_LDFLAGS = -module -shared -avoid-version \
-- -rpath $(abs_builddir)
-+ -rpath /usr/lib
- libbar_la_LIBADD = libzzz.la
-
- libfoo_la_SOURCES = libfoo.c libfoo.h
- libfoo_la_LDFLAGS = -module -shared -avoid-version \
-- -rpath $(abs_builddir)
-+ -rpath /usr/lib
- libfoo_la_LIBADD = libbar.la
-
- CLEANFILES = libfoo.so libfoo.so.debug libbar.so libbar.so.debug \
-@@ -44,7 +44,7 @@ CLEANFILES = libfoo.so libfoo.so.debug l
-
- libtp_la_SOURCES = libbar-tp.h libbar-tp.c libfoo-tp.h libfoo-tp.c \
- libzzz-tp.h libzzz-tp.c
--libtp_la_LDFLAGS = -module -shared -rpath $(abs_builddir)
-+libtp_la_LDFLAGS = -module -shared -rpath /usr/lib
-
- # Extract debug symbols
- libfoo.so.debug: libfoo.la
-Index: lttng-tools-2.12.2/tests/utils/testapp/userspace-probe-elf-binary/Makefile.am
-===================================================================
---- lttng-tools-2.12.2.orig/tests/utils/testapp/userspace-probe-elf-binary/Makefile.am
-+++ lttng-tools-2.12.2/tests/utils/testapp/userspace-probe-elf-binary/Makefile.am
-@@ -5,7 +5,7 @@ AM_CFLAGS += -O0
- noinst_LTLIBRARIES = libfoo.la
-
- libfoo_la_SOURCES = foo.c foo.h
--libfoo_la_LDFLAGS = -shared -module -avoid-version -rpath $(abs_builddir)/.libs/
-+libfoo_la_LDFLAGS = -shared -module -avoid-version -rpath /usr/lib
-
- noinst_PROGRAMS = userspace-probe-elf-binary
- userspace_probe_elf_binary_SOURCES = userspace-probe-elf-binary.c
diff --git a/meta/recipes-kernel/lttng/lttng-tools_2.13.4.bb b/meta/recipes-kernel/lttng/lttng-tools_2.13.9.bb
index 0ea4da05ce..1f6929e307 100644
--- a/meta/recipes-kernel/lttng/lttng-tools_2.13.4.bb
+++ b/meta/recipes-kernel/lttng/lttng-tools_2.13.9.bb
@@ -35,11 +35,10 @@ SRC_URI = "https://lttng.org/files/lttng-tools/lttng-tools-${PV}.tar.bz2 \
file://0001-tests-do-not-strip-a-helper-library.patch \
file://run-ptest \
file://lttng-sessiond.service \
- file://determinism.patch \
file://disable-tests.patch \
"
-SRC_URI[sha256sum] = "565f3102410a53d484f4c8ff517978f1dc59f67f9d16f872f4357f3ca12200f6"
+SRC_URI[sha256sum] = "8d94dc95b608cf70216b01203a3f8242b97a232db2e23421a2f43708da08f337"
inherit autotools ptest pkgconfig useradd python3-dir manpages systemd
@@ -113,7 +112,7 @@ do_install_ptest () {
for f in $(find "${B}/tests/$d" -maxdepth 1 -executable -type f -printf '%P ') ; do
cp ${B}/tests/$d/$f ${D}${PTEST_PATH}/tests/`dirname $d`/$f
case $f in
- *.so|userspace-probe-elf-binary)
+ *.so|userspace-probe-elf-*)
install -d ${D}${PTEST_PATH}/tests/$d/
ln -s ../$f ${D}${PTEST_PATH}/tests/$d/$f
# Remove any rpath/runpath to pass QA check.
@@ -124,6 +123,7 @@ do_install_ptest () {
done
chrpath --delete ${D}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-binary/userspace-probe-elf-binary
+ chrpath --delete ${D}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-cxx-binary/userspace-probe-elf-cxx-binary
chrpath --delete ${D}${PTEST_PATH}/tests/regression/ust/ust-dl/libbar.so
chrpath --delete ${D}${PTEST_PATH}/tests/regression/ust/ust-dl/libfoo.so
@@ -185,4 +185,10 @@ do_install_ptest () {
INHIBIT_PACKAGE_STRIP_FILES = "\
${PKGD}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-binary/userspace-probe-elf-binary \
${PKGD}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-binary/.libs/userspace-probe-elf-binary \
+ ${PKGD}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-cxx-binary/userspace-probe-elf-cxx-binary \
+ ${PKGD}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-cxx-binary/.libs/userspace-probe-elf-cxx-binary \
+ ${PKGD}${PTEST_PATH}/tests/utils/testapp/gen-syscall-events/gen-syscall-events \
+ ${PKGD}${PTEST_PATH}/tests/utils/testapp/gen-syscall-events/.libs/gen-syscall-events \
+ ${PKGD}${PTEST_PATH}/tests/utils/testapp/gen-syscall-events-callstack/gen-syscall-events-callstack \
+ ${PKGD}${PTEST_PATH}/tests/utils/testapp/gen-syscall-events-callstack/.libs/gen-syscall-events-callstack \
"
diff --git a/meta/recipes-kernel/lttng/lttng-ust_2.13.3.bb b/meta/recipes-kernel/lttng/lttng-ust_2.13.6.bb
index cc88bf5b11..424b0fa645 100644
--- a/meta/recipes-kernel/lttng/lttng-ust_2.13.3.bb
+++ b/meta/recipes-kernel/lttng/lttng-ust_2.13.6.bb
@@ -34,7 +34,7 @@ SRC_URI = "https://lttng.org/files/lttng-ust/lttng-ust-${PV}.tar.bz2 \
file://0001-Makefile.am-update-rpath-link.patch \
"
-SRC_URI[sha256sum] = "2cc42f51145050430ac4ab72b32d95fd78d5566ccbe44e14a8fcdd23c0ed8f6f"
+SRC_URI[sha256sum] = "e7e04596dd73ac7aa99e27cd000f949dbb0fed51bd29099f9b08a25c1df0ced5"
CVE_PRODUCT = "ust"
diff --git a/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb b/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
index 0e420a25d9..8727d003f9 100644
--- a/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
+++ b/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "https://www.yoctoproject.org/"
LICENSE = "GPL-2.0-only"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
-inherit kernel-arch
+inherit kernel-arch linux-kernel-base
inherit pkgconfig
PACKAGE_ARCH = "${MACHINE_ARCH}"
@@ -21,6 +21,9 @@ DEPENDS += "gmp-native"
EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}" CROSS_COMPILE=${TARGET_PREFIX}"
+KERNEL_LOCALVERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}"
+export LOCALVERSION="${KERNEL_LOCALVERSION}"
+
# Build some host tools under work-shared. CC, LD, and AR are probably
# not used, but this is the historical way of invoking "make scripts".
#
diff --git a/meta/recipes-kernel/perf/perf.bb b/meta/recipes-kernel/perf/perf.bb
index adefc44eaa..a4ce3169d3 100644
--- a/meta/recipes-kernel/perf/perf.bb
+++ b/meta/recipes-kernel/perf/perf.bb
@@ -13,7 +13,7 @@ PR = "r9"
PACKAGECONFIG ??= "scripting tui libunwind"
PACKAGECONFIG[dwarf] = ",NO_DWARF=1"
-PACKAGECONFIG[scripting] = ",NO_LIBPERL=1 NO_LIBPYTHON=1,perl python3"
+PACKAGECONFIG[scripting] = ",NO_LIBPERL=1 NO_LIBPYTHON=1,perl python3 python3-setuptools-native"
# gui support was added with kernel 3.6.35
# since 3.10 libnewt was replaced by slang
# to cover a wide range of kernel we add both dependencies
@@ -144,6 +144,9 @@ do_install() {
# we are checking for this make target to be compatible with older perf versions
if ${@bb.utils.contains('PACKAGECONFIG', 'scripting', 'true', 'false', d)} && grep -q install-python_ext ${S}/tools/perf/Makefile*; then
oe_runmake DESTDIR=${D} install-python_ext
+ if [ -e ${D}${libdir}/python*/site-packages/perf-*/SOURCES.txt ]; then
+ sed -i -e 's#${WORKDIR}##g' ${D}${libdir}/python*/site-packages/perf-*/SOURCES.txt
+ fi
fi
}
@@ -203,7 +206,7 @@ do_configure:prepend () {
if [ -e "${S}/tools/perf/Makefile.perf" ]; then
sed -i -e 's,\ .config-detected, $(OUTPUT)/config-detected,g' \
${S}/tools/perf/Makefile.perf
- sed -i -e "s,prefix='\$(DESTDIR_SQ)/usr'$,prefix='\$(DESTDIR_SQ)/usr' --install-lib='\$(DESTDIR)\$(PYTHON_SITEPACKAGES_DIR)',g" \
+ sed -i -e "s,prefix='\$(DESTDIR_SQ)/usr'$,prefix='\$(DESTDIR_SQ)/usr' --install-lib='\$(PYTHON_SITEPACKAGES_DIR)' --root='\$(DESTDIR)',g" \
${S}/tools/perf/Makefile.perf
# backport https://github.com/torvalds/linux/commit/e4ffd066ff440a57097e9140fa9e16ceef905de8
sed -i -e 's,\($(Q)$(SHELL) .$(arch_errno_tbl).\) $(CC) $(arch_errno_hdr_dir),\1 $(firstword $(CC)) $(arch_errno_hdr_dir),g' \
@@ -227,6 +230,15 @@ do_configure:prepend () {
# reproducible.
sed -i -e 's,$(call get-executable-or-default\,PYTHON\,$(PYTHON_AUTO)),$(notdir $(call get-executable-or-default\,PYTHON\,$(PYTHON_AUTO))),g' \
${S}/tools/perf/Makefile.config
+ # The same line is in older releases, but looking explicitly for Python 2
+ sed -i -e 's,$(call get-executable-or-default\,PYTHON\,$(PYTHON2)),$(notdir $(call get-executable-or-default\,PYTHON\,$(PYTHON2))),g' \
+ ${S}/tools/perf/Makefile.config
+
+ # likewise with this substitution. Kernels with commit 18f2967418d031a39
+ # [perf tools: Use Python devtools for version autodetection rather than runtime]
+ # need this substitution for reproducibility.
+ sed -i -e 's,$(call get-executable-or-default\,PYTHON\,$(subst -config\,\,$(PYTHON_AUTO))),$(notdir $(call get-executable-or-default\,PYTHON\,$(subst -config\,\,$(PYTHON_AUTO)))),g' \
+ ${S}/tools/perf/Makefile.config
# The following line:
# srcdir_SQ = $(patsubst %tools/perf,tools/perf,$(subst ','\'',$(srcdir))),
@@ -235,6 +247,9 @@ do_configure:prepend () {
# change the Makefile line to remove everything before 'tools/perf'
sed -i -e "s%srcdir_SQ = \$(subst ','\\\'',\$(srcdir))%srcdir_SQ = \$(patsubst \%tools/perf,tools/perf,\$(subst ','\\\'',\$(srcdir)))%g" \
${S}/tools/perf/Makefile.config
+ # Avoid hardcoded path to python-native
+ sed -i -e 's#\(PYTHON_WORD := \)$(call shell-wordify,$(PYTHON))#\1 python3#g' \
+ ${S}/tools/perf/Makefile.config
fi
if [ -e "${S}/tools/perf/tests/Build" ]; then
# OUTPUT is the full path, we have python on the path so we remove it from the
diff --git a/meta/recipes-kernel/perf/perf/sort-pmuevents.py b/meta/recipes-kernel/perf/perf/sort-pmuevents.py
index 09ba3328a7..0a87e553ab 100755
--- a/meta/recipes-kernel/perf/perf/sort-pmuevents.py
+++ b/meta/recipes-kernel/perf/perf/sort-pmuevents.py
@@ -36,10 +36,10 @@ with open(infile, 'r') as file:
preamble_regex = re.compile( '^(.*?)^(struct|const struct|static struct|static const struct)', re.MULTILINE | re.DOTALL )
preamble = re.search( preamble_regex, data )
-struct_block_regex = re.compile( '^(struct|const struct|static struct|static const struct).*?(\w+) (.*?)\[\] = {(.*?)^};', re.MULTILINE | re.DOTALL )
-field_regex = re.compile( '{.*?},', re.MULTILINE | re.DOTALL )
-cpuid_regex = re.compile( '\.cpuid = (.*?),', re.MULTILINE | re.DOTALL )
-name_regex = re.compile( '\.name = (.*?),', re.MULTILINE | re.DOTALL )
+struct_block_regex = re.compile(r'^(struct|const struct|static struct|static const struct).*?(\w+) (.*?)\[\] = {(.*?)^};', re.MULTILINE | re.DOTALL )
+field_regex = re.compile(r'{.*?},', re.MULTILINE | re.DOTALL )
+cpuid_regex = re.compile(r'\.cpuid = (.*?),', re.MULTILINE | re.DOTALL )
+name_regex = re.compile(r'\.name = (.*?),', re.MULTILINE | re.DOTALL )
# create a dictionary structure to store all the structs, their
# types and then their fields.
@@ -62,7 +62,10 @@ for struct in re.findall( struct_block_regex, data ):
#print( " name found: %s" % name.group(1) )
entry_dict[struct[2]]['fields'][name.group(1)] = entry
- if not entry_dict[struct[2]]['fields']:
+ # unmatched entries are most likely array terminators and
+ # should end up as the last element in the sorted list, which
+ # is achieved by using '0' as the key
+ if not cpuid and not name:
entry_dict[struct[2]]['fields']['0'] = entry
# created ordered dictionaries from the captured values. These are ordered by
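The regular-expression hunk in sort-pmuevents.py above only adds r prefixes: sequences such as \. and \w are not valid Python string escapes, so recent Python releases emit deprecation warnings for them, while raw string literals hand the backslashes to the regex engine unchanged. A small example of the resulting behaviour:

    import re

    cpuid_regex = re.compile(r'\.cpuid = (.*?),', re.MULTILINE | re.DOTALL)
    print(cpuid_regex.search('.cpuid = "0x41",').group(1))  # "0x41"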
diff --git a/meta/recipes-kernel/systemtap/systemtap/0001-bpf-translate.cxx-Prevent-Werror-maybe-uninitialized.patch b/meta/recipes-kernel/systemtap/systemtap/0001-bpf-translate.cxx-Prevent-Werror-maybe-uninitialized.patch
new file mode 100644
index 0000000000..130eefab5d
--- /dev/null
+++ b/meta/recipes-kernel/systemtap/systemtap/0001-bpf-translate.cxx-Prevent-Werror-maybe-uninitialized.patch
@@ -0,0 +1,53 @@
+From df3425f51a512f65522522daf1f78c7fab0a63fd Mon Sep 17 00:00:00 2001
+From: Aaron Merey <amerey@redhat.com>
+Date: Fri, 25 Feb 2022 19:18:29 -0500
+Subject: [PATCH] bpf-translate.cxx: Prevent -Werror=maybe-uninitialized
+
+Two variables in bpf-translate.cxx can trigger -Werror=maybe-uninitialized.
+The code is designed so that uninitialized uses are not actually possible,
+but to convince gcc of this we move a throw statement and initialize one
+of the variables with a value.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=systemtap.git;a=commit;h=df3425f51a512f65522522daf1f78c7fab0a63fd]
+
+Signed-off-by: Li Wang <li.wang@windriver.com>
+---
+ bpf-translate.cxx | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/bpf-translate.cxx b/bpf-translate.cxx
+index 3f45c721f..1b63d6078 100644
+--- a/bpf-translate.cxx
++++ b/bpf-translate.cxx
+@@ -1203,7 +1203,7 @@ bpf_unparser::emit_asm_arg (const asm_stmt &stmt, const std::string &arg,
+ {
+ /* arg is a register number */
+ std::string reg = arg[0] == 'r' ? arg.substr(1) : arg;
+- unsigned long num;
++ unsigned long num = ULONG_MAX;
+ bool parsed = false;
+ try {
+ num = stoul(reg, 0, 0);
+@@ -1941,8 +1941,6 @@ bpf_unparser::visit_foreach_loop(foreach_loop* s)
+ for (unsigned k = 0; k < arraydecl->index_types.size(); k++)
+ {
+ auto type = arraydecl->index_types[k];
+- if (type != pe_long && type != pe_string)
+- throw SEMANTIC_ERROR(_("unhandled foreach index type"), s->tok);
+ int this_column_size;
+ // PR23875: foreach should handle string keys
+ if (type == pe_long)
+@@ -1953,6 +1951,10 @@ bpf_unparser::visit_foreach_loop(foreach_loop* s)
+ {
+ this_column_size = BPF_MAXSTRINGLEN;
+ }
++ else
++ {
++ throw SEMANTIC_ERROR(_("unhandled foreach index type"), s->tok);
++ }
+ if (info.sort_column == k + 1) // record sort column
+ {
+ info.sort_column_size = this_column_size;
+--
+2.25.1
+
diff --git a/meta/recipes-kernel/systemtap/systemtap_git.bb b/meta/recipes-kernel/systemtap/systemtap_git.bb
index ce86d5274d..c84fc27001 100644
--- a/meta/recipes-kernel/systemtap/systemtap_git.bb
+++ b/meta/recipes-kernel/systemtap/systemtap_git.bb
@@ -9,6 +9,7 @@ require systemtap_git.inc
SRC_URI += "file://0001-improve-reproducibility-for-c-compiling.patch \
file://0001-staprun-address-ncurses-6.3-failures.patch \
file://0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch \
+ file://0001-bpf-translate.cxx-Prevent-Werror-maybe-uninitialized.patch \
"
DEPENDS = "elfutils"
diff --git a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2022.04.08.bb b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb
index cd42039680..8fde236ab4 100644
--- a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2022.04.08.bb
+++ b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb
@@ -5,7 +5,7 @@ LICENSE = "ISC"
LIC_FILES_CHKSUM = "file://LICENSE;md5=07c4f6dea3845b02a18dc00c8c87699c"
SRC_URI = "https://www.kernel.org/pub/software/network/${BPN}/${BP}.tar.xz"
-SRC_URI[sha256sum] = "884ba2e3c1e8b98762b6dc25ff60b5ec75c8d33a39e019b3ed4aa615491460d3"
+SRC_URI[sha256sum] = "c8a61c9acf76fa7eb4239e89f640dee3e87098d9f69b4d3518c9c60fc6d20c55"
inherit bin_package allarch
@@ -13,7 +13,7 @@ do_install() {
install -d -m0755 ${D}${nonarch_libdir}/crda
install -d -m0755 ${D}${sysconfdir}/wireless-regdb/pubkeys
install -m 0644 regulatory.bin ${D}${nonarch_libdir}/crda/regulatory.bin
- install -m 0644 sforshee.key.pub.pem ${D}${sysconfdir}/wireless-regdb/pubkeys/sforshee.key.pub.pem
+ install -m 0644 wens.key.pub.pem ${D}${sysconfdir}/wireless-regdb/pubkeys/wens.key.pub.pem
install -m 0644 -D regulatory.db ${D}${nonarch_base_libdir}/firmware/regulatory.db
install -m 0644 regulatory.db.p7s ${D}${nonarch_base_libdir}/firmware/regulatory.db.p7s
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-rpzaenc-stop-accessing-out-of-bounds-frame.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-rpzaenc-stop-accessing-out-of-bounds-frame.patch
new file mode 100644
index 0000000000..97fcfd993a
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-rpzaenc-stop-accessing-out-of-bounds-frame.patch
@@ -0,0 +1,86 @@
+From ce25c03fb83395c0a8b5b8121182a486c4408dd4 Mon Sep 17 00:00:00 2001
+From: Paul B Mahol <onemda@gmail.com>
+Date: Sat, 12 Nov 2022 16:12:00 +0100
+Subject: [PATCH] avcodec/rpzaenc: stop accessing out of bounds frame
+
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/92f9b28ed84a77138105475beba16c146bdaf984]
+CVE: CVE-2022-3964
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+
+---
+ libavcodec/rpzaenc.c | 22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+diff --git a/libavcodec/rpzaenc.c b/libavcodec/rpzaenc.c
+index 337b1fa..3e97c87 100644
+--- a/libavcodec/rpzaenc.c
++++ b/libavcodec/rpzaenc.c
+@@ -205,7 +205,7 @@ static void get_max_component_diff(BlockInfo *bi, uint16_t *block_ptr,
+
+ // loop thru and compare pixels
+ for (y = 0; y < bi->block_height; y++) {
+- for (x = 0; x < bi->block_width; x++){
++ for (x = 0; x < bi->block_width; x++) {
+ // TODO: optimize
+ min_r = FFMIN(R(block_ptr[x]), min_r);
+ min_g = FFMIN(G(block_ptr[x]), min_g);
+@@ -277,7 +277,7 @@ static int leastsquares(uint16_t *block_ptr, BlockInfo *bi,
+ return -1;
+
+ for (i = 0; i < bi->block_height; i++) {
+- for (j = 0; j < bi->block_width; j++){
++ for (j = 0; j < bi->block_width; j++) {
+ x = GET_CHAN(block_ptr[j], xchannel);
+ y = GET_CHAN(block_ptr[j], ychannel);
+ sumx += x;
+@@ -324,7 +324,7 @@ static int calc_lsq_max_fit_error(uint16_t *block_ptr, BlockInfo *bi,
+ int max_err = 0;
+
+ for (i = 0; i < bi->block_height; i++) {
+- for (j = 0; j < bi->block_width; j++){
++ for (j = 0; j < bi->block_width; j++) {
+ int x_inc, lin_y, lin_x;
+ x = GET_CHAN(block_ptr[j], xchannel);
+ y = GET_CHAN(block_ptr[j], ychannel);
+@@ -419,7 +419,9 @@ static void update_block_in_prev_frame(const uint16_t *src_pixels,
+ uint16_t *dest_pixels,
+ const BlockInfo *bi, int block_counter)
+ {
+- for (int y = 0; y < 4; y++) {
++ const int y_size = FFMIN(4, bi->image_height - bi->row * 4);
++
++ for (int y = 0; y < y_size; y++) {
+ memcpy(dest_pixels, src_pixels, 8);
+ dest_pixels += bi->rowstride;
+ src_pixels += bi->rowstride;
+@@ -729,14 +731,15 @@ post_skip :
+
+ if (err > s->sixteen_color_thresh) { // DO SIXTEEN COLOR BLOCK
+ uint16_t *row_ptr;
+- int rgb555;
++ int y_size, rgb555;
+
+ block_offset = get_block_info(&bi, block_counter);
+
+ row_ptr = &src_pixels[block_offset];
++ y_size = FFMIN(4, bi.image_height - bi.row * 4);
+
+- for (int y = 0; y < 4; y++) {
+- for (int x = 0; x < 4; x++){
++ for (int y = 0; y < y_size; y++) {
++ for (int x = 0; x < 4; x++) {
+ rgb555 = row_ptr[x] & ~0x8000;
+
+ put_bits(&s->pb, 16, rgb555);
+@@ -744,6 +747,11 @@ post_skip :
+ row_ptr += bi.rowstride;
+ }
+
++ for (int y = y_size; y < 4; y++) {
++ for (int x = 0; x < 4; x++)
++ put_bits(&s->pb, 16, 0);
++ }
++
+ block_counter++;
+ } else { // FOUR COLOR BLOCK
+ block_counter += encode_four_color_block(min_color, max_color,
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-smcenc-stop-accessing-out-of-bounds-frame.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-smcenc-stop-accessing-out-of-bounds-frame.patch
new file mode 100644
index 0000000000..8ebf1f69c4
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-smcenc-stop-accessing-out-of-bounds-frame.patch
@@ -0,0 +1,106 @@
+From d2f31887df2c42948dba7446c475026fdbc69336 Mon Sep 17 00:00:00 2001
+From: Paul B Mahol <onemda@gmail.com>
+Date: Sat, 12 Nov 2022 15:19:21 +0100
+Subject: [PATCH] avcodec/smcenc: stop accessing out of bounds frame
+
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/13c13109759090b7f7182480d075e13b36ed8edd]
+
+CVE: CVE-2022-3965
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+
+---
+ libavcodec/smcenc.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/libavcodec/smcenc.c b/libavcodec/smcenc.c
+index 52795ef..618dc4e 100644
+--- a/libavcodec/smcenc.c
++++ b/libavcodec/smcenc.c
+@@ -61,6 +61,7 @@ typedef struct SMCContext {
+ { \
+ row_ptr += stride * 4; \
+ pixel_ptr = row_ptr; \
++ cur_y += 4; \
+ } \
+ } \
+ }
+@@ -117,6 +118,7 @@ static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
+ const uint8_t *prev_pixels = (const uint8_t *)s->prev_frame->data[0];
+ uint8_t *distinct_values = s->distinct_values;
+ const uint8_t *pixel_ptr, *row_ptr;
++ const int height = frame->height;
+ const int width = frame->width;
+ uint8_t block_values[16];
+ int block_counter = 0;
+@@ -125,13 +127,14 @@ static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
+ int color_octet_index = 0;
+ int color_table_index; /* indexes to color pair, quad, or octet tables */
+ int total_blocks;
++ int cur_y = 0;
+
+ memset(s->color_pairs, 0, sizeof(s->color_pairs));
+ memset(s->color_quads, 0, sizeof(s->color_quads));
+ memset(s->color_octets, 0, sizeof(s->color_octets));
+
+ /* Number of 4x4 blocks in frame. */
+- total_blocks = ((frame->width + 3) / 4) * ((frame->height + 3) / 4);
++ total_blocks = ((width + 3) / 4) * ((height + 3) / 4);
+
+ pixel_ptr = row_ptr = src_pixels;
+
+@@ -145,11 +148,13 @@ static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
+ int cache_index;
+ int distinct = 0;
+ int blocks = 0;
++ int frame_y = cur_y;
+
+ while (prev_pixels && s->key_frame == 0 && block_counter + inter_skip_blocks < total_blocks) {
++ const int y_size = FFMIN(4, height - cur_y);
+ int compare = 0;
+
+- for (int y = 0; y < 4; y++) {
++ for (int y = 0; y < y_size; y++) {
+ const ptrdiff_t offset = pixel_ptr - src_pixels;
+ const uint8_t *prev_pixel_ptr = prev_pixels + offset;
+
+@@ -170,8 +175,10 @@ static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
+
+ pixel_ptr = xpixel_ptr;
+ row_ptr = xrow_ptr;
++ cur_y = frame_y;
+
+ while (block_counter > 0 && block_counter + intra_skip_blocks < total_blocks) {
++ const int y_size = FFMIN(4, height - cur_y);
+ const ptrdiff_t offset = pixel_ptr - src_pixels;
+ const int sy = offset / stride;
+ const int sx = offset % stride;
+@@ -180,7 +187,7 @@ static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
+ const uint8_t *old_pixel_ptr = src_pixels + nx + ny * stride;
+ int compare = 0;
+
+- for (int y = 0; y < 4; y++) {
++ for (int y = 0; y < y_size; y++) {
+ compare |= memcmp(old_pixel_ptr + y * stride, pixel_ptr + y * stride, 4);
+ if (compare)
+ break;
+@@ -197,9 +204,11 @@ static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
+
+ pixel_ptr = xpixel_ptr;
+ row_ptr = xrow_ptr;
++ cur_y = frame_y;
+
+ while (block_counter + coded_blocks < total_blocks && coded_blocks < 256) {
+- for (int y = 0; y < 4; y++)
++ const int y_size = FFMIN(4, height - cur_y);
++ for (int y = 0; y < y_size; y++)
+ memcpy(block_values + y * 4, pixel_ptr + y * stride, 4);
+
+ qsort(block_values, 16, sizeof(block_values[0]), smc_cmp_values);
+@@ -224,6 +233,7 @@ static void smc_encode_stream(SMCContext *s, const AVFrame *frame,
+
+ pixel_ptr = xpixel_ptr;
+ row_ptr = xrow_ptr;
++ cur_y = frame_y;
+
+ blocks = coded_blocks;
+ distinct = coded_distinct;
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-vp3-Add-missing-check-for-av_malloc.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-vp3-Add-missing-check-for-av_malloc.patch
new file mode 100644
index 0000000000..dca7c827e3
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avcodec-vp3-Add-missing-check-for-av_malloc.patch
@@ -0,0 +1,42 @@
+From ef748a8bd8720416b673e1743e5673a801e8279f Mon Sep 17 00:00:00 2001
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Date: Tue, 15 Feb 2022 17:58:08 +0800
+Subject: [PATCH] avcodec/vp3: Add missing check for av_malloc
+
+Since the av_malloc() may fail and return NULL pointer,
+it is needed that the 's->edge_emu_buffer' should be checked
+whether the new allocation is success.
+
+Fixes: d14723861b ("VP3: fix decoding of videos with stride > 2048")
+Reviewed-by: Peter Ross <pross@xvid.org>
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+CVE: CVE-2022-3109
+
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/656cb0450aeb73b25d7d26980af342b37ac4c568]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+
+---
+ libavcodec/vp3.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
+index 5b9ba60..f1eccfe 100644
+--- a/libavcodec/vp3.c
++++ b/libavcodec/vp3.c
+@@ -2677,8 +2677,13 @@ static int vp3_decode_frame(AVCodecContext *avctx,
+ if ((ret = ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+ goto error;
+
+- if (!s->edge_emu_buffer)
++ if (!s->edge_emu_buffer) {
+ s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
++ if (!s->edge_emu_buffer) {
++ ret = AVERROR(ENOMEM);
++ goto error;
++ }
++ }
+
+ if (s->keyframe) {
+ if (!s->theora) {
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avformat-nutdec-Add-check-for-avformat_new_stream.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avformat-nutdec-Add-check-for-avformat_new_stream.patch
new file mode 100644
index 0000000000..41d5884f88
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-avformat-nutdec-Add-check-for-avformat_new_stream.patch
@@ -0,0 +1,67 @@
+From 9cf652cef49d74afe3d454f27d49eb1a1394951e Mon Sep 17 00:00:00 2001
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Date: Wed, 23 Feb 2022 10:31:59 +0800
+Subject: [PATCH] avformat/nutdec: Add check for avformat_new_stream
+
+Check for failure of avformat_new_stream() and propagate
+the error code.
+
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+
+CVE: CVE-2022-3341
+
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/9cf652cef49d74afe3d454f27d49eb1a1394951e]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ libavformat/nutdec.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c
+index 0a8a700acf..f9ad2c0af1 100644
+--- a/libavformat/nutdec.c
++++ b/libavformat/nutdec.c
+@@ -351,8 +351,12 @@ static int decode_main_header(NUTContext *nut)
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+- for (i = 0; i < stream_count; i++)
+- avformat_new_stream(s, NULL);
++ for (i = 0; i < stream_count; i++) {
++ if (!avformat_new_stream(s, NULL)) {
++ ret = AVERROR(ENOMEM);
++ goto fail;
++ }
++ }
+
+ return 0;
+ fail:
+@@ -800,19 +804,23 @@ static int nut_read_header(AVFormatContext *s)
+ NUTContext *nut = s->priv_data;
+ AVIOContext *bc = s->pb;
+ int64_t pos;
+- int initialized_stream_count;
++ int initialized_stream_count, ret;
+
+ nut->avf = s;
+
+ /* main header */
+ pos = 0;
++ ret = 0;
+ do {
++ if (ret == AVERROR(ENOMEM))
++ return ret;
++
+ pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1;
+ if (pos < 0 + 1) {
+ av_log(s, AV_LOG_ERROR, "No main startcode found.\n");
+ return AVERROR_INVALIDDATA;
+ }
+- } while (decode_main_header(nut) < 0);
++ } while ((ret = decode_main_header(nut)) < 0);
+
+ /* stream headers */
+ pos = 0;
+--
+2.34.1
+
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch
new file mode 100644
index 0000000000..3cd374dc39
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch
@@ -0,0 +1,130 @@
+From e40c964a0678908e2c756741343ed50d6a99ee12 Mon Sep 17 00:00:00 2001
+From: Anton Khirnov <anton@khirnov.net>
+Date: Fri, 28 Apr 2023 11:45:30 +0000
+Subject: [PATCH] lavc/pthread_frame: avoid leaving stale hwaccel state in
+ worker threads
+
+This state is not refcounted, so make sure it always has a well-defined
+owner.
+
+Remove the block added in 091341f, as
+this commit also solves that issue in a more general way.
+
+CVE: CVE-2022-48434
+
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/cc867f2c09d2b69cee8a0eccd62aff002cbbfe11]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ libavcodec/pthread_frame.c | 46 +++++++++++++++++++++++++++++---------
+ 1 file changed, 35 insertions(+), 11 deletions(-)
+
+diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
+index 85a6bc9..e40dced 100644
+--- a/libavcodec/pthread_frame.c
++++ b/libavcodec/pthread_frame.c
+@@ -145,6 +145,12 @@ typedef struct FrameThreadContext {
+ * Set for the first N packets, where N is the number of threads.
+ * While it is set, ff_thread_en/decode_frame won't return any results.
+ */
++
++ /* hwaccel state is temporarily stored here in order to transfer its ownership
++ * to the next decoding thread without the need for extra synchronization */
++ const AVHWAccel *stash_hwaccel;
++ void *stash_hwaccel_context;
++ void *stash_hwaccel_priv;
+ } FrameThreadContext;
+
+ #if FF_API_THREAD_SAFE_CALLBACKS
+@@ -229,9 +235,17 @@ FF_ENABLE_DEPRECATION_WARNINGS
+ ff_thread_finish_setup(avctx);
+
+ if (p->hwaccel_serializing) {
++ /* wipe hwaccel state to avoid stale pointers lying around;
++ * the state was transferred to FrameThreadContext in
++ * ff_thread_finish_setup(), so nothing is leaked */
++ avctx->hwaccel = NULL;
++ avctx->hwaccel_context = NULL;
++ avctx->internal->hwaccel_priv_data = NULL;
++
+ p->hwaccel_serializing = 0;
+ pthread_mutex_unlock(&p->parent->hwaccel_mutex);
+ }
++ av_assert0(!avctx->hwaccel);
+
+ if (p->async_serializing) {
+ p->async_serializing = 0;
+@@ -294,14 +308,10 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
+ dst->color_range = src->color_range;
+ dst->chroma_sample_location = src->chroma_sample_location;
+
+- dst->hwaccel = src->hwaccel;
+- dst->hwaccel_context = src->hwaccel_context;
+-
+ dst->channels = src->channels;
+ dst->sample_rate = src->sample_rate;
+ dst->sample_fmt = src->sample_fmt;
+ dst->channel_layout = src->channel_layout;
+- dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;
+
+ if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
+ (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
+@@ -442,6 +452,12 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
+ pthread_mutex_unlock(&p->mutex);
+ return err;
+ }
++
++ /* transfer hwaccel state stashed from previous thread, if any */
++ av_assert0(!p->avctx->hwaccel);
++ FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
++ FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
++ FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
+ }
+
+ av_packet_unref(p->avpkt);
+@@ -647,6 +663,14 @@ void ff_thread_finish_setup(AVCodecContext *avctx) {
+ async_lock(p->parent);
+ }
+
++ /* save hwaccel state for passing to the next thread;
++ * this is done here so that this worker thread can wipe its own hwaccel
++ * state after decoding, without requiring synchronization */
++ av_assert0(!p->parent->stash_hwaccel);
++ p->parent->stash_hwaccel = avctx->hwaccel;
++ p->parent->stash_hwaccel_context = avctx->hwaccel_context;
++ p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
++
+ pthread_mutex_lock(&p->progress_mutex);
+ if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
+ av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
+@@ -700,13 +724,6 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
+
+ park_frame_worker_threads(fctx, thread_count);
+
+- if (fctx->prev_thread && avctx->internal->hwaccel_priv_data !=
+- fctx->prev_thread->avctx->internal->hwaccel_priv_data) {
+- if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) {
+- av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n");
+- }
+- }
+-
+ if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
+ if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
+@@ -760,6 +777,13 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
+ av_freep(&fctx->threads);
+ ff_pthread_free(fctx, thread_ctx_offsets);
+
++ /* if we have stashed hwaccel state, move it to the user-facing context,
++ * so it will be freed in avcodec_close() */
++ av_assert0(!avctx->hwaccel);
++ FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel);
++ FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context);
++ FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
++
+ av_freep(&avctx->internal->thread_ctx);
+ }
+
+--
+2.40.0
+
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg_5.0.1.bb b/meta/recipes-multimedia/ffmpeg/ffmpeg_5.0.1.bb
index dd14f8df6f..1295d5cdf1 100644
--- a/meta/recipes-multimedia/ffmpeg/ffmpeg_5.0.1.bb
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg_5.0.1.bb
@@ -24,9 +24,21 @@ LIC_FILES_CHKSUM = "file://COPYING.GPLv2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
SRC_URI = "https://www.ffmpeg.org/releases/${BP}.tar.xz \
file://0001-libavutil-include-assembly-with-full-path-from-sourc.patch \
- "
+ file://0001-avcodec-rpzaenc-stop-accessing-out-of-bounds-frame.patch \
+ file://0001-avcodec-smcenc-stop-accessing-out-of-bounds-frame.patch \
+ file://0001-avcodec-vp3-Add-missing-check-for-av_malloc.patch \
+ file://0001-avformat-nutdec-Add-check-for-avformat_new_stream.patch \
+ file://CVE-2022-48434.patch \
+ "
+
SRC_URI[sha256sum] = "ef2efae259ce80a240de48ec85ecb062cecca26e4352ffb3fda562c21a93007b"
+# CVE-2023-39018 issue belongs to ffmpeg-cli-wrapper (Java wrapper around the FFmpeg CLI)
+# and not ffmpeg itself.
+# https://security-tracker.debian.org/tracker/CVE-2023-39018
+# https://bugzilla.suse.com/show_bug.cgi?id=CVE-2023-39018
+CVE_CHECK_IGNORE += "CVE-2023-39018"
+
# Build fails when thumb is enabled: https://bugzilla.yoctoproject.org/show_bug.cgi?id=7717
ARM_INSTRUCTION_SET:armv4 = "arm"
ARM_INSTRUCTION_SET:armv5 = "arm"
diff --git a/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch b/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch
new file mode 100644
index 0000000000..e042872dc0
--- /dev/null
+++ b/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch
@@ -0,0 +1,197 @@
+From 579ff6922089cbbbd179619e40e622e279bd719f Mon Sep 17 00:00:00 2001
+From: Martijn van Beurden <mvanb1@gmail.com>
+Date: Wed, 3 Aug 2022 13:52:19 +0200
+Subject: [PATCH] flac: Add and use _nofree variants of safe_realloc functions
+
+Parts of the code use realloc like
+
+x = safe_realloc(x, somesize);
+
+when this is the case, the safe_realloc variant used must free the
+old memory block in case it fails, otherwise it will leak. However,
+there are also instances in the code where handling is different:
+
+if (0 == (x = safe_realloc(y, somesize)))
+ return false
+
+in this case, y should not be freed, as y is not set to NULL we
+could encounter double frees. Here the safe_realloc_nofree
+functions are used.
+
+Upstream-Status: Backport [https://github.com/xiph/flac/commit/21fe95ee828b0b9b944f6aa0bb02d24fbb981815]
+CVE: CVE-2020-22219
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ include/share/alloc.h | 41 +++++++++++++++++++++++++++++++----
+ src/flac/encode.c | 4 ++--
+ src/flac/foreign_metadata.c | 2 +-
+ src/libFLAC/bitwriter.c | 2 +-
+ src/libFLAC/metadata_object.c | 2 +-
+ src/plugin_common/tags.c | 2 +-
+ src/share/utf8/iconvert.c | 2 +-
+ 7 files changed, 44 insertions(+), 11 deletions(-)
+
+diff --git a/include/share/alloc.h b/include/share/alloc.h
+index 914de9b..55bdd1d 100644
+--- a/include/share/alloc.h
++++ b/include/share/alloc.h
+@@ -161,17 +161,30 @@ static inline void *safe_realloc_(void *ptr, size_t size)
+ free(oldptr);
+ return newptr;
+ }
+-static inline void *safe_realloc_add_2op_(void *ptr, size_t size1, size_t size2)
++static inline void *safe_realloc_nofree_add_2op_(void *ptr, size_t size1, size_t size2)
++{
++ size2 += size1;
++ if(size2 < size1)
++ return 0;
++ return realloc(ptr, size2);
++}
++
++static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+ size2 += size1;
+ if(size2 < size1) {
+ free(ptr);
+ return 0;
+ }
+- return realloc(ptr, size2);
++ size3 += size2;
++ if(size3 < size2) {
++ free(ptr);
++ return 0;
++ }
++ return safe_realloc_(ptr, size3);
+ }
+
+-static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
++static inline void *safe_realloc_nofree_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+ size2 += size1;
+ if(size2 < size1)
+@@ -182,7 +195,7 @@ static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2,
+ return realloc(ptr, size3);
+ }
+
+-static inline void *safe_realloc_add_4op_(void *ptr, size_t size1, size_t size2, size_t size3, size_t size4)
++static inline void *safe_realloc_nofree_add_4op_(void *ptr, size_t size1, size_t size2, size_t size3, size_t size4)
+ {
+ size2 += size1;
+ if(size2 < size1)
+@@ -205,6 +218,15 @@ static inline void *safe_realloc_mul_2op_(void *ptr, size_t size1, size_t size2)
+ return safe_realloc_(ptr, size1*size2);
+ }
+
++static inline void *safe_realloc_nofree_mul_2op_(void *ptr, size_t size1, size_t size2)
++{
++ if(!size1 || !size2)
++ return realloc(ptr, 0); /* preserve POSIX realloc(ptr, 0) semantics */
++ if(size1 > SIZE_MAX / size2)
++ return 0;
++ return realloc(ptr, size1*size2);
++}
++
+ /* size1 * (size2 + size3) */
+ static inline void *safe_realloc_muladd2_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+@@ -216,4 +238,15 @@ static inline void *safe_realloc_muladd2_(void *ptr, size_t size1, size_t size2,
+ return safe_realloc_mul_2op_(ptr, size1, size2);
+ }
+
++/* size1 * (size2 + size3) */
++static inline void *safe_realloc_nofree_muladd2_(void *ptr, size_t size1, size_t size2, size_t size3)
++{
++ if(!size1 || (!size2 && !size3))
++ return realloc(ptr, 0); /* preserve POSIX realloc(ptr, 0) semantics */
++ size2 += size3;
++ if(size2 < size3)
++ return 0;
++ return safe_realloc_nofree_mul_2op_(ptr, size1, size2);
++}
++
+ #endif
+diff --git a/src/flac/encode.c b/src/flac/encode.c
+index a9b907f..f87250c 100644
+--- a/src/flac/encode.c
++++ b/src/flac/encode.c
+@@ -1743,10 +1743,10 @@ static void static_metadata_clear(static_metadata_t *m)
+ static FLAC__bool static_metadata_append(static_metadata_t *m, FLAC__StreamMetadata *d, FLAC__bool needs_delete)
+ {
+ void *x;
+- if(0 == (x = safe_realloc_muladd2_(m->metadata, sizeof(*m->metadata), /*times (*/m->num_metadata, /*+*/1/*)*/)))
++ if(0 == (x = safe_realloc_nofree_muladd2_(m->metadata, sizeof(*m->metadata), /*times (*/m->num_metadata, /*+*/1/*)*/)))
+ return false;
+ m->metadata = (FLAC__StreamMetadata**)x;
+- if(0 == (x = safe_realloc_muladd2_(m->needs_delete, sizeof(*m->needs_delete), /*times (*/m->num_metadata, /*+*/1/*)*/)))
++ if(0 == (x = safe_realloc_nofree_muladd2_(m->needs_delete, sizeof(*m->needs_delete), /*times (*/m->num_metadata, /*+*/1/*)*/)))
+ return false;
+ m->needs_delete = (FLAC__bool*)x;
+ m->metadata[m->num_metadata] = d;
+diff --git a/src/flac/foreign_metadata.c b/src/flac/foreign_metadata.c
+index 9ad9c18..fdfb3cf 100644
+--- a/src/flac/foreign_metadata.c
++++ b/src/flac/foreign_metadata.c
+@@ -75,7 +75,7 @@ static FLAC__bool copy_data_(FILE *fin, FILE *fout, size_t size, const char **er
+
+ static FLAC__bool append_block_(foreign_metadata_t *fm, FLAC__off_t offset, FLAC__uint32 size, const char **error)
+ {
+- foreign_block_t *fb = safe_realloc_muladd2_(fm->blocks, sizeof(foreign_block_t), /*times (*/fm->num_blocks, /*+*/1/*)*/);
++ foreign_block_t *fb = safe_realloc_nofree_muladd2_(fm->blocks, sizeof(foreign_block_t), /*times (*/fm->num_blocks, /*+*/1/*)*/);
+ if(fb) {
+ fb[fm->num_blocks].offset = offset;
+ fb[fm->num_blocks].size = size;
+diff --git a/src/libFLAC/bitwriter.c b/src/libFLAC/bitwriter.c
+index 6e86585..a510b0d 100644
+--- a/src/libFLAC/bitwriter.c
++++ b/src/libFLAC/bitwriter.c
+@@ -124,7 +124,7 @@ FLAC__bool bitwriter_grow_(FLAC__BitWriter *bw, uint32_t bits_to_add)
+ FLAC__ASSERT(new_capacity > bw->capacity);
+ FLAC__ASSERT(new_capacity >= bw->words + ((bw->bits + bits_to_add + FLAC__BITS_PER_WORD - 1) / FLAC__BITS_PER_WORD));
+
+- new_buffer = safe_realloc_mul_2op_(bw->buffer, sizeof(bwword), /*times*/new_capacity);
++ new_buffer = safe_realloc_nofree_mul_2op_(bw->buffer, sizeof(bwword), /*times*/new_capacity);
+ if(new_buffer == 0)
+ return false;
+ bw->buffer = new_buffer;
+diff --git a/src/libFLAC/metadata_object.c b/src/libFLAC/metadata_object.c
+index de8e513..aef65be 100644
+--- a/src/libFLAC/metadata_object.c
++++ b/src/libFLAC/metadata_object.c
+@@ -98,7 +98,7 @@ static FLAC__bool free_copy_bytes_(FLAC__byte **to, const FLAC__byte *from, uint
+ /* realloc() failure leaves entry unchanged */
+ static FLAC__bool ensure_null_terminated_(FLAC__byte **entry, uint32_t length)
+ {
+- FLAC__byte *x = safe_realloc_add_2op_(*entry, length, /*+*/1);
++ FLAC__byte *x = safe_realloc_nofree_add_2op_(*entry, length, /*+*/1);
+ if (x != NULL) {
+ x[length] = '\0';
+ *entry = x;
+diff --git a/src/plugin_common/tags.c b/src/plugin_common/tags.c
+index ae440c5..dfa10d3 100644
+--- a/src/plugin_common/tags.c
++++ b/src/plugin_common/tags.c
+@@ -317,7 +317,7 @@ FLAC__bool FLAC_plugin__tags_add_tag_utf8(FLAC__StreamMetadata *tags, const char
+ const size_t value_len = strlen(value);
+ const size_t separator_len = strlen(separator);
+ FLAC__byte *new_entry;
+- if(0 == (new_entry = safe_realloc_add_4op_(entry->entry, entry->length, /*+*/value_len, /*+*/separator_len, /*+*/1)))
++ if(0 == (new_entry = safe_realloc_nofree_add_4op_(entry->entry, entry->length, /*+*/value_len, /*+*/separator_len, /*+*/1)))
+ return false;
+ memcpy(new_entry+entry->length, separator, separator_len);
+ entry->length += separator_len;
+diff --git a/src/share/utf8/iconvert.c b/src/share/utf8/iconvert.c
+index 8ab53c1..876c06e 100644
+--- a/src/share/utf8/iconvert.c
++++ b/src/share/utf8/iconvert.c
+@@ -149,7 +149,7 @@ int iconvert(const char *fromcode, const char *tocode,
+ iconv_close(cd1);
+ return ret;
+ }
+- newbuf = safe_realloc_add_2op_(utfbuf, (ob - utfbuf), /*+*/1);
++ newbuf = safe_realloc_nofree_add_2op_(utfbuf, (ob - utfbuf), /*+*/1);
+ if (!newbuf)
+ goto fail;
+ ob = (ob - utfbuf) + newbuf;
+--
+2.40.0
diff --git a/meta/recipes-multimedia/flac/flac_1.3.4.bb b/meta/recipes-multimedia/flac/flac_1.3.4.bb
index 012da0a0a0..1a44718bba 100644
--- a/meta/recipes-multimedia/flac/flac_1.3.4.bb
+++ b/meta/recipes-multimedia/flac/flac_1.3.4.bb
@@ -15,6 +15,7 @@ LIC_FILES_CHKSUM = "file://COPYING.FDL;md5=ad1419ecc56e060eccf8184a87c4285f \
DEPENDS = "libogg"
SRC_URI = "http://downloads.xiph.org/releases/flac/${BP}.tar.xz \
+ file://CVE-2020-22219.patch \
"
SRC_URI[sha256sum] = "8ff0607e75a322dd7cd6ec48f4f225471404ae2730d0ea945127b1355155e737"
diff --git a/meta/recipes-multimedia/gstreamer/gst-devtools_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gst-devtools_1.20.7.bb
index 4819a34b26..2409ea25e1 100644
--- a/meta/recipes-multimedia/gstreamer/gst-devtools_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gst-devtools_1.20.7.bb
@@ -12,7 +12,7 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/gst-devtools/gst-devtools-${PV}
file://0001-connect-has-a-different-signature-on-musl.patch \
"
-SRC_URI[sha256sum] = "b28dba953a92532208b30467ff91076295e266f65364b1b3482b4c4372d44b2a"
+SRC_URI[sha256sum] = "2df2ddfee05f6ce978207de9086ca22f00fc36e04f74a11869074da178585e35"
DEPENDS = "json-glib glib-2.0 glib-2.0-native gstreamer1.0 gstreamer1.0-plugins-base"
RRECOMMENDS:${PN} = "git"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.20.7.bb
index 4ef9755c07..f3f53893b6 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.20.7.bb
@@ -12,7 +12,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=69333daa044cb77e486cc36129f7a770 \
"
SRC_URI = "https://gstreamer.freedesktop.org/src/gst-libav/gst-libav-${PV}.tar.xz"
-SRC_URI[sha256sum] = "b5c531dd8413bf771c79dab66b8e389f20b3991f745115133f0fa0b8e32809f9"
+SRC_URI[sha256sum] = "65e776e366f7f3549a9a829418817f464dcc5dc9845220c64a886683d8841b56"
S = "${WORKDIR}/gst-libav-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.20.7.bb
index c4f5d719bb..bcbe0206d7 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.20.7.bb
@@ -10,7 +10,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c \
SRC_URI = "https://gstreamer.freedesktop.org/src/gst-omx/gst-omx-${PV}.tar.xz"
-SRC_URI[sha256sum] = "7efed7cc5b0acf9a669e38c5360a7892430a4e86c858daac6faa1ade2b151668"
+SRC_URI[sha256sum] = "e3dd418e3235db044104c1cb024f609e57035251fd1718e4e3e5d64780af1805"
S = "${WORKDIR}/gst-omx-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40474.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40474.patch
new file mode 100644
index 0000000000..dd5886863d
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40474.patch
@@ -0,0 +1,118 @@
+From ce17e968e4cf900d28ca5b46f6e095febc42b4f0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Thu, 10 Aug 2023 15:45:01 +0300
+Subject: [PATCH] mxfdemux: Fix integer overflow causing out of bounds writes
+ when handling invalid uncompressed video
+
+Check ahead of time when parsing the track information whether
+width, height and bpp are valid and usable without overflows.
+
+Fixes ZDI-CAN-21660, CVE-2023-40474
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/2896
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/5362>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/ce17e968e4cf900d28ca5b46f6e095febc42b4f0]
+CVE: CVE-2023-40474
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ gst/mxf/mxfup.c | 51 +++++++++++++++++----
+ 1 file changed, 43 insertions(+), 8 deletions(-)
+
+diff --git a/gst/mxf/mxfup.c b/gst/mxf/mxfup.c
+index d72ed22cb7..0c0178c1c9 100644
+--- a/gst/mxf/mxfup.c
++++ b/gst/mxf/mxfup.c
+@@ -118,6 +118,8 @@ mxf_up_handle_essence_element (const MXFUL * key, GstBuffer * buffer,
+ gpointer mapping_data, GstBuffer ** outbuf)
+ {
+ MXFUPMappingData *data = mapping_data;
++ gsize expected_in_stride = 0, out_stride = 0;
++ gsize expected_in_size = 0, out_size = 0;
+
+ /* SMPTE 384M 7.1 */
+ if (key->u[12] != 0x15 || (key->u[14] != 0x01 && key->u[14] != 0x02
+@@ -146,22 +148,25 @@ mxf_up_handle_essence_element (const MXFUL * key, GstBuffer * buffer,
+ }
+ }
+
+- if (gst_buffer_get_size (buffer) != data->bpp * data->width * data->height) {
++ // Checked for overflows when parsing the descriptor
++ expected_in_stride = data->bpp * data->width;
++ out_stride = GST_ROUND_UP_4 (expected_in_stride);
++ expected_in_size = expected_in_stride * data->height;
++ out_size = out_stride * data->height;
++
++ if (gst_buffer_get_size (buffer) != expected_in_size) {
+ GST_ERROR ("Invalid buffer size");
+ gst_buffer_unref (buffer);
+ return GST_FLOW_ERROR;
+ }
+
+- if (data->bpp != 4
+- || GST_ROUND_UP_4 (data->width * data->bpp) != data->width * data->bpp) {
++ if (data->bpp != 4 || out_stride != expected_in_stride) {
+ guint y;
+ GstBuffer *ret;
+ GstMapInfo inmap, outmap;
+ guint8 *indata, *outdata;
+
+- ret =
+- gst_buffer_new_and_alloc (GST_ROUND_UP_4 (data->width * data->bpp) *
+- data->height);
++ ret = gst_buffer_new_and_alloc (out_size);
+ gst_buffer_map (buffer, &inmap, GST_MAP_READ);
+ gst_buffer_map (ret, &outmap, GST_MAP_WRITE);
+ indata = inmap.data;
+@@ -169,8 +174,8 @@ mxf_up_handle_essence_element (const MXFUL * key, GstBuffer * buffer,
+
+ for (y = 0; y < data->height; y++) {
+ memcpy (outdata, indata, data->width * data->bpp);
+- outdata += GST_ROUND_UP_4 (data->width * data->bpp);
+- indata += data->width * data->bpp;
++ outdata += out_stride;
++ indata += expected_in_stride;
+ }
+
+ gst_buffer_unmap (buffer, &inmap);
+@@ -378,6 +383,36 @@ mxf_up_create_caps (MXFMetadataTimelineTrack * track, GstTagList ** tags,
+ return NULL;
+ }
+
++ if (caps) {
++ MXFUPMappingData *data = *mapping_data;
++ gsize expected_in_stride = 0, out_stride = 0;
++ gsize expected_in_size = 0, out_size = 0;
++
++ // Do some checking of the parameters to see if they're valid and
++ // we can actually work with them.
++ if (data->image_start_offset > data->image_end_offset) {
++ GST_WARNING ("Invalid image start/end offset");
++ g_free (data);
++ *mapping_data = NULL;
++ gst_clear_caps (&caps);
++
++ return NULL;
++ }
++
++ if (!g_size_checked_mul (&expected_in_stride, data->bpp, data->width) ||
++ (out_stride = GST_ROUND_UP_4 (expected_in_stride)) < expected_in_stride
++ || !g_size_checked_mul (&expected_in_size, expected_in_stride,
++ data->height)
++ || !g_size_checked_mul (&out_size, out_stride, data->height)) {
++ GST_ERROR ("Invalid resolution or bit depth");
++ g_free (data);
++ *mapping_data = NULL;
++ gst_clear_caps (&caps);
++
++ return NULL;
++ }
++ }
++
+ return caps;
+ }
+
+--
+2.40.0
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40475.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40475.patch
new file mode 100644
index 0000000000..ab9ac7afaa
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40475.patch
@@ -0,0 +1,49 @@
+From 72742dee30cce7bf909639f82de119871566ce39 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Thu, 10 Aug 2023 15:47:03 +0300
+Subject: [PATCH] mxfdemux: Check number of channels for AES3 audio
+
+Only up to 8 channels are allowed and using a higher number would cause
+integer overflows when copying the data, and lead to out of bound
+writes.
+
+Also check that each buffer is at least 4 bytes long to avoid another
+overflow.
+
+Fixes ZDI-CAN-21661, CVE-2023-40475
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/2897
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/5362>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/72742dee30cce7bf909639f82de119871566ce39]
+CVE: CVE-2023-40475
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ gst/mxf/mxfd10.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/gst/mxf/mxfd10.c b/gst/mxf/mxfd10.c
+index 03854d9303..0ad0d2d283 100644
+--- a/gst/mxf/mxfd10.c
++++ b/gst/mxf/mxfd10.c
+@@ -101,7 +101,7 @@ mxf_d10_sound_handle_essence_element (const MXFUL * key, GstBuffer * buffer,
+ gst_buffer_map (buffer, &map, GST_MAP_READ);
+
+ /* Now transform raw AES3 into raw audio, see SMPTE 331M */
+- if ((map.size - 4) % 32 != 0) {
++ if (map.size < 4 || (map.size - 4) % 32 != 0) {
+ gst_buffer_unmap (buffer, &map);
+ GST_ERROR ("Invalid D10 sound essence buffer size");
+ return GST_FLOW_ERROR;
+@@ -201,6 +201,7 @@ mxf_d10_create_caps (MXFMetadataTimelineTrack * track, GstTagList ** tags,
+ GstAudioFormat audio_format;
+
+ if (s->channel_count == 0 ||
++ s->channel_count > 8 ||
+ s->quantization_bits == 0 ||
+ s->audio_sampling_rate.n == 0 || s->audio_sampling_rate.d == 0) {
+ GST_ERROR ("Invalid descriptor");
+--
+2.40.0
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40476.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40476.patch
new file mode 100644
index 0000000000..7810e98024
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-40476.patch
@@ -0,0 +1,44 @@
+From 1b51467ea640bcc73c97f3186350d72cbfba5cb4 Mon Sep 17 00:00:00 2001
+From: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Date: Wed, 9 Aug 2023 12:49:19 -0400
+Subject: [PATCH] h265parser: Fix possible overflow using max_sub_layers_minus1
+
+This fixes a possible overflow that can be triggered by an invalid value of
+max_sub_layers_minus1 being set in the bitstream. The bitstream uses 3 bits,
+but the allowed range is 0 to 6 only.
+
+Fixes ZDI-CAN-21768, CVE-2023-40476
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/2895
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/5364>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/ff91a3d8d6f7e2412c44663bf30fad5c7fdbc9d9]
+CVE: CVE-2023-40476
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+
+---
+ gst-libs/gst/codecparsers/gsth265parser.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/gst-libs/gst/codecparsers/gsth265parser.c b/gst-libs/gst/codecparsers/gsth265parser.c
+index a4e7549..3db1c38 100644
+--- a/gst-libs/gst/codecparsers/gsth265parser.c
++++ b/gst-libs/gst/codecparsers/gsth265parser.c
+@@ -1670,6 +1670,7 @@ gst_h265_parse_vps (GstH265NalUnit * nalu, GstH265VPS * vps)
+
+ READ_UINT8 (&nr, vps->max_layers_minus1, 6);
+ READ_UINT8 (&nr, vps->max_sub_layers_minus1, 3);
++ CHECK_ALLOWED (vps->max_sub_layers_minus1, 0, 6);
+ READ_UINT8 (&nr, vps->temporal_id_nesting_flag, 1);
+
+ /* skip reserved_0xffff_16bits */
+@@ -1849,6 +1850,7 @@ gst_h265_parse_sps (GstH265Parser * parser, GstH265NalUnit * nalu,
+ sps->vps = vps;
+
+ READ_UINT8 (&nr, sps->max_sub_layers_minus1, 3);
++ CHECK_ALLOWED (sps->max_sub_layers_minus1, 0, 6);
+ READ_UINT8 (&nr, sps->temporal_id_nesting_flag, 1);
+
+ if (!gst_h265_parse_profile_tier_level (&sps->profile_tier_level, &nr,
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-44429.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-44429.patch
new file mode 100644
index 0000000000..5070d6b865
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/CVE-2023-44429.patch
@@ -0,0 +1,38 @@
+From 1db83d3f745332cbda6adf954b2c53a10caa205e Mon Sep 17 00:00:00 2001
+From: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+Date: Wed, 4 Oct 2023 11:14:38 +0200
+Subject: [PATCH] codecparsers: av1: Clip max tile rows and cols values
+
+Clip tile rows and cols to 64 as described in the AV1 specification.
+
+Fixes ZDI-CAN-22226 / CVE-2023-44429
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/3015
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/5634>
+
+CVE: CVE-2023-44429
+
+Upstream-Status: Backport
+[https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/1db83d3f745332cbda6adf954b2c53a10caa205e]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ gst-libs/gst/codecparsers/gstav1parser.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/gst-libs/gst/codecparsers/gstav1parser.c b/gst-libs/gst/codecparsers/gstav1parser.c
+index 7b9378c..68f8a76 100644
+--- a/gst-libs/gst/codecparsers/gstav1parser.c
++++ b/gst-libs/gst/codecparsers/gstav1parser.c
+@@ -2219,6 +2219,8 @@ gst_av1_parse_tile_info (GstAV1Parser * parser, GstBitReader * br,
+ ((parser->state.mi_cols + 31) >> 5) : ((parser->state.mi_cols + 15) >> 4);
+ sb_rows = seq_header->use_128x128_superblock ? ((parser->state.mi_rows +
+ 31) >> 5) : ((parser->state.mi_rows + 15) >> 4);
++ sb_cols = MIN (GST_AV1_MAX_TILE_COLS, sb_cols);
++ sb_rows = MIN (GST_AV1_MAX_TILE_ROWS, sb_rows);
+ sb_shift = seq_header->use_128x128_superblock ? 5 : 4;
+ sb_size = sb_shift + 2;
+ max_tile_width_sb = GST_AV1_MAX_TILE_WIDTH >> sb_size;
+--
+2.40.0
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.20.7.bb
index bb33e3822e..504cfce1fd 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.20.7.bb
@@ -10,8 +10,12 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/gst-plugins-bad/gst-plugins-bad
file://0002-avoid-including-sys-poll.h-directly.patch \
file://0003-ensure-valid-sentinals-for-gst_structure_get-etc.patch \
file://0004-opencv-resolve-missing-opencv-data-dir-in-yocto-buil.patch \
+ file://CVE-2023-40474.patch \
+ file://CVE-2023-40475.patch \
+ file://CVE-2023-40476.patch \
+ file://CVE-2023-44429.patch \
"
-SRC_URI[sha256sum] = "4adc4c05f41051f8136b80cda99b0d049a34e777832f9fea7c5a70347658745b"
+SRC_URI[sha256sum] = "87251beebfd1325e5118cc67774061f6e8971761ca65a9e5957919610080d195"
S = "${WORKDIR}/gst-plugins-bad-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.20.7.bb
index e47851700a..8dfa70aea3 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.20.7.bb
@@ -11,7 +11,7 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/gst-plugins-base/gst-plugins-ba
file://0003-viv-fb-Make-sure-config.h-is-included.patch \
file://0002-ssaparse-enhance-SSA-text-lines-parsing.patch \
"
-SRC_URI[sha256sum] = "ab0656f2ad4d38292a803e0cb4ca090943a9b43c8063f650b4d3e3606c317f17"
+SRC_URI[sha256sum] = "fde6696a91875095d82c1012b5777c28ba926047ffce08508e12c1d2c66f0057"
S = "${WORKDIR}/gst-plugins-base-${PV}"
@@ -21,7 +21,8 @@ inherit gobject-introspection
# opengl packageconfig factored out to make it easy for distros
# and BSP layers to choose OpenGL APIs/platforms/window systems
-PACKAGECONFIG_GL ?= "${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'gles2 egl', '', d)}"
+PACKAGECONFIG_X11 = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'opengl glx', '', d)}"
+PACKAGECONFIG_GL ?= "${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'gles2 egl ${PACKAGECONFIG_X11}', '', d)}"
PACKAGECONFIG ??= " \
${GSTREAMER_ORC} \
@@ -32,7 +33,7 @@ PACKAGECONFIG ??= " \
"
OPENGL_APIS = 'opengl gles2'
-OPENGL_PLATFORMS = 'egl'
+OPENGL_PLATFORMS = 'egl glx'
X11DEPENDS = "virtual/libx11 libsm libxrender libxv"
X11ENABLEOPTS = "-Dx11=enabled -Dxvideo=enabled -Dxshm=enabled"
@@ -61,6 +62,7 @@ PACKAGECONFIG[gles2] = ",,virtual/libgles2"
# OpenGL platform packageconfigs
PACKAGECONFIG[egl] = ",,virtual/egl"
+PACKAGECONFIG[glx] = ",,virtual/libgl"
# OpenGL window systems (except for X11)
PACKAGECONFIG[gbm] = ",,virtual/libgbm libgudev libdrm"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.20.7.bb
index 6c52fb35b9..dfb0c0f342 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.20.7.bb
@@ -8,7 +8,7 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/gst-plugins-good/gst-plugins-go
file://0001-qt-include-ext-qt-gstqtgl.h-instead-of-gst-gl-gstglf.patch \
"
-SRC_URI[sha256sum] = "83589007bf002b8f9ef627718f308c16d83351905f0db8e85c3060f304143aae"
+SRC_URI[sha256sum] = "599f093cc833a1e346939ab6e78a3f8046855b6da13520aae80dd385434f4ab2"
S = "${WORKDIR}/gst-plugins-good-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.20.7.bb
index edc2ece979..1068bb4d80 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.20.7.bb
@@ -14,7 +14,7 @@ LICENSE_FLAGS = "commercial"
SRC_URI = " \
https://gstreamer.freedesktop.org/src/gst-plugins-ugly/gst-plugins-ugly-${PV}.tar.xz \
"
-SRC_URI[sha256sum] = "b43fb4df94459afbf67ec22003ca58ffadcd19e763f276dca25b64c848adb7bf"
+SRC_URI[sha256sum] = "e761665bb3c66fb35ff3567a283b3763b494acf0fe1df8f4abeda047b22dbc55"
S = "${WORKDIR}/gst-plugins-ugly-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.20.7.bb
index 34bc4bf4f4..83445fab09 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.20.7.bb
@@ -8,7 +8,7 @@ LICENSE = "LGPL-2.1-or-later"
LIC_FILES_CHKSUM = "file://COPYING;md5=c34deae4e395ca07e725ab0076a5f740"
SRC_URI = "https://gstreamer.freedesktop.org/src/${PNREAL}/${PNREAL}-${PV}.tar.xz"
-SRC_URI[sha256sum] = "853ea35a1088c762fb703e5aea9c30031a19222b59786b6599956e154620fa2f"
+SRC_URI[sha256sum] = "a63db0cb502308446db3d3b0a23772f1966f9f2b98fddc22fca49560a0575adc"
DEPENDS = "gstreamer1.0 gstreamer1.0-plugins-base python3-pygobject"
RDEPENDS:${PN} += "gstreamer1.0 gstreamer1.0-plugins-base python3-pygobject"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.20.7.bb
index fd6e16c2bf..2901be69d2 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.20.7.bb
@@ -10,7 +10,7 @@ PNREAL = "gst-rtsp-server"
SRC_URI = "https://gstreamer.freedesktop.org/src/${PNREAL}/${PNREAL}-${PV}.tar.xz"
-SRC_URI[sha256sum] = "6a8e9d136bbee4fc03858a0680dd5cbf91e2e989c43da115858eb21fb1adbcab"
+SRC_URI[sha256sum] = "2c8f46aa9df2245e5b39a2082be8e9d3edc0f61bc34f667803d7a21da1b51987"
S = "${WORKDIR}/${PNREAL}-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.20.7.bb
index 40dc21cf45..21676bddde 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.20.7.bb
@@ -11,7 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=4fbd65380cdd255951079008b364516c"
SRC_URI = "https://gstreamer.freedesktop.org/src/${REALPN}/${REALPN}-${PV}.tar.xz"
-SRC_URI[sha256sum] = "30126ab6e3105dab8da76bd9951a68886149bcd70c7fee0bac68de564de41d3d"
+SRC_URI[sha256sum] = "40b9747408c7066a1344adae001d2d53203adda012814944a1c0a5cff3f33dd6"
S = "${WORKDIR}/${REALPN}-${PV}"
DEPENDS = "libva gstreamer1.0 gstreamer1.0-plugins-base gstreamer1.0-plugins-bad"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0/0005-tests-remove-gstbin-test_watch_for_state_change-test.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0/0005-tests-remove-gstbin-test_watch_for_state_change-test.patch
deleted file mode 100644
index f51df6d20b..0000000000
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0/0005-tests-remove-gstbin-test_watch_for_state_change-test.patch
+++ /dev/null
@@ -1,107 +0,0 @@
-From b935abba3d8fa3ea1ce384c08e650afd8c20b78a Mon Sep 17 00:00:00 2001
-From: Claudius Heine <ch@denx.de>
-Date: Wed, 2 Feb 2022 13:47:02 +0100
-Subject: [PATCH] tests: remove gstbin:test_watch_for_state_change testcase
-
-This testcase seems to be flaky, and upstream marked it as such:
-https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/778
-
-This patch removes the testcase to avoid it interfering with out ptest.
-
-Signed-off-by: Claudius Heine <ch@denx.de>
-
-Upstream-Status: Inappropriate [needs proper upstream fix]
----
- tests/check/gst/gstbin.c | 69 -------------------
- 1 file changed, 69 deletions(-)
-
-diff --git a/tests/check/gst/gstbin.c b/tests/check/gst/gstbin.c
-index e366d5fe20..ac29d81474 100644
---- a/tests/check/gst/gstbin.c
-+++ b/tests/check/gst/gstbin.c
-@@ -691,74 +691,6 @@ GST_START_TEST (test_message_state_changed_children)
-
- GST_END_TEST;
-
--GST_START_TEST (test_watch_for_state_change)
--{
-- GstElement *src, *sink, *bin;
-- GstBus *bus;
-- GstStateChangeReturn ret;
--
-- bin = gst_element_factory_make ("bin", NULL);
-- fail_unless (bin != NULL, "Could not create bin");
--
-- bus = g_object_new (gst_bus_get_type (), NULL);
-- gst_object_ref_sink (bus);
-- gst_element_set_bus (GST_ELEMENT_CAST (bin), bus);
--
-- src = gst_element_factory_make ("fakesrc", NULL);
-- fail_if (src == NULL, "Could not create fakesrc");
-- sink = gst_element_factory_make ("fakesink", NULL);
-- fail_if (sink == NULL, "Could not create fakesink");
--
-- gst_bin_add (GST_BIN (bin), sink);
-- gst_bin_add (GST_BIN (bin), src);
--
-- fail_unless (gst_element_link (src, sink), "could not link src and sink");
--
-- /* change state, spawning two times three messages */
-- ret = gst_element_set_state (GST_ELEMENT (bin), GST_STATE_PAUSED);
-- fail_unless (ret == GST_STATE_CHANGE_ASYNC);
-- ret =
-- gst_element_get_state (GST_ELEMENT (bin), NULL, NULL,
-- GST_CLOCK_TIME_NONE);
-- fail_unless (ret == GST_STATE_CHANGE_SUCCESS);
--
-- pop_state_changed (bus, 6);
-- pop_async_done (bus);
-- pop_latency (bus);
--
-- fail_unless (gst_bus_have_pending (bus) == FALSE,
-- "Unexpected messages on bus");
--
-- ret = gst_element_set_state (GST_ELEMENT (bin), GST_STATE_PLAYING);
-- fail_unless (ret == GST_STATE_CHANGE_SUCCESS);
--
-- pop_state_changed (bus, 3);
--
-- /* this one might return either SUCCESS or ASYNC, likely SUCCESS */
-- ret = gst_element_set_state (GST_ELEMENT (bin), GST_STATE_PAUSED);
-- gst_element_get_state (GST_ELEMENT (bin), NULL, NULL, GST_CLOCK_TIME_NONE);
--
-- pop_state_changed (bus, 3);
-- if (ret == GST_STATE_CHANGE_ASYNC) {
-- pop_async_done (bus);
-- pop_latency (bus);
-- }
--
-- fail_unless (gst_bus_have_pending (bus) == FALSE,
-- "Unexpected messages on bus");
--
-- gst_bus_set_flushing (bus, TRUE);
--
-- ret = gst_element_set_state (GST_ELEMENT (bin), GST_STATE_NULL);
-- fail_unless (ret == GST_STATE_CHANGE_SUCCESS);
--
-- /* clean up */
-- gst_object_unref (bus);
-- gst_object_unref (bin);
--}
--
--GST_END_TEST;
--
- GST_START_TEST (test_state_change_error_message)
- {
- GstElement *src, *sink, *bin;
-@@ -1956,7 +1888,6 @@ gst_bin_suite (void)
- tcase_add_test (tc_chain, test_message_state_changed);
- tcase_add_test (tc_chain, test_message_state_changed_child);
- tcase_add_test (tc_chain, test_message_state_changed_children);
-- tcase_add_test (tc_chain, test_watch_for_state_change);
- tcase_add_test (tc_chain, test_state_change_error_message);
- tcase_add_test (tc_chain, test_add_linked);
- tcase_add_test (tc_chain, test_add_self);
---
-2.33.1
-
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.20.2.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.20.7.bb
index 3aa9aa7048..6d002198ae 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.20.2.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.20.7.bb
@@ -21,9 +21,8 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/gstreamer/gstreamer-${PV}.tar.x
file://0002-tests-add-support-for-install-the-tests.patch;striplevel=3 \
file://0003-tests-use-a-dictionaries-for-environment.patch;striplevel=3 \
file://0004-tests-add-helper-script-to-run-the-installed_tests.patch;striplevel=3 \
- file://0005-tests-remove-gstbin-test_watch_for_state_change-test.patch \
"
-SRC_URI[sha256sum] = "df24e8792691a02dfe003b3833a51f1dbc6c3331ae625d143b17da939ceb5e0a"
+SRC_URI[sha256sum] = "1757184a07b9703219e8b1961f81cb1dd64320d147fc045ac8eb499efbea79be"
PACKAGECONFIG ??= "${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)} \
check \
diff --git a/meta/recipes-multimedia/libpng/files/run-ptest b/meta/recipes-multimedia/libpng/files/run-ptest
new file mode 100644
index 0000000000..9ab5d0c1f4
--- /dev/null
+++ b/meta/recipes-multimedia/libpng/files/run-ptest
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+set -eux
+
+./pngfix pngtest.png &> log.txt 2>&1
+
+if grep -i "OK" log.txt 2>&1 ; then
+ echo "PASS: pngfix passed"
+else
+ echo "FAIL: pngfix failed"
+fi
+rm -f log.txt
+
+./pngtest pngtest.png &> log.txt 2>&1
+
+if grep -i "PASS" log.txt 2>&1 ; then
+ echo "PASS: pngtest passed"
+else
+ echo "FAIL: pngtest failed"
+fi
+rm -f log.txt
+
+for i in pngstest timepng; do
+ if "./${i}" pngtest.png 2>&1; then
+ echo "PASS: $i"
+ else
+ echo "FAIL: $i"
+ fi
+done
diff --git a/meta/recipes-multimedia/libpng/libpng_1.6.37.bb b/meta/recipes-multimedia/libpng/libpng_1.6.39.bb
index 61e3d92e95..94db1d3f6b 100644
--- a/meta/recipes-multimedia/libpng/libpng_1.6.37.bb
+++ b/meta/recipes-multimedia/libpng/libpng_1.6.39.bb
@@ -5,14 +5,17 @@ library for use in applications that read, create, and manipulate PNG \
HOMEPAGE = "http://www.libpng.org/"
SECTION = "libs"
LICENSE = "Libpng"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=b0085051bf265bac2bfc38bc89f50000"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5c900cc124ba35a274073b5de7639b13"
DEPENDS = "zlib"
LIBV = "16"
-SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/${BP}.tar.xz"
-SRC_URI[md5sum] = "015e8e15db1eecde5f2eb9eb5b6e59e9"
-SRC_URI[sha256sum] = "505e70834d35383537b6491e7ae8641f1a4bed1876dbfe361201fc80868d88ca"
+SRC_URI = "\
+ ${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/${BP}.tar.xz \
+ file://run-ptest \
+ "
+
+SRC_URI[sha256sum] = "1f4696ce70b4ee5f85f1e1623dc1229b210029fa4b7aee573df3e2ba7b036937"
MIRRORS += "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/ ${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/older-releases/"
@@ -20,7 +23,7 @@ UPSTREAM_CHECK_URI = "http://libpng.org/pub/png/libpng.html"
BINCONFIG = "${bindir}/libpng-config ${bindir}/libpng16-config"
-inherit autotools binconfig-disabled pkgconfig
+inherit autotools binconfig-disabled pkgconfig ptest
# Work around missing symbols
EXTRA_OECONF:append:class-target = " ${@bb.utils.contains("TUNE_FEATURES", "neon", "--enable-arm-neon=on", "--enable-arm-neon=off", d)}"
@@ -33,3 +36,11 @@ BBCLASSEXTEND = "native nativesdk"
# CVE-2019-17371 is actually a memory leak in gif2png 2.x
CVE_CHECK_IGNORE += "CVE-2019-17371"
+
+do_install_ptest() {
+ install -m644 "${S}/pngtest.png" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngfix" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngtest" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngstest" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/timepng" "${D}${PTEST_PATH}"
+}
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/0001-flac-Fix-improper-buffer-reusing-732.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/0001-flac-Fix-improper-buffer-reusing-732.patch
new file mode 100644
index 0000000000..ede696180a
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/0001-flac-Fix-improper-buffer-reusing-732.patch
@@ -0,0 +1,29 @@
+From 9e4e9224c39195bde8ec14d1295944f713adb79a Mon Sep 17 00:00:00 2001
+From: yuan <ssspeed00@gmail.com>
+Date: Tue, 20 Apr 2021 16:16:32 +0800
+Subject: [PATCH] flac: Fix improper buffer reusing (#732)
+
+Upstream-Status: Backport [https://github.com/libsndfile/libsndfile/commit/ced91d7b971be6173b604154c39279ce90ad87cc]
+CVE: CVE-2021-4156
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+ src/flac.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/src/flac.c b/src/flac.c
+index 64d0172e..e3320450 100644
+--- a/src/flac.c
++++ b/src/flac.c
+@@ -948,7 +948,11 @@ flac_read_loop (SF_PRIVATE *psf, unsigned len)
+ /* Decode some more. */
+ while (pflac->pos < pflac->len)
+ { if (FLAC__stream_decoder_process_single (pflac->fsd) == 0)
++ { psf_log_printf (psf, "FLAC__stream_decoder_process_single returned false\n") ;
++ /* Current frame is busted, so NULL the pointer. */
++ pflac->frame = NULL ;
+ break ;
++ } ;
+ state = FLAC__stream_decoder_get_state (pflac->fsd) ;
+ if (state >= FLAC__STREAM_DECODER_END_OF_STREAM)
+ { psf_log_printf (psf, "FLAC__stream_decoder_get_state returned %s\n", FLAC__StreamDecoderStateString [state]) ;
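The fix above follows a general defensive pattern: once a decode step reports a fatal error, any cached pointer into the previous frame must be dropped so later copy code cannot read stale data. A reduced, self-contained sketch of that pattern (hypothetical names, not libsndfile code):

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in decoder that succeeds once, then fails fatally. */
    static int calls;
    static int decode_one(void) { return ++calls < 2; }

    struct reader {
        const short *frame;          /* last good frame, or NULL */
        size_t pos, len;
    };

    int main(void)
    {
        static const short first_frame[4] = {1, 2, 3, 4};
        struct reader r = { first_frame, 0, 8 };

        while (r.pos < r.len) {
            if (decode_one() == 0) {
                r.frame = NULL;      /* frame is busted: drop the stale pointer */
                break;
            }
            r.pos += 4;              /* pretend one frame was consumed */
        }
        printf("frame pointer %s\n", r.frame ? "still set" : "cleared");
        return 0;
    }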
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch
new file mode 100644
index 0000000000..c5fba4d6b5
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch
@@ -0,0 +1,46 @@
+From 0754562e13d2e63a248a1c82f90b30bc0ffe307c Mon Sep 17 00:00:00 2001
+From: Alex Stewart <alex.stewart@ni.com>
+Date: Tue, 10 Oct 2023 16:10:34 -0400
+Subject: [PATCH] mat4/mat5: fix int overflow in dataend calculation
+
+The clang sanitizer warns of a possible signed integer overflow when
+calculating the `dataend` value in `mat4_read_header()`.
+
+```
+src/mat4.c:323:41: runtime error: signed integer overflow: 205 * -100663296 cannot be represented in type 'int'
+SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior src/mat4.c:323:41 in
+src/mat4.c:323:48: runtime error: signed integer overflow: 838860800 * 4 cannot be represented in type 'int'
+SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior src/mat4.c:323:48 in
+```
+
+Cast the offending `rows` and `cols` ints to `sf_count_t` (the type of
+`dataend`) before performing the calculation, to avoid the issue.
+
+CVE: CVE-2022-33065
+Fixes: https://github.com/libsndfile/libsndfile/issues/789
+Fixes: https://github.com/libsndfile/libsndfile/issues/833
+
+Signed-off-by: Alex Stewart <alex.stewart@ni.com>
+
+Upstream-Status: Backport [https://github.com/libsndfile/libsndfile/commit/0754562e13d2e63a248a1c82f90b30bc0ffe307c]
+CVE: CVE-2022-33065
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/mat4.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/mat4.c b/src/mat4.c
+index 0b1b414..575683b 100644
+--- a/src/mat4.c
++++ b/src/mat4.c
+@@ -320,7 +320,7 @@ mat4_read_header (SF_PRIVATE *psf)
+ psf->filelength - psf->dataoffset, psf->sf.channels * psf->sf.frames * psf->bytewidth) ;
+ }
+ else if ((psf->filelength - psf->dataoffset) > psf->sf.channels * psf->sf.frames * psf->bytewidth)
+- psf->dataend = psf->dataoffset + rows * cols * psf->bytewidth ;
++ psf->dataend = psf->dataoffset + (sf_count_t) rows * (sf_count_t) cols * psf->bytewidth ;
+
+ psf->datalength = psf->filelength - psf->dataoffset - psf->dataend ;
+
+--
+2.40.1
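The arithmetic being fixed here is worth spelling out: `rows * cols * psf->bytewidth` is evaluated in plain `int` before the result is stored in the 64-bit `dataend`, so large dimensions overflow before the widening happens. A minimal standalone illustration (assuming `sf_count_t` is a 64-bit signed type, as in libsndfile):

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t sf_count_t;          /* stand-in for libsndfile's count type */

    int main(void)
    {
        int rows = 838860800, cols = 4, bytewidth = 1;

        /* Evaluated in int: 838860800 * 4 overflows (undefined behaviour). */
        sf_count_t bad = rows * cols * bytewidth;

        /* Widen first, as the patch does: the product fits easily in 64 bits. */
        sf_count_t good = (sf_count_t) rows * (sf_count_t) cols * bytewidth;

        printf("%lld vs %lld\n", (long long) bad, (long long) good);
        return 0;
    }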
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.31.bb b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.31.bb
index ea14fe29cb..0c654fd853 100644
--- a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.31.bb
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.31.bb
@@ -10,6 +10,8 @@ LICENSE = "LGPL-2.1-only"
SRC_URI = "https://github.com/libsndfile/libsndfile/releases/download/${PV}/libsndfile-${PV}.tar.bz2 \
file://noopus.patch \
+ file://0001-flac-Fix-improper-buffer-reusing-732.patch \
+ file://CVE-2022-33065.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/libsndfile/libsndfile/releases/"
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-Revised-handling-of-TIFFTAG_INKNAMES-and-related-TIF.patch b/meta/recipes-multimedia/libtiff/tiff/0001-Revised-handling-of-TIFFTAG_INKNAMES-and-related-TIF.patch
new file mode 100644
index 0000000000..17b37be041
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-Revised-handling-of-TIFFTAG_INKNAMES-and-related-TIF.patch
@@ -0,0 +1,267 @@
+From f00484b9519df933723deb38fff943dc291a793d Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Tue, 30 Aug 2022 16:56:48 +0200
+Subject: [PATCH] Revised handling of TIFFTAG_INKNAMES and related
+ TIFFTAG_NUMBEROFINKS value
+
+In order to solve the buffer overflow issues related to TIFFTAG_INKNAMES and related TIFFTAG_NUMBEROFINKS value, a revised handling of those tags within LibTiff is proposed:
+
+Behaviour for writing:
+ `NumberOfInks` MUST fit to the number of inks in the `InkNames` string.
+ `NumberOfInks` is automatically set when `InkNames` is set.
+ If `NumberOfInks` is different to the number of inks within `InkNames` string, that will be corrected and a warning is issued.
+ If `NumberOfInks` is not equal to samplesperpixel only a warning will be issued.
+
+Behaviour for reading:
+ When reading `InkNames` from a TIFF file, the `NumberOfInks` will be set automatically to the number of inks in `InkNames` string.
+ If `NumberOfInks` is different to the number of inks within `InkNames` string, that will be corrected and a warning is issued.
+ If `NumberOfInks` is not equal to samplesperpixel only a warning will be issued.
+
+This allows the safe use of the NumberOfInks value to read out the InkNames without buffer overflow
+
+This MR will close the following issues: #149, #150, #152, #168 (to be checked), #250, #269, #398 and #456.
+
+It also fixes the old bug at http://bugzilla.maptools.org/show_bug.cgi?id=2599, for which the limitation of `NumberOfInks = SPP` was introduced, which is in my opinion not necessary and does not solve the general issue.
+
+CVE: CVE-2022-3599 CVE-2022-4645
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/e813112545942107551433d61afd16ac094ff246.patch]
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+---
+ libtiff/tif_dir.c | 119 ++++++++++++++++++++++++-----------------
+ libtiff/tif_dir.h | 2 +
+ libtiff/tif_dirinfo.c | 2 +-
+ libtiff/tif_dirwrite.c | 5 ++
+ libtiff/tif_print.c | 4 ++
+ 5 files changed, 82 insertions(+), 50 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 793e8a79..816f7756 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -136,32 +136,30 @@ setExtraSamples(TIFF* tif, va_list ap, uint32_t* v)
+ }
+
+ /*
+- * Confirm we have "samplesperpixel" ink names separated by \0. Returns
++ * Count ink names separated by \0. Returns
+ * zero if the ink names are not as expected.
+ */
+-static uint32_t
+-checkInkNamesString(TIFF* tif, uint32_t slen, const char* s)
++static uint16_t
++countInkNamesString(TIFF *tif, uint32_t slen, const char *s)
+ {
+- TIFFDirectory* td = &tif->tif_dir;
+- uint16_t i = td->td_samplesperpixel;
++ uint16_t i = 0;
++ const char *ep = s + slen;
++ const char *cp = s;
+
+ if (slen > 0) {
+- const char* ep = s+slen;
+- const char* cp = s;
+- for (; i > 0; i--) {
++ do {
+ for (; cp < ep && *cp != '\0'; cp++) {}
+ if (cp >= ep)
+ goto bad;
+ cp++; /* skip \0 */
+- }
+- return ((uint32_t)(cp - s));
++ i++;
++ } while (cp < ep);
++ return (i);
+ }
+ bad:
+ TIFFErrorExt(tif->tif_clientdata, "TIFFSetField",
+- "%s: Invalid InkNames value; expecting %"PRIu16" names, found %"PRIu16,
+- tif->tif_name,
+- td->td_samplesperpixel,
+- (uint16_t)(td->td_samplesperpixel-i));
++ "%s: Invalid InkNames value; no NUL at given buffer end location %"PRIu32", after %"PRIu16" ink",
++ tif->tif_name, slen, i);
+ return (0);
+ }
+
+@@ -478,13 +476,61 @@ _TIFFVSetField(TIFF* tif, uint32_t tag, va_list ap)
+ _TIFFsetFloatArray(&td->td_refblackwhite, va_arg(ap, float*), 6);
+ break;
+ case TIFFTAG_INKNAMES:
+- v = (uint16_t) va_arg(ap, uint16_vap);
+- s = va_arg(ap, char*);
+- v = checkInkNamesString(tif, v, s);
+- status = v > 0;
+- if( v > 0 ) {
+- _TIFFsetNString(&td->td_inknames, s, v);
+- td->td_inknameslen = v;
++ {
++ v = (uint16_t) va_arg(ap, uint16_vap);
++ s = va_arg(ap, char*);
++ uint16_t ninksinstring;
++ ninksinstring = countInkNamesString(tif, v, s);
++ status = ninksinstring > 0;
++ if(ninksinstring > 0 ) {
++ _TIFFsetNString(&td->td_inknames, s, v);
++ td->td_inknameslen = v;
++ /* Set NumberOfInks to the value ninksinstring */
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS))
++ {
++ if (td->td_numberofinks != ninksinstring) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu16" of NumberOfInks is different from the number of inks %"PRIu16".\n -> NumberOfInks value adapted to %"PRIu16"",
++ tif->tif_name, fip->field_name, td->td_numberofinks, ninksinstring, ninksinstring);
++ td->td_numberofinks = ninksinstring;
++ }
++ } else {
++ td->td_numberofinks = ninksinstring;
++ TIFFSetFieldBit(tif, FIELD_NUMBEROFINKS);
++ }
++ if (TIFFFieldSet(tif, FIELD_SAMPLESPERPIXEL))
++ {
++ if (td->td_numberofinks != td->td_samplesperpixel) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu16" of NumberOfInks is different from the SamplesPerPixel value %"PRIu16"",
++ tif->tif_name, fip->field_name, td->td_numberofinks, td->td_samplesperpixel);
++ }
++ }
++ }
++ }
++ break;
++ case TIFFTAG_NUMBEROFINKS:
++ v = (uint16_t)va_arg(ap, uint16_vap);
++ /* If InkNames already set also NumberOfInks is set accordingly and should be equal */
++ if (TIFFFieldSet(tif, FIELD_INKNAMES))
++ {
++ if (v != td->td_numberofinks) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Error %s; Tag %s:\n It is not possible to set the value %"PRIu32" for NumberOfInks\n which is different from the number of inks in the InkNames tag (%"PRIu16")",
++ tif->tif_name, fip->field_name, v, td->td_numberofinks);
++ /* Do not set / overwrite number of inks already set by InkNames case accordingly. */
++ status = 0;
++ }
++ } else {
++ td->td_numberofinks = (uint16_t)v;
++ if (TIFFFieldSet(tif, FIELD_SAMPLESPERPIXEL))
++ {
++ if (td->td_numberofinks != td->td_samplesperpixel) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu32" of NumberOfInks is different from the SamplesPerPixel value %"PRIu16"",
++ tif->tif_name, fip->field_name, v, td->td_samplesperpixel);
++ }
++ }
+ }
+ break;
+ case TIFFTAG_PERSAMPLE:
+@@ -986,34 +1032,6 @@ _TIFFVGetField(TIFF* tif, uint32_t tag, va_list ap)
+ if (fip->field_bit == FIELD_CUSTOM) {
+ standard_tag = 0;
+ }
+-
+- if( standard_tag == TIFFTAG_NUMBEROFINKS )
+- {
+- int i;
+- for (i = 0; i < td->td_customValueCount; i++) {
+- uint16_t val;
+- TIFFTagValue *tv = td->td_customValues + i;
+- if (tv->info->field_tag != standard_tag)
+- continue;
+- if( tv->value == NULL )
+- return 0;
+- val = *(uint16_t *)tv->value;
+- /* Truncate to SamplesPerPixel, since the */
+- /* setting code for INKNAMES assume that there are SamplesPerPixel */
+- /* inknames. */
+- /* Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2599 */
+- if( val > td->td_samplesperpixel )
+- {
+- TIFFWarningExt(tif->tif_clientdata,"_TIFFVGetField",
+- "Truncating NumberOfInks from %u to %"PRIu16,
+- val, td->td_samplesperpixel);
+- val = td->td_samplesperpixel;
+- }
+- *va_arg(ap, uint16_t*) = val;
+- return 1;
+- }
+- return 0;
+- }
+
+ switch (standard_tag) {
+ case TIFFTAG_SUBFILETYPE:
+@@ -1195,6 +1213,9 @@ _TIFFVGetField(TIFF* tif, uint32_t tag, va_list ap)
+ case TIFFTAG_INKNAMES:
+ *va_arg(ap, const char**) = td->td_inknames;
+ break;
++ case TIFFTAG_NUMBEROFINKS:
++ *va_arg(ap, uint16_t *) = td->td_numberofinks;
++ break;
+ default:
+ {
+ int i;
+diff --git a/libtiff/tif_dir.h b/libtiff/tif_dir.h
+index 09065648..0c251c9e 100644
+--- a/libtiff/tif_dir.h
++++ b/libtiff/tif_dir.h
+@@ -117,6 +117,7 @@ typedef struct {
+ /* CMYK parameters */
+ int td_inknameslen;
+ char* td_inknames;
++ uint16_t td_numberofinks; /* number of inks in InkNames string */
+
+ int td_customValueCount;
+ TIFFTagValue *td_customValues;
+@@ -174,6 +175,7 @@ typedef struct {
+ #define FIELD_TRANSFERFUNCTION 44
+ #define FIELD_INKNAMES 46
+ #define FIELD_SUBIFD 49
++#define FIELD_NUMBEROFINKS 50
+ /* FIELD_CUSTOM (see tiffio.h) 65 */
+ /* end of support for well-known tags; codec-private tags follow */
+ #define FIELD_CODEC 66 /* base of codec-private tags */
+diff --git a/libtiff/tif_dirinfo.c b/libtiff/tif_dirinfo.c
+index 3371cb5c..3b4bcd33 100644
+--- a/libtiff/tif_dirinfo.c
++++ b/libtiff/tif_dirinfo.c
+@@ -114,7 +114,7 @@ tiffFields[] = {
+ { TIFFTAG_SUBIFD, -1, -1, TIFF_IFD8, 0, TIFF_SETGET_C16_IFD8, TIFF_SETGET_UNDEFINED, FIELD_SUBIFD, 1, 1, "SubIFD", (TIFFFieldArray*) &tiffFieldArray },
+ { TIFFTAG_INKSET, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 0, 0, "InkSet", NULL },
+ { TIFFTAG_INKNAMES, -1, -1, TIFF_ASCII, 0, TIFF_SETGET_C16_ASCII, TIFF_SETGET_UNDEFINED, FIELD_INKNAMES, 1, 1, "InkNames", NULL },
+- { TIFFTAG_NUMBEROFINKS, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 1, 0, "NumberOfInks", NULL },
++ { TIFFTAG_NUMBEROFINKS, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_NUMBEROFINKS, 1, 0, "NumberOfInks", NULL },
+ { TIFFTAG_DOTRANGE, 2, 2, TIFF_SHORT, 0, TIFF_SETGET_UINT16_PAIR, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 0, 0, "DotRange", NULL },
+ { TIFFTAG_TARGETPRINTER, -1, -1, TIFF_ASCII, 0, TIFF_SETGET_ASCII, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 1, 0, "TargetPrinter", NULL },
+ { TIFFTAG_EXTRASAMPLES, -1, -1, TIFF_SHORT, 0, TIFF_SETGET_C16_UINT16, TIFF_SETGET_UNDEFINED, FIELD_EXTRASAMPLES, 0, 1, "ExtraSamples", NULL },
+diff --git a/libtiff/tif_dirwrite.c b/libtiff/tif_dirwrite.c
+index 6c86fdca..062e4610 100644
+--- a/libtiff/tif_dirwrite.c
++++ b/libtiff/tif_dirwrite.c
+@@ -626,6 +626,11 @@ TIFFWriteDirectorySec(TIFF* tif, int isimage, int imagedone, uint64_t* pdiroff)
+ if (!TIFFWriteDirectoryTagAscii(tif,&ndir,dir,TIFFTAG_INKNAMES,tif->tif_dir.td_inknameslen,tif->tif_dir.td_inknames))
+ goto bad;
+ }
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS))
++ {
++ if (!TIFFWriteDirectoryTagShort(tif, &ndir, dir, TIFFTAG_NUMBEROFINKS, tif->tif_dir.td_numberofinks))
++ goto bad;
++ }
+ if (TIFFFieldSet(tif,FIELD_SUBIFD))
+ {
+ if (!TIFFWriteDirectoryTagSubifd(tif,&ndir,dir))
+diff --git a/libtiff/tif_print.c b/libtiff/tif_print.c
+index 16ce5780..a91b9e7b 100644
+--- a/libtiff/tif_print.c
++++ b/libtiff/tif_print.c
+@@ -397,6 +397,10 @@ TIFFPrintDirectory(TIFF* tif, FILE* fd, long flags)
+ }
+ fputs("\n", fd);
+ }
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS)) {
++ fprintf(fd, " NumberOfInks: %d\n",
++ td->td_numberofinks);
++ }
+ if (TIFFFieldSet(tif,FIELD_THRESHHOLDING)) {
+ fprintf(fd, " Thresholding: ");
+ switch (td->td_threshholding) {
+--
+2.34.1
+
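To make the new behaviour concrete, here is a rough usage sketch (an illustration only, assuming the standard libtiff setter/getter API): an application passes the NUL-separated ink name list once, and NumberOfInks is derived from it rather than having to be kept in sync by hand.

    #include <tiffio.h>

    /* Four ink names, each terminated by a NUL ("Cyan\0Magenta\0Yellow\0Black\0"). */
    static const char inknames[] = "Cyan\0Magenta\0Yellow\0Black";

    void write_inks(TIFF *tif)
    {
        uint16_t ninks = 0;

        /* The length must cover every name including its terminating NUL. */
        TIFFSetField(tif, TIFFTAG_INKNAMES, (uint16_t) sizeof(inknames), inknames);

        /* With the revised handling, NumberOfInks is now 4 and can be read back. */
        TIFFGetField(tif, TIFFTAG_NUMBEROFINKS, &ninks);
    }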
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-fix-the-FPE-in-tiffcrop-415-427-and-428.patch b/meta/recipes-multimedia/libtiff/tiff/0001-fix-the-FPE-in-tiffcrop-415-427-and-428.patch
new file mode 100644
index 0000000000..a9dd42d755
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-fix-the-FPE-in-tiffcrop-415-427-and-428.patch
@@ -0,0 +1,182 @@
+From 6cfe933df4dbac5479801b2bd10103ef7db815ee Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Sat, 11 Jun 2022 09:31:43 +0000
+Subject: [PATCH] fix the FPE in tiffcrop (#415, #427, and #428)
+
+CVE: CVE-2022-2056 CVE-2022-2057 CVE-2022-2058
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ libtiff/tif_aux.c | 9 +++++++
+ libtiff/tiffiop.h | 1 +
+ tools/tiffcrop.c | 62 ++++++++++++++++++++++++++---------------------
+ 3 files changed, 44 insertions(+), 28 deletions(-)
+
+diff --git a/libtiff/tif_aux.c b/libtiff/tif_aux.c
+index 140f26c..5b88c8d 100644
+--- a/libtiff/tif_aux.c
++++ b/libtiff/tif_aux.c
+@@ -402,6 +402,15 @@ float _TIFFClampDoubleToFloat( double val )
+ return (float)val;
+ }
+
++uint32_t _TIFFClampDoubleToUInt32(double val)
++{
++ if( val < 0 )
++ return 0;
++ if( val > 0xFFFFFFFFU || val != val )
++ return 0xFFFFFFFFU;
++ return (uint32_t)val;
++}
++
+ int _TIFFSeekOK(TIFF* tif, toff_t off)
+ {
+ /* Huge offsets, especially -1 / UINT64_MAX, can cause issues */
+diff --git a/libtiff/tiffiop.h b/libtiff/tiffiop.h
+index f1151f5..c1d0276 100644
+--- a/libtiff/tiffiop.h
++++ b/libtiff/tiffiop.h
+@@ -368,6 +368,7 @@ extern double _TIFFUInt64ToDouble(uint64_t);
+ extern float _TIFFUInt64ToFloat(uint64_t);
+
+ extern float _TIFFClampDoubleToFloat(double);
++extern uint32_t _TIFFClampDoubleToUInt32(double);
+
+ extern tmsize_t
+ _TIFFReadEncodedStripAndAllocBuffer(TIFF* tif, uint32_t strip,
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index e407bf5..b9b13d8 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5182,17 +5182,17 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ {
+ if ((crop->res_unit == RESUNIT_INCH) || (crop->res_unit == RESUNIT_CENTIMETER))
+ {
+- x1 = (uint32_t) (crop->corners[i].X1 * scale * xres);
+- x2 = (uint32_t) (crop->corners[i].X2 * scale * xres);
+- y1 = (uint32_t) (crop->corners[i].Y1 * scale * yres);
+- y2 = (uint32_t) (crop->corners[i].Y2 * scale * yres);
++ x1 = _TIFFClampDoubleToUInt32(crop->corners[i].X1 * scale * xres);
++ x2 = _TIFFClampDoubleToUInt32(crop->corners[i].X2 * scale * xres);
++ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1 * scale * yres);
++ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2 * scale * yres);
+ }
+ else
+ {
+- x1 = (uint32_t) (crop->corners[i].X1);
+- x2 = (uint32_t) (crop->corners[i].X2);
+- y1 = (uint32_t) (crop->corners[i].Y1);
+- y2 = (uint32_t) (crop->corners[i].Y2);
++ x1 = _TIFFClampDoubleToUInt32(crop->corners[i].X1);
++ x2 = _TIFFClampDoubleToUInt32(crop->corners[i].X2);
++ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1);
++ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2);
+ }
+ if (x1 < 1)
+ crop->regionlist[i].x1 = 0;
+@@ -5255,17 +5255,17 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ {
+ if (crop->res_unit != RESUNIT_INCH && crop->res_unit != RESUNIT_CENTIMETER)
+ { /* User has specified pixels as reference unit */
+- tmargin = (uint32_t)(crop->margins[0]);
+- lmargin = (uint32_t)(crop->margins[1]);
+- bmargin = (uint32_t)(crop->margins[2]);
+- rmargin = (uint32_t)(crop->margins[3]);
++ tmargin = _TIFFClampDoubleToUInt32(crop->margins[0]);
++ lmargin = _TIFFClampDoubleToUInt32(crop->margins[1]);
++ bmargin = _TIFFClampDoubleToUInt32(crop->margins[2]);
++ rmargin = _TIFFClampDoubleToUInt32(crop->margins[3]);
+ }
+ else
+ { /* inches or centimeters specified */
+- tmargin = (uint32_t)(crop->margins[0] * scale * yres);
+- lmargin = (uint32_t)(crop->margins[1] * scale * xres);
+- bmargin = (uint32_t)(crop->margins[2] * scale * yres);
+- rmargin = (uint32_t)(crop->margins[3] * scale * xres);
++ tmargin = _TIFFClampDoubleToUInt32(crop->margins[0] * scale * yres);
++ lmargin = _TIFFClampDoubleToUInt32(crop->margins[1] * scale * xres);
++ bmargin = _TIFFClampDoubleToUInt32(crop->margins[2] * scale * yres);
++ rmargin = _TIFFClampDoubleToUInt32(crop->margins[3] * scale * xres);
+ }
+
+ if ((lmargin + rmargin) > image->width)
+@@ -5295,24 +5295,24 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ if (crop->res_unit != RESUNIT_INCH && crop->res_unit != RESUNIT_CENTIMETER)
+ {
+ if (crop->crop_mode & CROP_WIDTH)
+- width = (uint32_t)crop->width;
++ width = _TIFFClampDoubleToUInt32(crop->width);
+ else
+ width = image->width - lmargin - rmargin;
+
+ if (crop->crop_mode & CROP_LENGTH)
+- length = (uint32_t)crop->length;
++ length = _TIFFClampDoubleToUInt32(crop->length);
+ else
+ length = image->length - tmargin - bmargin;
+ }
+ else
+ {
+ if (crop->crop_mode & CROP_WIDTH)
+- width = (uint32_t)(crop->width * scale * image->xres);
++ width = _TIFFClampDoubleToUInt32(crop->width * scale * image->xres);
+ else
+ width = image->width - lmargin - rmargin;
+
+ if (crop->crop_mode & CROP_LENGTH)
+- length = (uint32_t)(crop->length * scale * image->yres);
++ length = _TIFFClampDoubleToUInt32(crop->length * scale * image->yres);
+ else
+ length = image->length - tmargin - bmargin;
+ }
+@@ -5711,13 +5711,13 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ {
+ if (page->res_unit == RESUNIT_INCH || page->res_unit == RESUNIT_CENTIMETER)
+ { /* inches or centimeters specified */
+- hmargin = (uint32_t)(page->hmargin * scale * page->hres * ((image->bps + 7) / 8));
+- vmargin = (uint32_t)(page->vmargin * scale * page->vres * ((image->bps + 7) / 8));
++ hmargin = _TIFFClampDoubleToUInt32(page->hmargin * scale * page->hres * ((image->bps + 7) / 8));
++ vmargin = _TIFFClampDoubleToUInt32(page->vmargin * scale * page->vres * ((image->bps + 7) / 8));
+ }
+ else
+ { /* Otherwise user has specified pixels as reference unit */
+- hmargin = (uint32_t)(page->hmargin * scale * ((image->bps + 7) / 8));
+- vmargin = (uint32_t)(page->vmargin * scale * ((image->bps + 7) / 8));
++ hmargin = _TIFFClampDoubleToUInt32(page->hmargin * scale * ((image->bps + 7) / 8));
++ vmargin = _TIFFClampDoubleToUInt32(page->vmargin * scale * ((image->bps + 7) / 8));
+ }
+
+ if ((hmargin * 2.0) > (pwidth * page->hres))
+@@ -5755,13 +5755,13 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ {
+ if (page->mode & PAGE_MODE_PAPERSIZE )
+ {
+- owidth = (uint32_t)((pwidth * page->hres) - (hmargin * 2));
+- olength = (uint32_t)((plength * page->vres) - (vmargin * 2));
++ owidth = _TIFFClampDoubleToUInt32((pwidth * page->hres) - (hmargin * 2));
++ olength = _TIFFClampDoubleToUInt32((plength * page->vres) - (vmargin * 2));
+ }
+ else
+ {
+- owidth = (uint32_t)(iwidth - (hmargin * 2 * page->hres));
+- olength = (uint32_t)(ilength - (vmargin * 2 * page->vres));
++ owidth = _TIFFClampDoubleToUInt32(iwidth - (hmargin * 2 * page->hres));
++ olength = _TIFFClampDoubleToUInt32(ilength - (vmargin * 2 * page->vres));
+ }
+ }
+
+@@ -5770,6 +5770,12 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ if (olength > ilength)
+ olength = ilength;
+
++ if (owidth == 0 || olength == 0)
++ {
++ TIFFError("computeOutputPixelOffsets", "Integer overflow when calculating the number of pages");
++ exit(EXIT_FAILURE);
++ }
++
+ /* Compute the number of pages required for Portrait or Landscape */
+ switch (page->orient)
+ {
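The reason a clamp helper is needed at all: in C, converting a double that is negative, NaN, or larger than UINT32_MAX directly to uint32_t is undefined behaviour, which is exactly what crafted resolutions and margins provoked. A standalone sketch of the same idea as the _TIFFClampDoubleToUInt32() added above:

    #include <stdint.h>

    static uint32_t clamp_double_to_u32(double val)
    {
        if (val < 0)
            return 0;
        if (val > (double) UINT32_MAX || val != val)   /* val != val catches NaN */
            return UINT32_MAX;
        return (uint32_t) val;
    }

    /* (uint32_t)(-3.5) or (uint32_t)(1e20) is undefined; the clamp never is. */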
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-tif_jbig.c-fix-crash-when-reading-a-file-with-multip.patch b/meta/recipes-multimedia/libtiff/tiff/0001-tif_jbig.c-fix-crash-when-reading-a-file-with-multip.patch
index f1a4ab4251..a4d8bebe8c 100644
--- a/meta/recipes-multimedia/libtiff/tiff/0001-tif_jbig.c-fix-crash-when-reading-a-file-with-multip.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-tif_jbig.c-fix-crash-when-reading-a-file-with-multip.patch
@@ -1,11 +1,12 @@
+From adfd6be615635705c2f4eb8dfe49e2f463786361 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Thu, 24 Feb 2022 22:26:02 +0100
+Subject: [PATCH] tif_jbig.c: fix crash when reading a file with multiple
+
CVE: CVE-2022-0865
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
-From 88da11ae3c4db527cb870fb1017456cc8fbac2e7 Mon Sep 17 00:00:00 2001
-From: Even Rouault <even.rouault@spatialys.com>
-Date: Thu, 24 Feb 2022 22:26:02 +0100
-Subject: [PATCH 1/6] tif_jbig.c: fix crash when reading a file with multiple
IFD in memory-mapped mode and when bit reversal is needed (fixes #385)
---
@@ -13,7 +14,7 @@ Subject: [PATCH 1/6] tif_jbig.c: fix crash when reading a file with multiple
1 file changed, 10 insertions(+)
diff --git a/libtiff/tif_jbig.c b/libtiff/tif_jbig.c
-index 74086338..8bfa4cef 100644
+index 7408633..8bfa4ce 100644
--- a/libtiff/tif_jbig.c
+++ b/libtiff/tif_jbig.c
@@ -209,6 +209,16 @@ int TIFFInitJBIG(TIFF* tif, int scheme)
@@ -33,6 +34,3 @@ index 74086338..8bfa4cef 100644
/* Setup the function pointers for encode, decode, and cleanup. */
tif->tif_setupdecode = JBIGSetupDecode;
---
-2.25.1
-
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-Fix-issue-330-and-some-more-from-320-to-349.patch b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-Fix-issue-330-and-some-more-from-320-to-349.patch
new file mode 100644
index 0000000000..7c4feabc38
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-Fix-issue-330-and-some-more-from-320-to-349.patch
@@ -0,0 +1,607 @@
+From 0ab805f46f68500da3b49d6f89380bab169bf6bb Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 10 May 2022 20:03:17 +0000
+Subject: [PATCH] tiffcrop: Fix issue #330 and some more from 320 to 349
+
+Upstream-Status: Backport
+Signed-off-by: Zheng Qiu <zheng.qiu@windriver.com>
+---
+ tools/tiffcrop.c | 282 +++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 210 insertions(+), 72 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 99e4208..b596f9e 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -63,20 +63,24 @@
+ * units when sectioning image into columns x rows
+ * using the -S cols:rows option.
+ * -X # Horizontal dimension of region to extract expressed in current
+- * units
++ * units, relative to the specified origin reference 'edge' left (default for X) or right.
+ * -Y # Vertical dimension of region to extract expressed in current
+- * units
++ * units, relative to the specified origin reference 'edge' top (default for Y) or bottom.
+ * -O orient Orientation for output image, portrait, landscape, auto
+ * -P page Page size for output image segments, eg letter, legal, tabloid,
+ * etc.
+ * -S cols:rows Divide the image into equal sized segments using cols across
+ * and rows down
+- * -E t|l|r|b Edge to use as origin
++ * -E t|l|r|b Edge to use as origin (i.e. 'side' of the image not 'corner')
++ * top = width from left, zones from top to bottom (default)
++ * bottom = width from left, zones from bottom to top
++ * left = zones from left to right, length from top
++ * right = zones from right to left, length from top
+ * -m #,#,#,# Margins from edges for selection: top, left, bottom, right
+ * (commas separated)
+ * -Z #:#,#:# Zones of the image designated as zone X of Y,
+ * eg 1:3 would be first of three equal portions measured
+- * from reference edge
++ * from reference edge (i.e. 'side' not corner)
+ * -N odd|even|#,#-#,#|last
+ * Select sequences and/or ranges of images within file
+ * to process. The words odd or even may be used to specify
+@@ -103,10 +107,13 @@
+ * selects which functions dump data, with higher numbers selecting
+ * lower level, scanline level routines. Debug reports a limited set
+ * of messages to monitor progress without enabling dump logs.
++ *
++ * Note: The (-X|-Y), -Z and -z options are mutually exclusive.
++ * In no case should the options be applied to a given selection successively.
+ */
+
+-static char tiffcrop_version_id[] = "2.4.1";
+-static char tiffcrop_rev_date[] = "03-03-2010";
++static char tiffcrop_version_id[] = "2.5";
++static char tiffcrop_rev_date[] = "02-09-2022";
+
+ #include "tif_config.h"
+ #include "libport.h"
+@@ -774,6 +781,9 @@ static const char usage_info[] =
+ " The four debug/dump options are independent, though it makes little sense to\n"
+ " specify a dump file without specifying a detail level.\n"
+ "\n"
++"Note: The (-X|-Y), -Z and -z options are mutually exclusive.\n"
++" In no case should the options be applied to a given selection successively.\n"
++"\n"
+ ;
+
+ /* This function could be modified to pass starting sample offset
+@@ -2123,6 +2133,15 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+ /*NOTREACHED*/
+ }
+ }
++ /*-- Check for not allowed combinations (e.g. -X, -Y and -Z and -z are mutually exclusive) --*/
++ char XY, Z, R;
++ XY = ((crop_data->crop_mode & CROP_WIDTH) || (crop_data->crop_mode & CROP_LENGTH));
++ Z = (crop_data->crop_mode & CROP_ZONES);
++ R = (crop_data->crop_mode & CROP_REGIONS);
++ if ((XY && Z) || (XY && R) || (Z && R)) {
++ TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z and -z are mutually exclusive.->Exit");
++ exit(EXIT_FAILURE);
++ }
+ } /* end process_command_opts */
+
+ /* Start a new output file if one has not been previously opened or
+@@ -2748,7 +2767,7 @@ extractContigSamplesBytes (uint8_t *in, uint8_t *out, uint32_t cols,
+ tsample_t count, uint32_t start, uint32_t end)
+ {
+ int i, bytes_per_sample, sindex;
+- uint32_t col, dst_rowsize, bit_offset;
++ uint32_t col, dst_rowsize, bit_offset, numcols;
+ uint32_t src_byte /*, src_bit */;
+ uint8_t *src = in;
+ uint8_t *dst = out;
+@@ -2759,6 +2778,10 @@ extractContigSamplesBytes (uint8_t *in, uint8_t *out, uint32_t cols,
+ return (1);
+ }
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamplesBytes",
+@@ -2771,6 +2794,9 @@ extractContigSamplesBytes (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ dst_rowsize = (bps * (end - start) * count) / 8;
+
+@@ -2814,7 +2840,7 @@ extractContigSamples8bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ tsample_t count, uint32_t start, uint32_t end)
+ {
+ int ready_bits = 0, sindex = 0;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint8_t maskbits = 0, matchbits = 0;
+ uint8_t buff1 = 0, buff2 = 0;
+ uint8_t *src = in;
+@@ -2826,6 +2852,10 @@ extractContigSamples8bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ return (1);
+ }
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamples8bits",
+@@ -2838,7 +2868,10 @@ extractContigSamples8bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
+-
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
++
+ ready_bits = 0;
+ maskbits = (uint8_t)-1 >> (8 - bps);
+ buff1 = buff2 = 0;
+@@ -2891,7 +2924,7 @@ extractContigSamples16bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ tsample_t count, uint32_t start, uint32_t end)
+ {
+ int ready_bits = 0, sindex = 0;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint16_t maskbits = 0, matchbits = 0;
+ uint16_t buff1 = 0, buff2 = 0;
+ uint8_t bytebuff = 0;
+@@ -2904,6 +2937,10 @@ extractContigSamples16bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ return (1);
+ }
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamples16bits",
+@@ -2916,6 +2953,9 @@ extractContigSamples16bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ ready_bits = 0;
+ maskbits = (uint16_t)-1 >> (16 - bps);
+@@ -2980,7 +3020,7 @@ extractContigSamples24bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ tsample_t count, uint32_t start, uint32_t end)
+ {
+ int ready_bits = 0, sindex = 0;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint32_t maskbits = 0, matchbits = 0;
+ uint32_t buff1 = 0, buff2 = 0;
+ uint8_t bytebuff1 = 0, bytebuff2 = 0;
+@@ -2993,6 +3033,10 @@ extractContigSamples24bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ return (1);
+ }
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamples24bits",
+@@ -3005,6 +3049,9 @@ extractContigSamples24bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ ready_bits = 0;
+ maskbits = (uint32_t)-1 >> (32 - bps);
+@@ -3089,7 +3136,7 @@ extractContigSamples32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ tsample_t count, uint32_t start, uint32_t end)
+ {
+ int ready_bits = 0, sindex = 0 /*, shift_width = 0 */;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint32_t longbuff1 = 0, longbuff2 = 0;
+ uint64_t maskbits = 0, matchbits = 0;
+ uint64_t buff1 = 0, buff2 = 0, buff3 = 0;
+@@ -3104,6 +3151,10 @@ extractContigSamples32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ }
+
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamples32bits",
+@@ -3116,6 +3167,9 @@ extractContigSamples32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ /* shift_width = ((bps + 7) / 8) + 1; */
+ ready_bits = 0;
+@@ -3195,7 +3249,7 @@ extractContigSamplesShifted8bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ int shift)
+ {
+ int ready_bits = 0, sindex = 0;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint8_t maskbits = 0, matchbits = 0;
+ uint8_t buff1 = 0, buff2 = 0;
+ uint8_t *src = in;
+@@ -3207,6 +3261,10 @@ extractContigSamplesShifted8bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ return (1);
+ }
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamplesShifted8bits",
+@@ -3219,6 +3277,9 @@ extractContigSamplesShifted8bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ ready_bits = shift;
+ maskbits = (uint8_t)-1 >> (8 - bps);
+@@ -3275,7 +3336,7 @@ extractContigSamplesShifted16bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ int shift)
+ {
+ int ready_bits = 0, sindex = 0;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint16_t maskbits = 0, matchbits = 0;
+ uint16_t buff1 = 0, buff2 = 0;
+ uint8_t bytebuff = 0;
+@@ -3288,6 +3349,10 @@ extractContigSamplesShifted16bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ return (1);
+ }
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamplesShifted16bits",
+@@ -3300,6 +3365,9 @@ extractContigSamplesShifted16bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ ready_bits = shift;
+ maskbits = (uint16_t)-1 >> (16 - bps);
+@@ -3365,7 +3433,7 @@ extractContigSamplesShifted24bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ int shift)
+ {
+ int ready_bits = 0, sindex = 0;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint32_t maskbits = 0, matchbits = 0;
+ uint32_t buff1 = 0, buff2 = 0;
+ uint8_t bytebuff1 = 0, bytebuff2 = 0;
+@@ -3378,6 +3446,16 @@ extractContigSamplesShifted24bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ return (1);
+ }
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ /*--- Remark, which is true for all those functions extractContigSamplesXXX() --
++ * The mitigation of the start/end test does not always make sense, because the function is often called with e.g.:
++ * start = 31; end = 32; cols = 32 to extract the last column in a 32x32 sample image.
++ * If a wrong parameter (e.g. cols = 10) is then provided, the mitigated settings would be start=0; end=1.
++ * Therefore, an error message and no copy action might be the better reaction to wrong parameter configurations.
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamplesShifted24bits",
+@@ -3390,6 +3468,9 @@ extractContigSamplesShifted24bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ ready_bits = shift;
+ maskbits = (uint32_t)-1 >> (32 - bps);
+@@ -3451,7 +3532,7 @@ extractContigSamplesShifted24bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ buff2 = (buff2 << 8);
+ bytebuff2 = bytebuff1;
+ ready_bits -= 8;
+- }
++ }
+
+ return (0);
+ } /* end extractContigSamplesShifted24bits */
+@@ -3463,7 +3544,7 @@ extractContigSamplesShifted32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ int shift)
+ {
+ int ready_bits = 0, sindex = 0 /*, shift_width = 0 */;
+- uint32_t col, src_byte, src_bit, bit_offset;
++ uint32_t col, src_byte, src_bit, bit_offset, numcols;
+ uint32_t longbuff1 = 0, longbuff2 = 0;
+ uint64_t maskbits = 0, matchbits = 0;
+ uint64_t buff1 = 0, buff2 = 0, buff3 = 0;
+@@ -3478,6 +3559,10 @@ extractContigSamplesShifted32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ }
+
+
++ /* Number of extracted columns shall be kept as (end-start + 1). Otherwise buffer-overflow might occur.
++ * 'start' and 'col' count from 0 to (cols-1) but 'end' is to be set one after the index of the last column to be copied!
++ */
++ numcols = abs(end - start);
+ if ((start > end) || (start > cols))
+ {
+ TIFFError ("extractContigSamplesShifted32bits",
+@@ -3490,6 +3575,9 @@ extractContigSamplesShifted32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ "Invalid end column value %"PRIu32" ignored", end);
+ end = cols;
+ }
++ if (abs(end - start) > numcols) {
++ end = start + numcols;
++ }
+
+ /* shift_width = ((bps + 7) / 8) + 1; */
+ ready_bits = shift;
+@@ -5431,7 +5519,7 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ {
+ struct offset offsets;
+ int i;
+- int32_t test;
++ uint32_t uaux;
+ uint32_t seg, total, need_buff = 0;
+ uint32_t buffsize;
+ uint32_t zwidth, zlength;
+@@ -5512,8 +5600,13 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ seg = crop->zonelist[j].position;
+ total = crop->zonelist[j].total;
+
+- /* check for not allowed zone cases like 0:0; 4:3; etc. and skip that input */
++ /* check for not allowed zone cases like 0:0; 4:3; or negative ones etc. and skip that input */
++ if (crop->zonelist[j].position < 0 || crop->zonelist[j].total < 0) {
++ TIFFError("getCropOffsets", "Negative crop zone values %d:%d are not allowed, thus skipped.", crop->zonelist[j].position, crop->zonelist[j].total);
++ continue;
++ }
+ if (seg == 0 || total == 0 || seg > total) {
++ TIFFError("getCropOffsets", "Crop zone %d:%d is out of specification, thus skipped.", seg, total);
+ continue;
+ }
+
+@@ -5526,17 +5619,23 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+
+ crop->regionlist[i].x1 = offsets.startx +
+ (uint32_t)(offsets.crop_width * 1.0 * (seg - 1) / total);
+- test = (int32_t)offsets.startx +
+- (int32_t)(offsets.crop_width * 1.0 * seg / total);
+- if (test < 1 )
+- crop->regionlist[i].x2 = 0;
+- else
+- {
+- if (test > (int32_t)(image->width - 1))
++ /* FAULT: IMHO in the old code here, the calculation of x2 was based on wrong assumptions. The whole image was assumed and 'endy' and 'starty' are not respected anymore! */
++ /* NEW PROPOSED Code: Assumption: offsets are within image with top left corner as origin (0,0) and 'start' <= 'end'. */
++ if (crop->regionlist[i].x1 > offsets.endx) {
++ crop->regionlist[i].x1 = offsets.endx;
++ } else if (crop->regionlist[i].x1 >= image->width) {
++ crop->regionlist[i].x1 = image->width - 1;
++ }
++
++ crop->regionlist[i].x2 = offsets.startx + (uint32_t)(offsets.crop_width * 1.0 * seg / total);
++ if (crop->regionlist[i].x2 > 0) crop->regionlist[i].x2 = crop->regionlist[i].x2 - 1;
++ if (crop->regionlist[i].x2 < crop->regionlist[i].x1) {
++ crop->regionlist[i].x2 = crop->regionlist[i].x1;
++ } else if (crop->regionlist[i].x2 > offsets.endx) {
++ crop->regionlist[i].x2 = offsets.endx;
++ } else if (crop->regionlist[i].x2 >= image->width) {
+ crop->regionlist[i].x2 = image->width - 1;
+- else
+- crop->regionlist[i].x2 = test - 1;
+- }
++ }
+ zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
+
+ /* This is passed to extractCropZone or extractCompositeZones */
+@@ -5551,22 +5650,27 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ crop->regionlist[i].x1 = offsets.startx;
+ crop->regionlist[i].x2 = offsets.endx;
+
+- test = offsets.endy - (uint32_t)(offsets.crop_length * 1.0 * seg / total);
+- if (test < 1 )
+- crop->regionlist[i].y1 = 0;
+- else
+- crop->regionlist[i].y1 = test + 1;
++ /* FAULT: IMHO in the old code here, the calculation of y1/y2 was based on wrong assumptions. The whole image was assumed and 'endy' and 'starty' are not respected anymore! */
++ /* NEW PROPOSED Code: Assumption: offsets are within image with top left corner as origin (0,0) and 'start' <= 'end'. */
++ uaux = (uint32_t)(offsets.crop_length * 1.0 * seg / total);
++ if (uaux <= offsets.endy + 1) {
++ crop->regionlist[i].y1 = offsets.endy - uaux + 1;
++ } else {
++ crop->regionlist[i].y1 = 0;
++ }
++ if (crop->regionlist[i].y1 < offsets.starty) {
++ crop->regionlist[i].y1 = offsets.starty;
++ }
+
+- test = offsets.endy - (offsets.crop_length * 1.0 * (seg - 1) / total);
+- if (test < 1 )
+- crop->regionlist[i].y2 = 0;
+- else
+- {
+- if (test > (int32_t)(image->length - 1))
+- crop->regionlist[i].y2 = image->length - 1;
+- else
+- crop->regionlist[i].y2 = test;
+- }
++ uaux = (uint32_t)(offsets.crop_length * 1.0 * (seg - 1) / total);
++ if (uaux <= offsets.endy) {
++ crop->regionlist[i].y2 = offsets.endy - uaux;
++ } else {
++ crop->regionlist[i].y2 = 0;
++ }
++ if (crop->regionlist[i].y2 < offsets.starty) {
++ crop->regionlist[i].y2 = offsets.starty;
++ }
+ zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
+
+ /* This is passed to extractCropZone or extractCompositeZones */
+@@ -5577,32 +5681,42 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ crop->combined_width = (uint32_t)zwidth;
+ break;
+ case EDGE_RIGHT: /* zones from right to left, length from top */
+- zlength = offsets.crop_length;
+- crop->regionlist[i].y1 = offsets.starty;
+- crop->regionlist[i].y2 = offsets.endy;
+-
+- crop->regionlist[i].x1 = offsets.startx +
+- (uint32_t)(offsets.crop_width * (total - seg) * 1.0 / total);
+- test = offsets.startx +
+- (offsets.crop_width * (total - seg + 1) * 1.0 / total);
+- if (test < 1 )
+- crop->regionlist[i].x2 = 0;
+- else
+- {
+- if (test > (int32_t)(image->width - 1))
+- crop->regionlist[i].x2 = image->width - 1;
+- else
+- crop->regionlist[i].x2 = test - 1;
+- }
+- zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++ zlength = offsets.crop_length;
++ crop->regionlist[i].y1 = offsets.starty;
++ crop->regionlist[i].y2 = offsets.endy;
++
++ crop->regionlist[i].x1 = offsets.startx +
++ (uint32_t)(offsets.crop_width * (total - seg) * 1.0 / total);
++ /* FAULT: IMHO from here on, the calculation of y2 is based on wrong assumptions. The whole image is assumed and 'endy' and 'starty' are not respected anymore! */
++ /* NEW PROPOSED Code: Assumption: offsets are within image with top left corner as origin (0,0) and 'start' <= 'end'. */
++ uaux = (uint32_t)(offsets.crop_width * 1.0 * seg / total);
++ if (uaux <= offsets.endx + 1) {
++ crop->regionlist[i].x1 = offsets.endx - uaux + 1;
++ } else {
++ crop->regionlist[i].x1 = 0;
++ }
++ if (crop->regionlist[i].x1 < offsets.startx) {
++ crop->regionlist[i].x1 = offsets.startx;
++ }
+
+- /* This is passed to extractCropZone or extractCompositeZones */
+- crop->combined_length = (uint32_t)zlength;
+- if (crop->exp_mode == COMPOSITE_IMAGES)
+- crop->combined_width += (uint32_t)zwidth;
+- else
+- crop->combined_width = (uint32_t)zwidth;
+- break;
++ uaux = (uint32_t)(offsets.crop_width * 1.0 * (seg - 1) / total);
++ if (uaux <= offsets.endx) {
++ crop->regionlist[i].x2 = offsets.endx - uaux;
++ } else {
++ crop->regionlist[i].x2 = 0;
++ }
++ if (crop->regionlist[i].x2 < offsets.startx) {
++ crop->regionlist[i].x2 = offsets.startx;
++ }
++ zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++
++ /* This is passed to extractCropZone or extractCompositeZones */
++ crop->combined_length = (uint32_t)zlength;
++ if (crop->exp_mode == COMPOSITE_IMAGES)
++ crop->combined_width += (uint32_t)zwidth;
++ else
++ crop->combined_width = (uint32_t)zwidth;
++ break;
+ case EDGE_TOP: /* width from left, zones from top to bottom */
+ default:
+ zwidth = offsets.crop_width;
+@@ -5610,6 +5724,14 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ crop->regionlist[i].x2 = offsets.endx;
+
+ crop->regionlist[i].y1 = offsets.starty + (uint32_t)(offsets.crop_length * 1.0 * (seg - 1) / total);
++ if (crop->regionlist[i].y1 > offsets.endy) {
++ crop->regionlist[i].y1 = offsets.endy;
++ } else if (crop->regionlist[i].y1 >= image->length) {
++ crop->regionlist[i].y1 = image->length - 1;
++ }
++
++ /* FAULT: IMHO from here on, the calculation of y2 is based on wrong assumptions. The whole image is assumed and 'endy' and 'starty' are not respected anymore! */
++ /* OLD Code:
+ test = offsets.starty + (uint32_t)(offsets.crop_length * 1.0 * seg / total);
+ if (test < 1 )
+ crop->regionlist[i].y2 = 0;
+@@ -5620,6 +5742,18 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ else
+ crop->regionlist[i].y2 = test - 1;
+ }
++ */
++ /* NEW PROPOSED Code: Assumption: offsets are within image with top left corner as origin (0,0) and 'start' <= 'end'. */
++ crop->regionlist[i].y2 = offsets.starty + (uint32_t)(offsets.crop_length * 1.0 * seg / total);
++ if (crop->regionlist[i].y2 > 0)crop->regionlist[i].y2 = crop->regionlist[i].y2 - 1;
++ if (crop->regionlist[i].y2 < crop->regionlist[i].y1) {
++ crop->regionlist[i].y2 = crop->regionlist[i].y1;
++ } else if (crop->regionlist[i].y2 > offsets.endy) {
++ crop->regionlist[i].y2 = offsets.endy;
++ } else if (crop->regionlist[i].y2 >= image->length) {
++ crop->regionlist[i].y2 = image->length - 1;
++ }
++
+ zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
+
+ /* This is passed to extractCropZone or extractCompositeZones */
+@@ -7543,7 +7677,8 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ total_width = total_length = 0;
+ for (i = 0; i < crop->selections; i++)
+ {
+- cropsize = crop->bufftotal;
++
++ cropsize = crop->bufftotal;
+ crop_buff = seg_buffs[i].buffer;
+ if (!crop_buff)
+ crop_buff = (unsigned char *)limitMalloc(cropsize);
+@@ -7632,6 +7767,9 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
++ /* rotateImage() changes image->width, ->length, ->xres and ->yres, which it shouldn't do here when more than one section is processed.
++ * ToDo: Therefore rotateImage() and its usage have to be reworked (e.g. like mirrorImage()) !!
++ */
+ if (rotateImage(crop->rotation, image, &crop->regionlist[i].width,
+ &crop->regionlist[i].length, &crop_buff))
+ {
+@@ -7647,8 +7785,8 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ seg_buffs[i].size = (((crop->regionlist[i].width * image->bps + 7 ) / 8)
+ * image->spp) * crop->regionlist[i].length;
+ }
+- }
+- }
++ } /* for crop->selections loop */
++ } /* Separated Images (else case) */
+ return (0);
+ } /* end processCropSelections */
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-S-option-Make-decision-simpler.patch b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-S-option-Make-decision-simpler.patch
new file mode 100644
index 0000000000..79b4ff3f6e
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-S-option-Make-decision-simpler.patch
@@ -0,0 +1,36 @@
+From bad48e90b410df32172006c7876da449ba62cdba Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sat, 20 Aug 2022 23:35:26 +0200
+Subject: [PATCH] tiffcrop -S option: Make decision simpler.
+
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+---
+ tools/tiffcrop.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index c3b758ec..8fd856dc 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2133,11 +2133,11 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+ }
+ /*-- Check for not allowed combinations (e.g. -X, -Y and -Z, -z and -S are mutually exclusive) --*/
+ char XY, Z, R, S;
+- XY = ((crop_data->crop_mode & CROP_WIDTH) || (crop_data->crop_mode & CROP_LENGTH));
+- Z = (crop_data->crop_mode & CROP_ZONES);
+- R = (crop_data->crop_mode & CROP_REGIONS);
+- S = (page->mode & PAGE_MODE_ROWSCOLS);
+- if ((XY && Z) || (XY && R) || (XY && S) || (Z && R) || (Z && S) || (R && S)) {
++ XY = ((crop_data->crop_mode & CROP_WIDTH) || (crop_data->crop_mode & CROP_LENGTH)) ? 1 : 0;
++ Z = (crop_data->crop_mode & CROP_ZONES) ? 1 : 0;
++ R = (crop_data->crop_mode & CROP_REGIONS) ? 1 : 0;
++ S = (page->mode & PAGE_MODE_ROWSCOLS) ? 1 : 0;
++ if (XY + Z + R + S > 1) {
+ TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z, -z and -S are mutually exclusive.->Exit");
+ exit(EXIT_FAILURE);
+ }
+--
+2.34.1
+
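The point of the `? 1 : 0` normalisation above is that each option class becomes a clean 0/1 flag, so the earlier pairwise `(XY && Z) || (XY && R) || ...` chain collapses into a single sum comparison that also scales when another class is added. A reduced illustration (hypothetical flag values):

    #include <stdio.h>

    int main(void)
    {
        /* 1 if the user selected that option class, else 0. */
        int xy = 1, z = 0, r = 1, s = 0;

        /* Enumerating the pairs grows quadratically with each new class ... */
        int clash_pairs = (xy && z) || (xy && r) || (xy && s)
                       || (z && r) || (z && s) || (r && s);

        /* ... whereas summing 0/1 flags stays one comparison. */
        int clash_sum = (xy + z + r + s) > 1;

        printf("%d %d\n", clash_pairs, clash_sum);   /* both print 1 */
        return 0;
    }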
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-disable-incompatibility-of-Z-X-Y-z-options-.patch b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-disable-incompatibility-of-Z-X-Y-z-options-.patch
new file mode 100644
index 0000000000..6a62787648
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-disable-incompatibility-of-Z-X-Y-z-options-.patch
@@ -0,0 +1,59 @@
+From 4746f16253b784287bc8a5003990c1c3b9a03a62 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Thu, 25 Aug 2022 16:11:41 +0200
+Subject: [PATCH] tiffcrop: disable incompatibility of -Z, -X, -Y, -z options
+ with any PAGE_MODE_x option (fixes #411 and #413)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+tiffcrop does not support the -Z, -z, -X and -Y options together with any other PAGE_MODE_x options like -H, -V, -P, -J, -K or -S.
+
+Code analysis:
+
+With the options -Z and -z, crop.selections is set to a value > 0. Within main(), this triggers the call of processCropSelections(), which copies the sections from the read_buff into seg_buffs[].
+In the following code in main(), the only supported path where those seg_buffs are handled further is within an if-clause guarded by if (page.mode == PAGE_MODE_NONE).
+
+Execution of the else-clause often leads to buffer-overflows.
+
+Therefore, the above option combination is not supported and will be disabled to prevent those buffer-overflows.
+
+The MR solves issues #411 and #413.
+
+CVE: CVE-2022-3597 CVE-2022-3626 CVE-2022-3627
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+---
+ doc/tools/tiffcrop.rst | 8 ++++++++
+ tools/tiffcrop.c | 32 +++++++++++++++++++++++++-------
+ 2 files changed, 33 insertions(+), 7 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 8fd856dc..41a2ea36 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2138,9 +2143,20 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+ R = (crop_data->crop_mode & CROP_REGIONS) ? 1 : 0;
+ S = (page->mode & PAGE_MODE_ROWSCOLS) ? 1 : 0;
+ if (XY + Z + R + S > 1) {
+- TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z, -z and -S are mutually exclusive.->Exit");
++ TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z, -z and -S are mutually exclusive.->exit");
+ exit(EXIT_FAILURE);
+ }
++
++ /* Check for not allowed combination:
++ * Any of the -X, -Y, -Z and -z options together with other PAGE_MODE_x options
++ * such as -H, -V, -P, -J or -K are not supported and may cause buffer overflows.
++. */
++ if ((XY + Z + R > 0) && page->mode != PAGE_MODE_NONE) {
++ TIFFError("tiffcrop input error",
++ "Any of the crop options -X, -Y, -Z and -z together with other PAGE_MODE_x options such as - H, -V, -P, -J or -K is not supported and may cause buffer overflows..->exit");
++ exit(EXIT_FAILURE);
++ }
++
+ } /* end process_command_opts */
+
+ /* Start a new output file if one has not been previously opened or
+--
+2.34.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-subroutines-require-a-larger-buffer-fixes-2.patch b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-subroutines-require-a-larger-buffer-fixes-2.patch
new file mode 100644
index 0000000000..e10e37ccc9
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-tiffcrop-subroutines-require-a-larger-buffer-fixes-2.patch
@@ -0,0 +1,640 @@
+From 1e000b3484808f1ee7a68bd276220d1cd82dec73 Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Thu, 13 Oct 2022 14:33:27 +0000
+Subject: [PATCH] tiffcrop subroutines require a larger buffer (fixes #271,
+ #381, #386, #388, #389, #435)
+
+CVE: CVE-2022-3570 CVE-2022-3598
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+---
+ tools/tiffcrop.c | 203 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 114 insertions(+), 89 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f96c7d60..adf0f849 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -210,6 +210,10 @@ static char tiffcrop_rev_date[] = "02-09-2022";
+
+ #define TIFF_DIR_MAX 65534
+
++/* Some conversion subroutines require image buffers, which are at least 3 bytes
++ * larger than the necessary size for the image itself. */
++#define NUM_BUFF_OVERSIZE_BYTES 3
++
+ /* Offsets into buffer for margins and fixed width and length segments */
+ struct offset {
+ uint32_t tmargin;
+@@ -231,7 +235,7 @@ struct offset {
+ */
+
+ struct buffinfo {
+- uint32_t size; /* size of this buffer */
++ size_t size; /* size of this buffer */
+ unsigned char *buffer; /* address of the allocated buffer */
+ };
+
+@@ -805,8 +809,8 @@ static int readContigTilesIntoBuffer (TIFF* in, uint8_t* buf,
+ uint32_t dst_rowsize, shift_width;
+ uint32_t bytes_per_sample, bytes_per_pixel;
+ uint32_t trailing_bits, prev_trailing_bits;
+- uint32_t tile_rowsize = TIFFTileRowSize(in);
+- uint32_t src_offset, dst_offset;
++ tmsize_t tile_rowsize = TIFFTileRowSize(in);
++ tmsize_t src_offset, dst_offset;
+ uint32_t row_offset, col_offset;
+ uint8_t *bufp = (uint8_t*) buf;
+ unsigned char *src = NULL;
+@@ -856,7 +860,7 @@ static int readContigTilesIntoBuffer (TIFF* in, uint8_t* buf,
+ TIFFError("readContigTilesIntoBuffer", "Integer overflow when calculating buffer size.");
+ exit(EXIT_FAILURE);
+ }
+- tilebuf = limitMalloc(tile_buffsize + 3);
++ tilebuf = limitMalloc(tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (tilebuf == 0)
+ return 0;
+ tilebuf[tile_buffsize] = 0;
+@@ -1019,7 +1023,7 @@ static int readSeparateTilesIntoBuffer (TIFF* in, uint8_t *obuf,
+ for (sample = 0; (sample < spp) && (sample < MAX_SAMPLES); sample++)
+ {
+ srcbuffs[sample] = NULL;
+- tbuff = (unsigned char *)limitMalloc(tilesize + 8);
++ tbuff = (unsigned char *)limitMalloc(tilesize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!tbuff)
+ {
+ TIFFError ("readSeparateTilesIntoBuffer",
+@@ -1213,7 +1217,8 @@ writeBufferToSeparateStrips (TIFF* out, uint8_t* buf,
+ }
+ rowstripsize = rowsperstrip * bytes_per_sample * (width + 1);
+
+- obuf = limitMalloc (rowstripsize);
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ obuf = limitMalloc (rowstripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (obuf == NULL)
+ return 1;
+
+@@ -1226,7 +1231,7 @@ writeBufferToSeparateStrips (TIFF* out, uint8_t* buf,
+ stripsize = TIFFVStripSize(out, nrows);
+ src = buf + (row * rowsize);
+ total_bytes += stripsize;
+- memset (obuf, '\0', rowstripsize);
++ memset (obuf, '\0',rowstripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (extractContigSamplesToBuffer(obuf, src, nrows, width, s, spp, bps, dump))
+ {
+ _TIFFfree(obuf);
+@@ -1234,10 +1239,15 @@ writeBufferToSeparateStrips (TIFF* out, uint8_t* buf,
+ }
+ if ((dump->outfile != NULL) && (dump->level == 1))
+ {
+- dump_info(dump->outfile, dump->format,"",
++ if (scanlinesize > 0x0ffffffffULL) {
++ dump_info(dump->infile, dump->format, "loadImage",
++ "Attention: scanlinesize %"PRIu64" is larger than UINT32_MAX.\nFollowing dump might be wrong.",
++ scanlinesize);
++ }
++ dump_info(dump->outfile, dump->format,"",
+ "Sample %2d, Strip: %2d, bytes: %4d, Row %4d, bytes: %4d, Input offset: %6d",
+- s + 1, strip + 1, stripsize, row + 1, scanlinesize, src - buf);
+- dump_buffer(dump->outfile, dump->format, nrows, scanlinesize, row, obuf);
++ s + 1, strip + 1, stripsize, row + 1, (uint32_t)scanlinesize, src - buf);
++ dump_buffer(dump->outfile, dump->format, nrows, (uint32_t)scanlinesize, row, obuf);
+ }
+
+ if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0)
+@@ -1264,7 +1274,7 @@ static int writeBufferToContigTiles (TIFF* out, uint8_t* buf, uint32_t imageleng
+ uint32_t tl, tw;
+ uint32_t row, col, nrow, ncol;
+ uint32_t src_rowsize, col_offset;
+- uint32_t tile_rowsize = TIFFTileRowSize(out);
++ tmsize_t tile_rowsize = TIFFTileRowSize(out);
+ uint8_t* bufp = (uint8_t*) buf;
+ tsize_t tile_buffsize = 0;
+ tsize_t tilesize = TIFFTileSize(out);
+@@ -1307,9 +1317,11 @@ static int writeBufferToContigTiles (TIFF* out, uint8_t* buf, uint32_t imageleng
+ }
+ src_rowsize = ((imagewidth * spp * bps) + 7U) / 8;
+
+- tilebuf = limitMalloc(tile_buffsize);
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ tilebuf = limitMalloc(tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (tilebuf == 0)
+ return 1;
++ memset(tilebuf, 0, tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ for (row = 0; row < imagelength; row += tl)
+ {
+ nrow = (row + tl > imagelength) ? imagelength - row : tl;
+@@ -1355,7 +1367,8 @@ static int writeBufferToSeparateTiles (TIFF* out, uint8_t* buf, uint32_t imagele
+ uint32_t imagewidth, tsample_t spp,
+ struct dump_opts * dump)
+ {
+- tdata_t obuf = limitMalloc(TIFFTileSize(out));
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ tdata_t obuf = limitMalloc(TIFFTileSize(out) + NUM_BUFF_OVERSIZE_BYTES);
+ uint32_t tl, tw;
+ uint32_t row, col, nrow, ncol;
+ uint32_t src_rowsize, col_offset;
+@@ -1365,6 +1378,7 @@ static int writeBufferToSeparateTiles (TIFF* out, uint8_t* buf, uint32_t imagele
+
+ if (obuf == NULL)
+ return 1;
++ memset(obuf, 0, TIFFTileSize(out) + NUM_BUFF_OVERSIZE_BYTES);
+
+ if( !TIFFGetField(out, TIFFTAG_TILELENGTH, &tl) ||
+ !TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw) ||
+@@ -1790,14 +1804,14 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+
+ *opt_offset = '\0';
+ /* convert option to lowercase */
+- end = strlen (opt_ptr);
++ end = (unsigned int)strlen (opt_ptr);
+ for (i = 0; i < end; i++)
+ *(opt_ptr + i) = tolower((int) *(opt_ptr + i));
+ /* Look for dump format specification */
+ if (strncmp(opt_ptr, "for", 3) == 0)
+ {
+ /* convert value to lowercase */
+- end = strlen (opt_offset + 1);
++ end = (unsigned int)strlen (opt_offset + 1);
+ for (i = 1; i <= end; i++)
+ *(opt_offset + i) = tolower((int) *(opt_offset + i));
+ /* check dump format value */
+@@ -2270,6 +2284,8 @@ main(int argc, char* argv[])
+ size_t length;
+ char temp_filename[PATH_MAX + 16]; /* Extra space keeps the compiler from complaining */
+
++ assert(NUM_BUFF_OVERSIZE_BYTES >= 3);
++
+ little_endian = *((unsigned char *)&little_endian) & '1';
+
+ initImageData(&image);
+@@ -3222,13 +3238,13 @@ extractContigSamples32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8_t)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8_t)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8_t)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8_t)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -3637,13 +3653,13 @@ extractContigSamplesShifted32bits (uint8_t *in, uint8_t *out, uint32_t cols,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8_t)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8_t)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8_t)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8_t)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -3820,10 +3836,10 @@ extractContigSamplesToTileBuffer(uint8_t *out, uint8_t *in, uint32_t rows, uint3
+ static int readContigStripsIntoBuffer (TIFF* in, uint8_t* buf)
+ {
+ uint8_t* bufp = buf;
+- int32_t bytes_read = 0;
++ tmsize_t bytes_read = 0;
+ uint32_t strip, nstrips = TIFFNumberOfStrips(in);
+- uint32_t stripsize = TIFFStripSize(in);
+- uint32_t rows = 0;
++ tmsize_t stripsize = TIFFStripSize(in);
++ tmsize_t rows = 0;
+ uint32_t rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
+ tsize_t scanline_size = TIFFScanlineSize(in);
+
+@@ -3836,11 +3852,11 @@ static int readContigStripsIntoBuffer (TIFF* in, uint8_t* buf)
+ bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
+ rows = bytes_read / scanline_size;
+ if ((strip < (nstrips - 1)) && (bytes_read != (int32_t)stripsize))
+- TIFFError("", "Strip %"PRIu32": read %"PRId32" bytes, strip size %"PRIu32,
++ TIFFError("", "Strip %"PRIu32": read %"PRId64" bytes, strip size %"PRIu64,
+ strip + 1, bytes_read, stripsize);
+
+ if (bytes_read < 0 && !ignore) {
+- TIFFError("", "Error reading strip %"PRIu32" after %"PRIu32" rows",
++ TIFFError("", "Error reading strip %"PRIu32" after %"PRIu64" rows",
+ strip, rows);
+ return 0;
+ }
+@@ -4305,13 +4321,13 @@ combineSeparateSamples32bits (uint8_t *in[], uint8_t *out, uint32_t cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8_t)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8_t)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8_t)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8_t)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -4354,10 +4370,10 @@ combineSeparateSamples32bits (uint8_t *in[], uint8_t *out, uint32_t cols,
+ "Row %3d, Col %3d, Src byte offset %3d bit offset %2d Dst offset %3d",
+ row + 1, col + 1, src_byte, src_bit, dst - out);
+
+- dump_long (dumpfile, format, "Match bits ", matchbits);
++ dump_wide (dumpfile, format, "Match bits ", matchbits);
+ dump_data (dumpfile, format, "Src bits ", src, 4);
+- dump_long (dumpfile, format, "Buff1 bits ", buff1);
+- dump_long (dumpfile, format, "Buff2 bits ", buff2);
++ dump_wide (dumpfile, format, "Buff1 bits ", buff1);
++ dump_wide (dumpfile, format, "Buff2 bits ", buff2);
+ dump_byte (dumpfile, format, "Write bits1", bytebuff1);
+ dump_byte (dumpfile, format, "Write bits2", bytebuff2);
+ dump_info (dumpfile, format, "", "Ready bits: %2d", ready_bits);
+@@ -4830,13 +4846,13 @@ combineSeparateTileSamples32bits (uint8_t *in[], uint8_t *out, uint32_t cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8_t)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8_t)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8_t)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8_t)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -4879,10 +4895,10 @@ combineSeparateTileSamples32bits (uint8_t *in[], uint8_t *out, uint32_t cols,
+ "Row %3d, Col %3d, Src byte offset %3d bit offset %2d Dst offset %3d",
+ row + 1, col + 1, src_byte, src_bit, dst - out);
+
+- dump_long (dumpfile, format, "Match bits ", matchbits);
++ dump_wide (dumpfile, format, "Match bits ", matchbits);
+ dump_data (dumpfile, format, "Src bits ", src, 4);
+- dump_long (dumpfile, format, "Buff1 bits ", buff1);
+- dump_long (dumpfile, format, "Buff2 bits ", buff2);
++ dump_wide (dumpfile, format, "Buff1 bits ", buff1);
++ dump_wide (dumpfile, format, "Buff2 bits ", buff2);
+ dump_byte (dumpfile, format, "Write bits1", bytebuff1);
+ dump_byte (dumpfile, format, "Write bits2", bytebuff2);
+ dump_info (dumpfile, format, "", "Ready bits: %2d", ready_bits);
+@@ -4905,7 +4921,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8_t *obuf, uint32_t lengt
+ {
+ int i, bytes_per_sample, bytes_per_pixel, shift_width, result = 1;
+ uint32_t j;
+- int32_t bytes_read = 0;
++ tmsize_t bytes_read = 0;
+ uint16_t bps = 0, planar;
+ uint32_t nstrips;
+ uint32_t strips_per_sample;
+@@ -4971,7 +4987,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8_t *obuf, uint32_t lengt
+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++)
+ {
+ srcbuffs[s] = NULL;
+- buff = limitMalloc(stripsize + 3);
++ buff = limitMalloc(stripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!buff)
+ {
+ TIFFError ("readSeparateStripsIntoBuffer",
+@@ -4994,7 +5010,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8_t *obuf, uint32_t lengt
+ buff = srcbuffs[s];
+ strip = (s * strips_per_sample) + j;
+ bytes_read = TIFFReadEncodedStrip (in, strip, buff, stripsize);
+- rows_this_strip = bytes_read / src_rowsize;
++ rows_this_strip = (uint32_t)(bytes_read / src_rowsize);
+ if (bytes_read < 0 && !ignore)
+ {
+ TIFFError(TIFFFileName(in),
+@@ -6047,13 +6063,14 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ uint16_t input_compression = 0, input_photometric = 0;
+ uint16_t subsampling_horiz, subsampling_vert;
+ uint32_t width = 0, length = 0;
+- uint32_t stsize = 0, tlsize = 0, buffsize = 0, scanlinesize = 0;
++ tmsize_t stsize = 0, tlsize = 0, buffsize = 0;
++ tmsize_t scanlinesize = 0;
+ uint32_t tw = 0, tl = 0; /* Tile width and length */
+- uint32_t tile_rowsize = 0;
++ tmsize_t tile_rowsize = 0;
+ unsigned char *read_buff = NULL;
+ unsigned char *new_buff = NULL;
+ int readunit = 0;
+- static uint32_t prev_readsize = 0;
++ static tmsize_t prev_readsize = 0;
+
+ TIFFGetFieldDefaulted(in, TIFFTAG_BITSPERSAMPLE, &bps);
+ TIFFGetFieldDefaulted(in, TIFFTAG_SAMPLESPERPIXEL, &spp);
+@@ -6355,7 +6372,7 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+ return (-1);
+ }
+- read_buff = (unsigned char *)limitMalloc(buffsize+3);
++ read_buff = (unsigned char *)limitMalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ {
+@@ -6366,11 +6383,11 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+ return (-1);
+ }
+- new_buff = _TIFFrealloc(read_buff, buffsize+3);
++ new_buff = _TIFFrealloc(read_buff, buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (read_buff);
+- read_buff = (unsigned char *)limitMalloc(buffsize+3);
++ read_buff = (unsigned char *)limitMalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ read_buff = new_buff;
+@@ -6443,8 +6460,13 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ dump_info (dump->infile, dump->format, "",
+ "Bits per sample %"PRIu16", Samples per pixel %"PRIu16, bps, spp);
+
++ if (scanlinesize > 0x0ffffffffULL) {
++ dump_info(dump->infile, dump->format, "loadImage",
++ "Attention: scanlinesize %"PRIu64" is larger than UINT32_MAX.\nFollowing dump might be wrong.",
++ scanlinesize);
++ }
+ for (i = 0; i < length; i++)
+- dump_buffer(dump->infile, dump->format, 1, scanlinesize,
++ dump_buffer(dump->infile, dump->format, 1, (uint32_t)scanlinesize,
+ i, read_buff + (i * scanlinesize));
+ }
+ return (0);
+@@ -7464,13 +7486,13 @@ writeSingleSection(TIFF *in, TIFF *out, struct image_data *image,
+ if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
+ TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
+ if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
+- int inknameslen = strlen(inknames) + 1;
++ int inknameslen = (int)strlen(inknames) + 1;
+ const char* cp = inknames;
+ while (ninks > 1) {
+ cp = strchr(cp, '\0');
+ if (cp) {
+ cp++;
+- inknameslen += (strlen(cp) + 1);
++ inknameslen += ((int)strlen(cp) + 1);
+ }
+ ninks--;
+ }
+@@ -7533,23 +7555,23 @@ createImageSection(uint32_t sectsize, unsigned char **sect_buff_ptr)
+
+ if (!sect_buff)
+ {
+- sect_buff = (unsigned char *)limitMalloc(sectsize);
++ sect_buff = (unsigned char *)limitMalloc(sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!sect_buff)
+ {
+ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+ return (-1);
+ }
+- _TIFFmemset(sect_buff, 0, sectsize);
++ _TIFFmemset(sect_buff, 0, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ {
+ if (prev_sectsize < sectsize)
+ {
+- new_buff = _TIFFrealloc(sect_buff, sectsize);
++ new_buff = _TIFFrealloc(sect_buff, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ _TIFFfree (sect_buff);
+- sect_buff = (unsigned char *)limitMalloc(sectsize);
++ sect_buff = (unsigned char *)limitMalloc(sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ sect_buff = new_buff;
+@@ -7559,7 +7581,7 @@ createImageSection(uint32_t sectsize, unsigned char **sect_buff_ptr)
+ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+ return (-1);
+ }
+- _TIFFmemset(sect_buff, 0, sectsize);
++ _TIFFmemset(sect_buff, 0, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ }
+
+@@ -7590,17 +7612,17 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ cropsize = crop->bufftotal;
+ crop_buff = seg_buffs[0].buffer;
+ if (!crop_buff)
+- crop_buff = (unsigned char *)limitMalloc(cropsize);
++ crop_buff = (unsigned char *)limitMalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+ prev_cropsize = seg_buffs[0].size;
+ if (prev_cropsize < cropsize)
+ {
+- next_buff = _TIFFrealloc(crop_buff, cropsize);
++ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (! next_buff)
+ {
+ _TIFFfree (crop_buff);
+- crop_buff = (unsigned char *)limitMalloc(cropsize);
++ crop_buff = (unsigned char *)limitMalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = next_buff;
+@@ -7613,7 +7635,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ return (-1);
+ }
+
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ seg_buffs[0].buffer = crop_buff;
+ seg_buffs[0].size = cropsize;
+
+@@ -7693,17 +7715,17 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ cropsize = crop->bufftotal;
+ crop_buff = seg_buffs[i].buffer;
+ if (!crop_buff)
+- crop_buff = (unsigned char *)limitMalloc(cropsize);
++ crop_buff = (unsigned char *)limitMalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+ prev_cropsize = seg_buffs[0].size;
+ if (prev_cropsize < cropsize)
+ {
+- next_buff = _TIFFrealloc(crop_buff, cropsize);
++ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (! next_buff)
+ {
+ _TIFFfree (crop_buff);
+- crop_buff = (unsigned char *)limitMalloc(cropsize);
++ crop_buff = (unsigned char *)limitMalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = next_buff;
+@@ -7716,7 +7738,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ return (-1);
+ }
+
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ seg_buffs[i].buffer = crop_buff;
+ seg_buffs[i].size = cropsize;
+
+@@ -7832,24 +7854,24 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ crop_buff = *crop_buff_ptr;
+ if (!crop_buff)
+ {
+- crop_buff = (unsigned char *)limitMalloc(cropsize);
++ crop_buff = (unsigned char *)limitMalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!crop_buff)
+ {
+ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+ return (-1);
+ }
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ prev_cropsize = cropsize;
+ }
+ else
+ {
+ if (prev_cropsize < cropsize)
+ {
+- new_buff = _TIFFrealloc(crop_buff, cropsize);
++ new_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (crop_buff);
+- crop_buff = (unsigned char *)limitMalloc(cropsize);
++ crop_buff = (unsigned char *)limitMalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = new_buff;
+@@ -7858,7 +7880,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+ return (-1);
+ }
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ }
+
+@@ -8156,13 +8178,13 @@ writeCroppedImage(TIFF *in, TIFF *out, struct image_data *image,
+ if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
+ TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
+ if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
+- int inknameslen = strlen(inknames) + 1;
++ int inknameslen = (int)strlen(inknames) + 1;
+ const char* cp = inknames;
+ while (ninks > 1) {
+ cp = strchr(cp, '\0');
+ if (cp) {
+ cp++;
+- inknameslen += (strlen(cp) + 1);
++ inknameslen += ((int)strlen(cp) + 1);
+ }
+ ninks--;
+ }
+@@ -8547,13 +8569,13 @@ rotateContigSamples32bits(uint16_t rotation, uint16_t spp, uint16_t bps, uint32_
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8_t)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8_t)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8_t)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8_t)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -8622,12 +8644,13 @@ rotateImage(uint16_t rotation, struct image_data *image, uint32_t *img_width,
+ return (-1);
+ }
+
+- if (!(rbuff = (unsigned char *)limitMalloc(buffsize)))
++ /* Add 3 padding bytes for extractContigSamplesShifted32bits */
++ if (!(rbuff = (unsigned char *)limitMalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+- TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize);
++ TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
+- _TIFFmemset(rbuff, '\0', buffsize);
++ _TIFFmemset(rbuff, '\0', buffsize + NUM_BUFF_OVERSIZE_BYTES);
+
+ ibuff = *ibuff_ptr;
+ switch (rotation)
+@@ -9155,13 +9178,13 @@ reverseSamples32bits (uint16_t spp, uint16_t bps, uint32_t width,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8_t)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8_t)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8_t)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8_t)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -9252,12 +9275,13 @@ mirrorImage(uint16_t spp, uint16_t bps, uint16_t mirror, uint32_t width, uint32_
+ {
+ case MIRROR_BOTH:
+ case MIRROR_VERT:
+- line_buff = (unsigned char *)limitMalloc(rowsize);
++ line_buff = (unsigned char *)limitMalloc(rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (line_buff == NULL)
+ {
+- TIFFError ("mirrorImage", "Unable to allocate mirror line buffer of %1u bytes", rowsize);
++ TIFFError ("mirrorImage", "Unable to allocate mirror line buffer of %1u bytes", rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
++ _TIFFmemset(line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+
+ dst = ibuff + (rowsize * (length - 1));
+ for (row = 0; row < length / 2; row++)
+@@ -9289,11 +9313,12 @@ mirrorImage(uint16_t spp, uint16_t bps, uint16_t mirror, uint32_t width, uint32_
+ }
+ else
+ { /* non 8 bit per sample data */
+- if (!(line_buff = (unsigned char *)limitMalloc(rowsize + 1)))
++ if (!(line_buff = (unsigned char *)limitMalloc(rowsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+ TIFFError("mirrorImage", "Unable to allocate mirror line buffer");
+ return (-1);
+ }
++ _TIFFmemset(line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ bytes_per_sample = (bps + 7) / 8;
+ bytes_per_pixel = ((bps * spp) + 7) / 8;
+ if (bytes_per_pixel < (bytes_per_sample + 1))
+@@ -9305,7 +9330,7 @@ mirrorImage(uint16_t spp, uint16_t bps, uint16_t mirror, uint32_t width, uint32_
+ {
+ row_offset = row * rowsize;
+ src = ibuff + row_offset;
+- _TIFFmemset (line_buff, '\0', rowsize);
++ _TIFFmemset (line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ switch (shift_width)
+ {
+ case 1: if (reverseSamples16bits(spp, bps, width, src, line_buff))
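The change above applies one pattern at every allocation site: reserve NUM_BUFF_OVERSIZE_BYTES of extra space and zero the whole buffer, so the bit-shifting extract/combine routines can touch a few bytes past the nominal image size without leaving the allocation. A minimal standalone sketch of that pattern in plain C (the helper name alloc_image_buffer() is invented here for illustration; only the NUM_BUFF_OVERSIZE_BYTES idea comes from the patch, which uses limitMalloc()/_TIFFmemset() instead of malloc()/memset()):

    #include <stdlib.h>
    #include <string.h>

    /* Same idea as NUM_BUFF_OVERSIZE_BYTES in the patch: some conversion
     * subroutines may read up to 3 bytes beyond the image data, so every
     * image buffer is allocated that much larger and fully zeroed. */
    #define NUM_BUFF_OVERSIZE_BYTES 3

    /* Hypothetical helper, for illustration only. */
    static unsigned char *alloc_image_buffer(size_t payload_size)
    {
        unsigned char *buf = malloc(payload_size + NUM_BUFF_OVERSIZE_BYTES);
        if (buf == NULL)
            return NULL;
        /* Zero payload and padding so any over-read sees defined bytes. */
        memset(buf, 0, payload_size + NUM_BUFF_OVERSIZE_BYTES);
        return buf;
    }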
diff --git a/meta/recipes-multimedia/libtiff/tiff/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch b/meta/recipes-multimedia/libtiff/tiff/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch
index 72776f09ba..e79964de55 100644
--- a/meta/recipes-multimedia/libtiff/tiff/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch
@@ -1,11 +1,12 @@
+From bc71e64b6f4477ed69064802b1252bab904a89b4 Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Tue, 25 Jan 2022 16:25:28 +0000
+Subject: [PATCH] tiffset: fix global-buffer-overflow for ASCII tags where
+
CVE: CVE-2022-22844
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
-From b12a0326e6064b6e0b051d1184a219877472f69b Mon Sep 17 00:00:00 2001
-From: 4ugustus <wangdw.augustus@qq.com>
-Date: Tue, 25 Jan 2022 16:25:28 +0000
-Subject: [PATCH] tiffset: fix global-buffer-overflow for ASCII tags where
count is required (fixes #355)
---
@@ -13,7 +14,7 @@ Subject: [PATCH] tiffset: fix global-buffer-overflow for ASCII tags where
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/tools/tiffset.c b/tools/tiffset.c
-index 8c9e23c5..e7a88c09 100644
+index 8c9e23c..e7a88c0 100644
--- a/tools/tiffset.c
+++ b/tools/tiffset.c
@@ -146,9 +146,19 @@ main(int argc, char* argv[])
@@ -39,5 +40,3 @@ index 8c9e23c5..e7a88c09 100644
} else if (TIFFFieldWriteCount(fip) > 0
|| TIFFFieldWriteCount(fip) == TIFF_VARIABLE) {
int ret = 1;
---
-2.25.1
diff --git a/meta/recipes-multimedia/libtiff/tiff/0002-tiffcrop-fix-issue-380-and-382-heap-buffer-overflow-.patch b/meta/recipes-multimedia/libtiff/tiff/0002-tiffcrop-fix-issue-380-and-382-heap-buffer-overflow-.patch
index 812ffb232d..2becf53806 100644
--- a/meta/recipes-multimedia/libtiff/tiff/0002-tiffcrop-fix-issue-380-and-382-heap-buffer-overflow-.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/0002-tiffcrop-fix-issue-380-and-382-heap-buffer-overflow-.patch
@@ -1,12 +1,13 @@
+From 9b2645d830b4ad004824cf28d81f3b974faf0037 Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 8 Mar 2022 17:02:44 +0000
+Subject: [PATCH] tiffcrop: fix issue #380 and #382 heap buffer overflow in
+
CVE: CVE-2022-0891
CVE: CVE-2022-1056
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
-From e46b49e60fddb2e924302fb1751f79eb9cfb2253 Mon Sep 17 00:00:00 2001
-From: Su Laus <sulau@freenet.de>
-Date: Tue, 8 Mar 2022 17:02:44 +0000
-Subject: [PATCH 2/6] tiffcrop: fix issue #380 and #382 heap buffer overflow in
extractImageSection
---
@@ -14,7 +15,7 @@ Subject: [PATCH 2/6] tiffcrop: fix issue #380 and #382 heap buffer overflow in
1 file changed, 36 insertions(+), 56 deletions(-)
diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
-index b85c2ce7..302a7e91 100644
+index b85c2ce..302a7e9 100644
--- a/tools/tiffcrop.c
+++ b/tools/tiffcrop.c
@@ -105,8 +105,8 @@
@@ -214,6 +215,3 @@ index b85c2ce7..302a7e91 100644
/* allocate a buffer if we don't have one already */
if (createImageSection(sectsize, sect_buff_ptr))
{
---
-2.25.1
-
diff --git a/meta/recipes-multimedia/libtiff/tiff/0003-add-checks-for-return-value-of-limitMalloc-392.patch b/meta/recipes-multimedia/libtiff/tiff/0003-add-checks-for-return-value-of-limitMalloc-392.patch
index a0b856b9e1..b48a3df1a9 100644
--- a/meta/recipes-multimedia/libtiff/tiff/0003-add-checks-for-return-value-of-limitMalloc-392.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/0003-add-checks-for-return-value-of-limitMalloc-392.patch
@@ -1,18 +1,18 @@
+From b4743cc69d2f506e1f1c4db9adc8e58d75805e4d Mon Sep 17 00:00:00 2001
+From: Augustus <wangdw.augustus@qq.com>
+Date: Mon, 7 Mar 2022 18:21:49 +0800
+Subject: [PATCH] add checks for return value of limitMalloc (#392)
+
CVE: CVE-2022-0907
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
-From a139191cc86f4dc44c74a0f22928e0fb38ed2485 Mon Sep 17 00:00:00 2001
-From: Augustus <wangdw.augustus@qq.com>
-Date: Mon, 7 Mar 2022 18:21:49 +0800
-Subject: [PATCH 3/6] add checks for return value of limitMalloc (#392)
-
---
tools/tiffcrop.c | 33 +++++++++++++++++++++------------
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
-index 302a7e91..e407bf51 100644
+index 302a7e9..e407bf5 100644
--- a/tools/tiffcrop.c
+++ b/tools/tiffcrop.c
@@ -7357,7 +7357,11 @@ createImageSection(uint32_t sectsize, unsigned char **sect_buff_ptr)
@@ -88,6 +88,3 @@ index 302a7e91..e407bf51 100644
* End:
*/
+
---
-2.25.1
-
diff --git a/meta/recipes-multimedia/libtiff/tiff/0004-TIFFFetchNormalTag-avoid-calling-memcpy-with-a-null-.patch b/meta/recipes-multimedia/libtiff/tiff/0004-TIFFFetchNormalTag-avoid-calling-memcpy-with-a-null-.patch
index 719dabaecc..6f2df44bd5 100644
--- a/meta/recipes-multimedia/libtiff/tiff/0004-TIFFFetchNormalTag-avoid-calling-memcpy-with-a-null-.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/0004-TIFFFetchNormalTag-avoid-calling-memcpy-with-a-null-.patch
@@ -1,11 +1,12 @@
+From 0343619094bfc7b8e23814f672411b008db2aa66 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Thu, 17 Feb 2022 15:28:43 +0100
+Subject: [PATCH] TIFFFetchNormalTag(): avoid calling memcpy() with a null
+
CVE: CVE-2022-0908
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
-From ef5a0bf271823df168642444d051528a68205cb0 Mon Sep 17 00:00:00 2001
-From: Even Rouault <even.rouault@spatialys.com>
-Date: Thu, 17 Feb 2022 15:28:43 +0100
-Subject: [PATCH 4/6] TIFFFetchNormalTag(): avoid calling memcpy() with a null
source pointer and size of zero (fixes #383)
---
@@ -13,10 +14,10 @@ Subject: [PATCH 4/6] TIFFFetchNormalTag(): avoid calling memcpy() with a null
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
-index d84147a0..4e8ce729 100644
+index d654a1c..a31109a 100644
--- a/libtiff/tif_dirread.c
+++ b/libtiff/tif_dirread.c
-@@ -5079,7 +5079,10 @@ TIFFFetchNormalTag(TIFF* tif, TIFFDirEntry* dp, int recover)
+@@ -5080,7 +5080,10 @@ TIFFFetchNormalTag(TIFF* tif, TIFFDirEntry* dp, int recover)
_TIFFfree(data);
return(0);
}
@@ -28,6 +29,3 @@ index d84147a0..4e8ce729 100644
o[(uint32_t)dp->tdir_count]=0;
if (data!=0)
_TIFFfree(data);
---
-2.25.1
-
diff --git a/meta/recipes-multimedia/libtiff/tiff/0005-fix-the-FPE-in-tiffcrop-393.patch b/meta/recipes-multimedia/libtiff/tiff/0005-fix-the-FPE-in-tiffcrop-393.patch
index 64dbe9ef92..21dc552036 100644
--- a/meta/recipes-multimedia/libtiff/tiff/0005-fix-the-FPE-in-tiffcrop-393.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/0005-fix-the-FPE-in-tiffcrop-393.patch
@@ -1,18 +1,18 @@
+From e56d66a033b533f26872a20cb2052473962a0f2e Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Tue, 8 Mar 2022 16:22:04 +0000
+Subject: [PATCH] fix the FPE in tiffcrop (#393)
+
CVE: CVE-2022-0909
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
-From 4768355a074d562177e0a8b551c561d1af7eb74a Mon Sep 17 00:00:00 2001
-From: 4ugustus <wangdw.augustus@qq.com>
-Date: Tue, 8 Mar 2022 16:22:04 +0000
-Subject: [PATCH 5/6] fix the FPE in tiffcrop (#393)
-
---
libtiff/tif_dir.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
-index a6c254fc..77da6ea4 100644
+index a6c254f..77da6ea 100644
--- a/libtiff/tif_dir.c
+++ b/libtiff/tif_dir.c
@@ -335,13 +335,13 @@ _TIFFVSetField(TIFF* tif, uint32_t tag, va_list ap)
@@ -31,6 +31,3 @@ index a6c254fc..77da6ea4 100644
goto badvaluedouble;
td->td_yresolution = _TIFFClampDoubleToFloat( dblval );
break;
---
-2.25.1
-
diff --git a/meta/recipes-multimedia/libtiff/tiff/0006-fix-heap-buffer-overflow-in-tiffcp-278.patch b/meta/recipes-multimedia/libtiff/tiff/0006-fix-heap-buffer-overflow-in-tiffcp-278.patch
index afd5e59960..337b84d992 100644
--- a/meta/recipes-multimedia/libtiff/tiff/0006-fix-heap-buffer-overflow-in-tiffcp-278.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/0006-fix-heap-buffer-overflow-in-tiffcp-278.patch
@@ -1,18 +1,18 @@
+From 2dd282a54e5fccf9b501973e6da5f83ebde8e980 Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Thu, 10 Mar 2022 08:48:00 +0000
+Subject: [PATCH] fix heap buffer overflow in tiffcp (#278)
+
CVE: CVE-2022-0924
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
-From 1074b9691322b1e3671cd8ea0b6b3509d08978fb Mon Sep 17 00:00:00 2001
-From: 4ugustus <wangdw.augustus@qq.com>
-Date: Thu, 10 Mar 2022 08:48:00 +0000
-Subject: [PATCH 6/6] fix heap buffer overflow in tiffcp (#278)
-
---
tools/tiffcp.c | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/tools/tiffcp.c b/tools/tiffcp.c
-index 1f889516..552d8fad 100644
+index 1f88951..552d8fa 100644
--- a/tools/tiffcp.c
+++ b/tools/tiffcp.c
@@ -1661,12 +1661,27 @@ DECLAREwriteFunc(writeBufferToSeparateStrips)
@@ -52,6 +52,3 @@ index 1f889516..552d8fad 100644
if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) {
TIFFError(TIFFFileName(out),
"Error, can't write strip %"PRIu32,
---
-2.25.1
-
diff --git a/meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch b/meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch
index 0b41dde606..e5b34fd258 100644
--- a/meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch
@@ -1,4 +1,4 @@
-From 561599c99f987dc32ae110370cfdd7df7975586b Mon Sep 17 00:00:00 2001
+From 7b91458541769f3d7eddc55a39d01730af2489fc Mon Sep 17 00:00:00 2001
From: Even Rouault <even.rouault@spatialys.com>
Date: Sat, 5 Feb 2022 20:36:41 +0100
Subject: [PATCH] TIFFReadDirectory(): avoid calling memcpy() with a null
@@ -12,10 +12,10 @@ CVE: CVE-2022-0562
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
-index 2bbc4585..23194ced 100644
+index d84147a..ae52ad4 100644
--- a/libtiff/tif_dirread.c
+++ b/libtiff/tif_dirread.c
-@@ -4177,7 +4177,8 @@ TIFFReadDirectory(TIFF* tif)
+@@ -4173,7 +4173,8 @@ TIFFReadDirectory(TIFF* tif)
goto bad;
}
@@ -25,6 +25,3 @@ index 2bbc4585..23194ced 100644
_TIFFsetShortArray(&tif->tif_dir.td_sampleinfo, new_sampleinfo, tif->tif_dir.td_extrasamples);
_TIFFfree(new_sampleinfo);
}
---
-GitLab
-
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch
new file mode 100644
index 0000000000..989ccbfa50
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch
@@ -0,0 +1,210 @@
+From 281fa3cf0e0e8a44b93478c63d90dbfb64359e88 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Sun, 5 Dec 2021 14:37:46 +0100
+Subject: [PATCH] TIFFReadDirectory: fix OJPEG hack (fixes #319)
+
+to avoid having the size of the strip arrays inconsistent with the
+number of strips returned by TIFFNumberOfStrips(), which may cause
+out-of-bounds array read afterwards.
+
+One of the OJPEG hacks that alter SamplesPerPixel may influence the
+number of strips. Hence compute tif_dir.td_nstrips only afterwards.
+
+CVE: CVE-2022-1354
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/87f580f39011109b3bb5f6eca13fac543a542798]
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+
+---
+ libtiff/tif_dirread.c | 162 ++++++++++++++++++++++--------------------
+ 1 file changed, 83 insertions(+), 79 deletions(-)
+
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index a31109a..d7cccbe 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -3794,50 +3794,7 @@ TIFFReadDirectory(TIFF* tif)
+ MissingRequired(tif,"ImageLength");
+ goto bad;
+ }
+- /*
+- * Setup appropriate structures (by strip or by tile)
+- */
+- if (!TIFFFieldSet(tif, FIELD_TILEDIMENSIONS)) {
+- tif->tif_dir.td_nstrips = TIFFNumberOfStrips(tif);
+- tif->tif_dir.td_tilewidth = tif->tif_dir.td_imagewidth;
+- tif->tif_dir.td_tilelength = tif->tif_dir.td_rowsperstrip;
+- tif->tif_dir.td_tiledepth = tif->tif_dir.td_imagedepth;
+- tif->tif_flags &= ~TIFF_ISTILED;
+- } else {
+- tif->tif_dir.td_nstrips = TIFFNumberOfTiles(tif);
+- tif->tif_flags |= TIFF_ISTILED;
+- }
+- if (!tif->tif_dir.td_nstrips) {
+- TIFFErrorExt(tif->tif_clientdata, module,
+- "Cannot handle zero number of %s",
+- isTiled(tif) ? "tiles" : "strips");
+- goto bad;
+- }
+- tif->tif_dir.td_stripsperimage = tif->tif_dir.td_nstrips;
+- if (tif->tif_dir.td_planarconfig == PLANARCONFIG_SEPARATE)
+- tif->tif_dir.td_stripsperimage /= tif->tif_dir.td_samplesperpixel;
+- if (!TIFFFieldSet(tif, FIELD_STRIPOFFSETS)) {
+-#ifdef OJPEG_SUPPORT
+- if ((tif->tif_dir.td_compression==COMPRESSION_OJPEG) &&
+- (isTiled(tif)==0) &&
+- (tif->tif_dir.td_nstrips==1)) {
+- /*
+- * XXX: OJPEG hack.
+- * If a) compression is OJPEG, b) it's not a tiled TIFF,
+- * and c) the number of strips is 1,
+- * then we tolerate the absence of stripoffsets tag,
+- * because, presumably, all required data is in the
+- * JpegInterchangeFormat stream.
+- */
+- TIFFSetFieldBit(tif, FIELD_STRIPOFFSETS);
+- } else
+-#endif
+- {
+- MissingRequired(tif,
+- isTiled(tif) ? "TileOffsets" : "StripOffsets");
+- goto bad;
+- }
+- }
++
+ /*
+ * Second pass: extract other information.
+ */
+@@ -4042,41 +3999,6 @@ TIFFReadDirectory(TIFF* tif)
+ } /* -- if (!dp->tdir_ignore) */
+ } /* -- for-loop -- */
+
+- if( tif->tif_mode == O_RDWR &&
+- tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_count == 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_type == 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8 == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_count == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_type == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8 == 0 )
+- {
+- /* Directory typically created with TIFFDeferStrileArrayWriting() */
+- TIFFSetupStrips(tif);
+- }
+- else if( !(tif->tif_flags&TIFF_DEFERSTRILELOAD) )
+- {
+- if( tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 )
+- {
+- if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripoffset_entry),
+- tif->tif_dir.td_nstrips,
+- &tif->tif_dir.td_stripoffset_p))
+- {
+- goto bad;
+- }
+- }
+- if( tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 )
+- {
+- if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripbytecount_entry),
+- tif->tif_dir.td_nstrips,
+- &tif->tif_dir.td_stripbytecount_p))
+- {
+- goto bad;
+- }
+- }
+- }
+-
+ /*
+ * OJPEG hack:
+ * - If a) compression is OJPEG, and b) photometric tag is missing,
+@@ -4147,6 +4069,88 @@ TIFFReadDirectory(TIFF* tif)
+ }
+ }
+
++ /*
++ * Setup appropriate structures (by strip or by tile)
++ * We do that only after the above OJPEG hack which alters SamplesPerPixel
++ * and thus influences the number of strips in the separate planarconfig.
++ */
++ if (!TIFFFieldSet(tif, FIELD_TILEDIMENSIONS)) {
++ tif->tif_dir.td_nstrips = TIFFNumberOfStrips(tif);
++ tif->tif_dir.td_tilewidth = tif->tif_dir.td_imagewidth;
++ tif->tif_dir.td_tilelength = tif->tif_dir.td_rowsperstrip;
++ tif->tif_dir.td_tiledepth = tif->tif_dir.td_imagedepth;
++ tif->tif_flags &= ~TIFF_ISTILED;
++ } else {
++ tif->tif_dir.td_nstrips = TIFFNumberOfTiles(tif);
++ tif->tif_flags |= TIFF_ISTILED;
++ }
++ if (!tif->tif_dir.td_nstrips) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Cannot handle zero number of %s",
++ isTiled(tif) ? "tiles" : "strips");
++ goto bad;
++ }
++ tif->tif_dir.td_stripsperimage = tif->tif_dir.td_nstrips;
++ if (tif->tif_dir.td_planarconfig == PLANARCONFIG_SEPARATE)
++ tif->tif_dir.td_stripsperimage /= tif->tif_dir.td_samplesperpixel;
++ if (!TIFFFieldSet(tif, FIELD_STRIPOFFSETS)) {
++#ifdef OJPEG_SUPPORT
++ if ((tif->tif_dir.td_compression==COMPRESSION_OJPEG) &&
++ (isTiled(tif)==0) &&
++ (tif->tif_dir.td_nstrips==1)) {
++ /*
++ * XXX: OJPEG hack.
++ * If a) compression is OJPEG, b) it's not a tiled TIFF,
++ * and c) the number of strips is 1,
++ * then we tolerate the absence of stripoffsets tag,
++ * because, presumably, all required data is in the
++ * JpegInterchangeFormat stream.
++ */
++ TIFFSetFieldBit(tif, FIELD_STRIPOFFSETS);
++ } else
++#endif
++ {
++ MissingRequired(tif,
++ isTiled(tif) ? "TileOffsets" : "StripOffsets");
++ goto bad;
++ }
++ }
++
++ if( tif->tif_mode == O_RDWR &&
++ tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_count == 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_type == 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8 == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_count == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_type == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8 == 0 )
++ {
++ /* Directory typically created with TIFFDeferStrileArrayWriting() */
++ TIFFSetupStrips(tif);
++ }
++ else if( !(tif->tif_flags&TIFF_DEFERSTRILELOAD) )
++ {
++ if( tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 )
++ {
++ if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripoffset_entry),
++ tif->tif_dir.td_nstrips,
++ &tif->tif_dir.td_stripoffset_p))
++ {
++ goto bad;
++ }
++ }
++ if( tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 )
++ {
++ if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripbytecount_entry),
++ tif->tif_dir.td_nstrips,
++ &tif->tif_dir.td_stripbytecount_p))
++ {
++ goto bad;
++ }
++ }
++ }
++
+ /*
+ * Make sure all non-color channels are extrasamples.
+ * If it's not the case, define them as such.
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch
new file mode 100644
index 0000000000..19ce68dfbc
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch
@@ -0,0 +1,60 @@
+From 19d775e058bf6bb0b0e9c56f406b775f9e725355 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sat, 2 Apr 2022 22:33:31 +0200
+Subject: [PATCH] tiffcp: avoid buffer overflow in "mode" string (fixes #400)
+
+CVE: CVE-2022-1355
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/c1ae29f9ebacd29b7c3e0c7db671af7db3584bc2]
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+
+---
+ tools/tiffcp.c | 25 ++++++++++++++++++++-----
+ 1 file changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 552d8fa..57eef90 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -274,19 +274,34 @@ main(int argc, char* argv[])
+ deftilewidth = atoi(optarg);
+ break;
+ case 'B':
+- *mp++ = 'b'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'b'; *mp = '\0';
++ }
+ break;
+ case 'L':
+- *mp++ = 'l'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'l'; *mp = '\0';
++ }
+ break;
+ case 'M':
+- *mp++ = 'm'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'm'; *mp = '\0';
++ }
+ break;
+ case 'C':
+- *mp++ = 'c'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'c'; *mp = '\0';
++ }
+ break;
+ case '8':
+- *mp++ = '8'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode)-1))
++ {
++ *mp++ = '8'; *mp = '\0';
++ }
+ break;
+ case 'x':
+ pageInSeq = 1;
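Every case label in the hunk above repeats the same guard: a new flag character is appended to the fixed-size mode[] string only while there is still room for it plus the terminating NUL. A small standalone illustration of that guard in C (function and parameter names are chosen here for clarity and do not appear in tiffcp.c):

    #include <string.h>

    /* Append one flag character to a fixed-size, NUL-terminated string,
     * doing nothing when the buffer is already full -- the same check the
     * patch wraps around every  *mp++ = ...; *mp = '\0';  pair. */
    static void append_mode_flag(char *mode, size_t mode_size, char flag)
    {
        size_t len = strlen(mode);
        if (len < mode_size - 1) {   /* room for the new char and the NUL */
            mode[len] = flag;
            mode[len + 1] = '\0';
        }
    }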
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2867.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2867.patch
new file mode 100644
index 0000000000..73905acb17
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2867.patch
@@ -0,0 +1,129 @@
+From cca32f0d4f3dd2bd73d044bd6991ab3c764fc718 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sun, 6 Feb 2022 17:53:53 +0100
+Subject: [PATCH] tiffcrop.c: This update fixes also issues #350 and #351.
+
+ Issue 350 is fixed by checking for not allowed zone input cases like -Z 0:0
+ in getCropOffsets().
+
+CVE: CVE-2022-2867
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/7d7bfa4416366ec64068ac389414241ed4730a54?merge_request_iid=294]
+
+Signed-off-by: Teoh Jay Shen <jay.shen.teoh@intel.com>
+
+---
+ tools/tiffcrop.c | 58 +++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 40 insertions(+), 18 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 4a4ace8..0ef5bb2 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5194,20 +5194,33 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1);
+ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2);
+ }
+- /* region needs to be within image sizes 0.. width-1; 0..length-1
+- * - be aware x,y are already casted to (uint32_t) and avoid (0 - 1)
++ /* a) Region needs to be within image sizes 0.. width-1; 0..length-1
++ * b) Corners are expected to be submitted as top-left to bottom-right.
++ * Therefore, check that and reorder input.
++ * (be aware x,y are already casted to (uint32_t) and avoid (0 - 1) )
+ */
+- if (x1 > image->width - 1)
++ uint32_t aux;
++ if (x1 > x2) {
++ aux = x1;
++ x1 = x2;
++ x2 = aux;
++ }
++ if (y1 > y2) {
++ aux = y1;
++ y1 = y2;
++ y2 = aux;
++ }
++ if (x1 > image->width - 1)
+ crop->regionlist[i].x1 = image->width - 1;
+- else if (x1 > 0)
+- crop->regionlist[i].x1 = (uint32_t) (x1 - 1);
++ else if (x1 > 0)
++ crop->regionlist[i].x1 = (uint32_t)(x1 - 1);
+
+- if (x2 > image->width - 1)
+- crop->regionlist[i].x2 = image->width - 1;
+- else if (x2 > 0)
+- crop->regionlist[i].x2 = (uint32_t)(x2 - 1);
++ if (x2 > image->width - 1)
++ crop->regionlist[i].x2 = image->width - 1;
++ else if (x2 > 0)
++ crop->regionlist[i].x2 = (uint32_t)(x2 - 1);
+
+- zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++ zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
+
+ if (y1 > image->length - 1)
+ crop->regionlist[i].y1 = image->length - 1;
+@@ -5219,8 +5232,7 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ else if (y2 > 0)
+ crop->regionlist[i].y2 = (uint32_t)(y2 - 1);
+
+- zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
+-
++ zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
+ if (zwidth > max_width)
+ max_width = zwidth;
+ if (zlength > max_length)
+@@ -5250,7 +5262,7 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ }
+ }
+ return (0);
+- }
++ } /* crop_mode == CROP_REGIONS */
+
+ /* Convert crop margins into offsets into image
+ * Margins are expressed as pixel rows and columns, not bytes
+@@ -5286,7 +5298,7 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ bmargin = (uint32_t) 0;
+ return (-1);
+ }
+- }
++ } /* crop_mode == CROP_MARGINS */
+ else
+ { /* no margins requested */
+ tmargin = (uint32_t) 0;
+@@ -5494,10 +5506,17 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ else
+ crop->selections = crop->zones;
+
+- for (i = 0; i < crop->zones; i++)
++ /* Initialize regions iterator i */
++ i = 0;
++ for (int j = 0; j < crop->zones; j++)
+ {
+- seg = crop->zonelist[i].position;
+- total = crop->zonelist[i].total;
++ seg = crop->zonelist[j].position;
++ total = crop->zonelist[j].total;
++
++ /* check for not allowed zone cases like 0:0; 4:3; etc. and skip that input */
++ if (seg == 0 || total == 0 || seg > total) {
++ continue;
++ }
+
+ switch (crop->edge_ref)
+ {
+@@ -5626,8 +5645,11 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ i + 1, zwidth, zlength,
+ crop->regionlist[i].x1, crop->regionlist[i].x2,
+ crop->regionlist[i].y1, crop->regionlist[i].y2);
++ /* increment regions iterator */
++ i++;
+ }
+-
++ /* set number of generated regions out of given zones */
++ crop->selections = i;
+ return (0);
+ } /* end getCropOffsets */
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2869.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2869.patch
new file mode 100644
index 0000000000..bda3427c0f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2869.patch
@@ -0,0 +1,84 @@
+From b4cf40182c865db554c6e67034afa6ea12c5554d Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sun, 6 Feb 2022 10:53:45 +0100
+Subject: [PATCH] tiffcrop.c: Fix issue #352 heap-buffer-overflow by correcting
+
+ uint32_t underflow.
+
+CVE: CVE-2022-2869
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/bcf28bb7f630f24fa47701a9907013f3548092cd?merge_request_iid=294]
+
+Signed-off-by: Teoh Jay Shen <jay.shen.teoh@intel.com>
+
+---
+ tools/tiffcrop.c | 34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index b9b13d8..4a4ace8 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5194,26 +5194,30 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1);
+ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2);
+ }
+- if (x1 < 1)
+- crop->regionlist[i].x1 = 0;
+- else
++ /* region needs to be within image sizes 0.. width-1; 0..length-1
++ * - be aware x,y are already casted to (uint32_t) and avoid (0 - 1)
++ */
++ if (x1 > image->width - 1)
++ crop->regionlist[i].x1 = image->width - 1;
++ else if (x1 > 0)
+ crop->regionlist[i].x1 = (uint32_t) (x1 - 1);
+
+- if (x2 > image->width - 1)
+- crop->regionlist[i].x2 = image->width - 1;
+- else
+- crop->regionlist[i].x2 = (uint32_t) (x2 - 1);
++ if (x2 > image->width - 1)
++ crop->regionlist[i].x2 = image->width - 1;
++ else if (x2 > 0)
++ crop->regionlist[i].x2 = (uint32_t)(x2 - 1);
++
+ zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
+
+- if (y1 < 1)
+- crop->regionlist[i].y1 = 0;
+- else
+- crop->regionlist[i].y1 = (uint32_t) (y1 - 1);
++ if (y1 > image->length - 1)
++ crop->regionlist[i].y1 = image->length - 1;
++ else if (y1 > 0)
++ crop->regionlist[i].y1 = (uint32_t)(y1 - 1);
+
+ if (y2 > image->length - 1)
+ crop->regionlist[i].y2 = image->length - 1;
+- else
+- crop->regionlist[i].y2 = (uint32_t) (y2 - 1);
++ else if (y2 > 0)
++ crop->regionlist[i].y2 = (uint32_t)(y2 - 1);
+
+ zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
+
+@@ -5376,7 +5380,7 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ crop_width = endx - startx + 1;
+ crop_length = endy - starty + 1;
+
+- if (crop_width <= 0)
++ if (endx + 1 <= startx)
+ {
+ TIFFError("computeInputPixelOffsets",
+ "Invalid left/right margins and /or image crop width requested");
+@@ -5385,7 +5389,7 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ if (crop_width > image->width)
+ crop_width = image->width;
+
+- if (crop_length <= 0)
++ if (endy + 1 <= starty)
+ {
+ TIFFError("computeInputPixelOffsets",
+ "Invalid top/bottom margins and /or image crop length requested");
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2953.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2953.patch
new file mode 100644
index 0000000000..92906521b0
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-2953.patch
@@ -0,0 +1,87 @@
+From 05ef5e05a0b8d18ab075e09b1ea349acc0035e67 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Mon, 15 Aug 2022 22:11:03 +0200
+Subject: [PATCH] tiffcrop: disable incompatibility of -S
+
+CVE: CVE-2022-2953
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+Signed-off-by: Zheng Qiu <zheng.qiu@windriver.com>
+
+According to Richard Nolde
+https://gitlab.com/libtiff/libtiff/-/issues/401#note_877637400 the
+tiffcrop option "-S" is also mutually exclusive to the other crop
+options (-X|-Y), -Z and -z.
+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is now checked, and tiffcrop exits if such mutually exclusive arguments are combined.
+
+This MR will fix the following tiffcrop issues: #349, #414, #422, #423, #424
+
+---
+ tools/tiffcrop.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index b596f9e..8af85c9 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -173,12 +173,12 @@ static char tiffcrop_rev_date[] = "02-09-2022";
+ #define ROTATECW_270 32
+ #define ROTATE_ANY (ROTATECW_90 | ROTATECW_180 | ROTATECW_270)
+
+-#define CROP_NONE 0
+-#define CROP_MARGINS 1
+-#define CROP_WIDTH 2
+-#define CROP_LENGTH 4
+-#define CROP_ZONES 8
+-#define CROP_REGIONS 16
++#define CROP_NONE 0 /* "-S" -> Page_MODE_ROWSCOLS and page->rows/->cols != 0 */
++#define CROP_MARGINS 1 /* "-m" */
++#define CROP_WIDTH 2 /* "-X" */
++#define CROP_LENGTH 4 /* "-Y" */
++#define CROP_ZONES 8 /* "-Z" */
++#define CROP_REGIONS 16 /* "-z" */
+ #define CROP_ROTATE 32
+ #define CROP_MIRROR 64
+ #define CROP_INVERT 128
+@@ -316,7 +316,7 @@ struct crop_mask {
+ #define PAGE_MODE_RESOLUTION 1
+ #define PAGE_MODE_PAPERSIZE 2
+ #define PAGE_MODE_MARGINS 4
+-#define PAGE_MODE_ROWSCOLS 8
++#define PAGE_MODE_ROWSCOLS 8 /* for -S option */
+
+ #define INVERT_DATA_ONLY 10
+ #define INVERT_DATA_AND_TAG 11
+@@ -781,7 +781,7 @@ static const char usage_info[] =
+ " The four debug/dump options are independent, though it makes little sense to\n"
+ " specify a dump file without specifying a detail level.\n"
+ "\n"
+-"Note: The (-X|-Y), -Z and -z options are mutually exclusive.\n"
++"Note: The (-X|-Y), -Z, -z and -S options are mutually exclusive.\n"
+ " In no case should the options be applied to a given selection successively.\n"
+ "\n"
+ ;
+@@ -2133,13 +2133,14 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+ /*NOTREACHED*/
+ }
+ }
+- /*-- Check for not allowed combinations (e.g. -X, -Y and -Z and -z are mutually exclusive) --*/
+- char XY, Z, R;
++ /*-- Check for not allowed combinations (e.g. -X, -Y and -Z, -z and -S are mutually exclusive) --*/
++ char XY, Z, R, S;
+ XY = ((crop_data->crop_mode & CROP_WIDTH) || (crop_data->crop_mode & CROP_LENGTH));
+ Z = (crop_data->crop_mode & CROP_ZONES);
+ R = (crop_data->crop_mode & CROP_REGIONS);
+- if ((XY && Z) || (XY && R) || (Z && R)) {
+- TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z and -z are mutually exclusive.->Exit");
++ S = (page->mode & PAGE_MODE_ROWSCOLS);
++ if ((XY && Z) || (XY && R) || (XY && S) || (Z && R) || (Z && S) || (R && S)) {
++ TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z, -z and -S are mutually exclusive.->Exit");
+ exit(EXIT_FAILURE);
+ }
+ } /* end process_command_opts */
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-34526.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-34526.patch
new file mode 100644
index 0000000000..f3f8121735
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-34526.patch
@@ -0,0 +1,27 @@
+From 786a8b6fd1384c6e20c17729822d1f61ed569320 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 18 Aug 2022 10:46:30 +0530
+Subject: [PATCH] CVE-2022-34526
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/275735d0354e39c0ac1dc3c0db2120d6f31d1990]
+CVE: CVE-2022-34526
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ libtiff/tif_dirinfo.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/libtiff/tif_dirinfo.c b/libtiff/tif_dirinfo.c
+index 8565dfb..0f722a5 100644
+--- a/libtiff/tif_dirinfo.c
++++ b/libtiff/tif_dirinfo.c
+@@ -1157,6 +1157,9 @@ _TIFFCheckFieldIsValidForCodec(TIFF *tif, ttag_t tag)
+ default:
+ return 1;
+ }
++ if( !TIFFIsCODECConfigured(tif->tif_dir.td_compression) ) {
++ return 0;
++ }
+ /* Check if codec specific tags are allowed for the current
+ * compression scheme (codec) */
+ switch (tif->tif_dir.td_compression) {
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-3970.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-3970.patch
new file mode 100644
index 0000000000..3779ebf646
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-3970.patch
@@ -0,0 +1,38 @@
+From 11c8026913e190b02266c1247e7a770e488d925e Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 8 Nov 2022 15:16:58 +0100
+Subject: [PATCH] TIFFReadRGBATileExt(): fix (unsigned) integer overflow on
+ strips/tiles > 2 GB
+
+Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=53137
+Upstream-Status: Accepted
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+ libtiff/tif_getimage.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/libtiff/tif_getimage.c b/libtiff/tif_getimage.c
+index a1b6570b..9a2e0c59 100644
+--- a/libtiff/tif_getimage.c
++++ b/libtiff/tif_getimage.c
+@@ -3058,15 +3058,15 @@ TIFFReadRGBATileExt(TIFF* tif, uint32_t col, uint32_t row, uint32_t * raster, in
+ return( ok );
+
+ for( i_row = 0; i_row < read_ysize; i_row++ ) {
+- memmove( raster + (tile_ysize - i_row - 1) * tile_xsize,
+- raster + (read_ysize - i_row - 1) * read_xsize,
++ memmove( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize,
++ raster + (size_t)(read_ysize - i_row - 1) * read_xsize,
+ read_xsize * sizeof(uint32_t) );
+- _TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize+read_xsize,
++ _TIFFmemset( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize+read_xsize,
+ 0, sizeof(uint32_t) * (tile_xsize - read_xsize) );
+ }
+
+ for( i_row = read_ysize; i_row < tile_ysize; i_row++ ) {
+- _TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize,
++ _TIFFmemset( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize,
+ 0, sizeof(uint32_t) * tile_xsize );
+ }
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-40090.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-40090.patch
new file mode 100644
index 0000000000..fe48dc6028
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-40090.patch
@@ -0,0 +1,569 @@
+From c7caec9a4d8f24c17e667480d2c7d0d51c9fae41 Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Thu, 6 Oct 2022 10:11:05 +0000
+Subject: [PATCH] Improved IFD-Loop Handling (fixes #455)
+
+IFD infinite looping was not fixed by MR 20 (see #455).
+An improved IFD loop handling is proposed.
+
+Basic approach:
+
+- The order in the entire chain must be checked, and not only whether an offset has already been read once.
+- To do this, pairs of directory number and offset are stored and checked.
+- The offset of a directory number can change.
+- TIFFAdvanceDirectory() must also perform an IFD loop check.
+- TIFFCheckDirOffset() is replaced by _TIFFCheckDirNumberAndOffset().
+
+Rules for the check:
+
+- If an offset is already in the list, it must have the same IFD number. Otherwise, it is an IFD loop.
+- If the offset is not in the list and the IFD number is greater than the number of list entries, a new list entry is added.
+- Otherwise, the offset of the IFD number is updated.
+
+Reference is also made to old bugzilla bug 2772 and MR 20, which did not solve the general issue.
+This MR closes #455
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/c7caec9a4d8f24c17e667480d2c7d0d51c9fae41]
+CVE: CVE-2022-40090
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_close.c | 6 +-
+ libtiff/tif_dir.c | 129 +++++++++++++++++++++++++-----------
+ libtiff/tif_dir.h | 2 +
+ libtiff/tif_dirread.c | 147 +++++++++++++++++++++++++++++++++---------
+ libtiff/tif_open.c | 3 +-
+ libtiff/tiffiop.h | 3 +-
+ 6 files changed, 219 insertions(+), 71 deletions(-)
+
+diff --git a/libtiff/tif_close.c b/libtiff/tif_close.c
+index 0fe7af4..2fe2bde 100644
+--- a/libtiff/tif_close.c
++++ b/libtiff/tif_close.c
+@@ -52,8 +52,10 @@ TIFFCleanup(TIFF* tif)
+ (*tif->tif_cleanup)(tif);
+ TIFFFreeDirectory(tif);
+
+- if (tif->tif_dirlist)
+- _TIFFfree(tif->tif_dirlist);
++ if (tif->tif_dirlistoff)
++ _TIFFfree(tif->tif_dirlistoff);
++ if (tif->tif_dirlistdirn)
++ _TIFFfree(tif->tif_dirlistdirn);
+
+ /*
+ * Clean up client info links.
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 1402c8e..6d4bf58 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -1511,12 +1511,22 @@ TIFFDefaultDirectory(TIFF* tif)
+ }
+
+ static int
+-TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdir, uint64_t* off)
++TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdiroff, uint64_t* off, uint16_t* nextdirnum)
+ {
+ static const char module[] = "TIFFAdvanceDirectory";
++
++ /* Add this directory to the directory list, if not already in. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, *nextdirnum, *nextdiroff)) {
++ TIFFErrorExt(tif->tif_clientdata, module, "Starting directory %"PRIu16" at offset 0x%"PRIx64" (%"PRIu64") might cause an IFD loop",
++ *nextdirnum, *nextdiroff, *nextdiroff);
++ *nextdiroff = 0;
++ *nextdirnum = 0;
++ return(0);
++ }
++
+ if (isMapped(tif))
+ {
+- uint64_t poff=*nextdir;
++ uint64_t poff=*nextdiroff;
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+ {
+ tmsize_t poffa,poffb,poffc,poffd;
+@@ -1527,7 +1537,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdir, uint64_t* off)
+ if (((uint64_t)poffa != poff) || (poffb < poffa) || (poffb < (tmsize_t)sizeof(uint16_t)) || (poffb > tif->tif_size))
+ {
+ TIFFErrorExt(tif->tif_clientdata,module,"Error fetching directory count");
+- *nextdir=0;
++ *nextdiroff=0;
+ return(0);
+ }
+ _TIFFmemcpy(&dircount,tif->tif_base+poffa,sizeof(uint16_t));
+@@ -1545,7 +1555,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdir, uint64_t* off)
+ _TIFFmemcpy(&nextdir32,tif->tif_base+poffc,sizeof(uint32_t));
+ if (tif->tif_flags&TIFF_SWAB)
+ TIFFSwabLong(&nextdir32);
+- *nextdir=nextdir32;
++ *nextdiroff=nextdir32;
+ }
+ else
+ {
+@@ -1577,11 +1587,10 @@ TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdir, uint64_t* off)
+ }
+ if (off!=NULL)
+ *off=(uint64_t)poffc;
+- _TIFFmemcpy(nextdir,tif->tif_base+poffc,sizeof(uint64_t));
++ _TIFFmemcpy(nextdiroff,tif->tif_base+poffc,sizeof(uint64_t));
+ if (tif->tif_flags&TIFF_SWAB)
+- TIFFSwabLong8(nextdir);
++ TIFFSwabLong8(nextdiroff);
+ }
+- return(1);
+ }
+ else
+ {
+@@ -1589,7 +1598,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdir, uint64_t* off)
+ {
+ uint16_t dircount;
+ uint32_t nextdir32;
+- if (!SeekOK(tif, *nextdir) ||
++ if (!SeekOK(tif, *nextdiroff) ||
+ !ReadOK(tif, &dircount, sizeof (uint16_t))) {
+ TIFFErrorExt(tif->tif_clientdata, module, "%s: Error fetching directory count",
+ tif->tif_name);
+@@ -1610,13 +1619,13 @@ TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdir, uint64_t* off)
+ }
+ if (tif->tif_flags & TIFF_SWAB)
+ TIFFSwabLong(&nextdir32);
+- *nextdir=nextdir32;
++ *nextdiroff=nextdir32;
+ }
+ else
+ {
+ uint64_t dircount64;
+ uint16_t dircount16;
+- if (!SeekOK(tif, *nextdir) ||
++ if (!SeekOK(tif, *nextdiroff) ||
+ !ReadOK(tif, &dircount64, sizeof (uint64_t))) {
+ TIFFErrorExt(tif->tif_clientdata, module, "%s: Error fetching directory count",
+ tif->tif_name);
+@@ -1636,17 +1645,27 @@ TIFFAdvanceDirectory(TIFF* tif, uint64_t* nextdir, uint64_t* off)
+ else
+ (void) TIFFSeekFile(tif,
+ dircount16*20, SEEK_CUR);
+- if (!ReadOK(tif, nextdir, sizeof (uint64_t))) {
++ if (!ReadOK(tif, nextdiroff, sizeof (uint64_t))) {
+ TIFFErrorExt(tif->tif_clientdata, module,
+ "%s: Error fetching directory link",
+ tif->tif_name);
+ return (0);
+ }
+ if (tif->tif_flags & TIFF_SWAB)
+- TIFFSwabLong8(nextdir);
++ TIFFSwabLong8(nextdiroff);
+ }
+- return (1);
+ }
++ if (*nextdiroff != 0) {
++ (*nextdirnum)++;
++ /* Check next directory for IFD looping and if so, set it as last directory. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, *nextdirnum, *nextdiroff)) {
++ TIFFWarningExt(tif->tif_clientdata, module, "the next directory %"PRIu16" at offset 0x%"PRIx64" (%"PRIu64") might be an IFD loop. Treating directory %"PRIu16" as last directory",
++ *nextdirnum, *nextdiroff, *nextdiroff, *nextdirnum-1);
++ *nextdiroff = 0;
++ (*nextdirnum)--;
++ }
++ }
++ return (1);
+ }
+
+ /*
+@@ -1656,14 +1675,16 @@ uint16_t
+ TIFFNumberOfDirectories(TIFF* tif)
+ {
+ static const char module[] = "TIFFNumberOfDirectories";
+- uint64_t nextdir;
++ uint64_t nextdiroff;
++ uint16_t nextdirnum;
+ uint16_t n;
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+- nextdir = tif->tif_header.classic.tiff_diroff;
++ nextdiroff = tif->tif_header.classic.tiff_diroff;
+ else
+- nextdir = tif->tif_header.big.tiff_diroff;
++ nextdiroff = tif->tif_header.big.tiff_diroff;
++ nextdirnum = 0;
+ n = 0;
+- while (nextdir != 0 && TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ while (nextdiroff != 0 && TIFFAdvanceDirectory(tif, &nextdiroff, NULL, &nextdirnum))
+ {
+ if (n != 65535) {
+ ++n;
+@@ -1686,28 +1707,30 @@ TIFFNumberOfDirectories(TIFF* tif)
+ int
+ TIFFSetDirectory(TIFF* tif, uint16_t dirn)
+ {
+- uint64_t nextdir;
++ uint64_t nextdiroff;
++ uint16_t nextdirnum;
+ uint16_t n;
+
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+- nextdir = tif->tif_header.classic.tiff_diroff;
++ nextdiroff = tif->tif_header.classic.tiff_diroff;
+ else
+- nextdir = tif->tif_header.big.tiff_diroff;
+- for (n = dirn; n > 0 && nextdir != 0; n--)
+- if (!TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ nextdiroff = tif->tif_header.big.tiff_diroff;
++ nextdirnum = 0;
++ for (n = dirn; n > 0 && nextdiroff != 0; n--)
++ if (!TIFFAdvanceDirectory(tif, &nextdiroff, NULL, &nextdirnum))
+ return (0);
+- tif->tif_nextdiroff = nextdir;
++ /* If the n-th directory could not be reached (does not exist),
++ * return here without touching anything further. */
++ if (nextdiroff == 0 || n > 0)
++ return (0);
++
++ tif->tif_nextdiroff = nextdiroff;
+ /*
+ * Set curdir to the actual directory index. The
+ * -1 is because TIFFReadDirectory will increment
+ * tif_curdir after successfully reading the directory.
+ */
+ tif->tif_curdir = (dirn - n) - 1;
+- /*
+- * Reset tif_dirnumber counter and start new list of seen directories.
+- * We need this to prevent IFD loops.
+- */
+- tif->tif_dirnumber = 0;
+ return (TIFFReadDirectory(tif));
+ }
+
+@@ -1720,13 +1743,42 @@ TIFFSetDirectory(TIFF* tif, uint16_t dirn)
+ int
+ TIFFSetSubDirectory(TIFF* tif, uint64_t diroff)
+ {
+- tif->tif_nextdiroff = diroff;
+- /*
+- * Reset tif_dirnumber counter and start new list of seen directories.
+- * We need this to prevent IFD loops.
++ /* Match nextdiroff and curdir for consistent IFD-loop checking.
++ * Only with TIFFSetSubDirectory() the IFD list can be corrupted with invalid offsets
++ * within the main IFD tree.
++ * In the case of several subIFDs of a main image,
++ * there are two possibilities that are not even mutually exclusive.
++ * a.) The subIFD tag contains an array with all offsets of the subIFDs.
++ * b.) The SubIFDs are concatenated with their NextIFD parameters.
++ * (refer to https://www.awaresystems.be/imaging/tiff/specification/TIFFPM6.pdf.)
+ */
+- tif->tif_dirnumber = 0;
+- return (TIFFReadDirectory(tif));
++ int retval;
++ uint16_t curdir = 0;
++ int8_t probablySubIFD = 0;
++ if (diroff == 0) {
++ /* Special case to invalidate the tif_lastdiroff member. */
++ tif->tif_curdir = 65535;
++ } else {
++ if (!_TIFFGetDirNumberFromOffset(tif, diroff, &curdir)) {
++ /* Non-existing offsets might point to a SubIFD or invalid IFD.*/
++ probablySubIFD = 1;
++ }
++ /* -1 because TIFFReadDirectory() will increment tif_curdir. */
++ tif->tif_curdir = curdir - 1;
++ }
++
++ tif->tif_nextdiroff = diroff;
++ retval = TIFFReadDirectory(tif);
++ /* If failed, curdir was not incremented in TIFFReadDirectory(), so set it back. */
++ if (!retval )tif->tif_curdir++;
++ if (retval && probablySubIFD) {
++ /* Reset IFD list to start new one for SubIFD chain and also start SubIFD chain with tif_curdir=0. */
++ tif->tif_dirnumber = 0;
++ tif->tif_curdir = 0; /* first directory of new chain */
++ /* add this offset to new IFD list */
++ _TIFFCheckDirNumberAndOffset(tif, tif->tif_curdir, diroff);
++ }
++ return (retval);
+ }
+
+ /*
+@@ -1750,12 +1802,15 @@ TIFFLastDirectory(TIFF* tif)
+
+ /*
+ * Unlink the specified directory from the directory chain.
++ * Note: First directory starts with number dirn=1.
++ * This is different to TIFFSetDirectory() where the first directory starts with zero.
+ */
+ int
+ TIFFUnlinkDirectory(TIFF* tif, uint16_t dirn)
+ {
+ static const char module[] = "TIFFUnlinkDirectory";
+ uint64_t nextdir;
++ uint16_t nextdirnum;
+ uint64_t off;
+ uint16_t n;
+
+@@ -1779,19 +1834,21 @@ TIFFUnlinkDirectory(TIFF* tif, uint16_t dirn)
+ nextdir = tif->tif_header.big.tiff_diroff;
+ off = 8;
+ }
++ nextdirnum = 0; /* First directory is dirn=0 */
++
+ for (n = dirn-1; n > 0; n--) {
+ if (nextdir == 0) {
+ TIFFErrorExt(tif->tif_clientdata, module, "Directory %"PRIu16" does not exist", dirn);
+ return (0);
+ }
+- if (!TIFFAdvanceDirectory(tif, &nextdir, &off))
++ if (!TIFFAdvanceDirectory(tif, &nextdir, &off, &nextdirnum))
+ return (0);
+ }
+ /*
+ * Advance to the directory to be unlinked and fetch
+ * the offset of the directory that follows.
+ */
+- if (!TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ if (!TIFFAdvanceDirectory(tif, &nextdir, NULL, &nextdirnum))
+ return (0);
+ /*
+ * Go back and patch the link field of the preceding
+diff --git a/libtiff/tif_dir.h b/libtiff/tif_dir.h
+index 900dec1..f1a5125 100644
+--- a/libtiff/tif_dir.h
++++ b/libtiff/tif_dir.h
+@@ -302,6 +302,8 @@ extern int _TIFFMergeFields(TIFF*, const TIFFField[], uint32_t);
+ extern const TIFFField* _TIFFFindOrRegisterField(TIFF *, uint32_t, TIFFDataType);
+ extern TIFFField* _TIFFCreateAnonField(TIFF *, uint32_t, TIFFDataType);
+ extern int _TIFFCheckFieldIsValidForCodec(TIFF *tif, ttag_t tag);
++extern int _TIFFCheckDirNumberAndOffset(TIFF *tif, uint16_t dirn, uint64_t diroff);
++extern int _TIFFGetDirNumberFromOffset(TIFF *tif, uint64_t diroff, uint16_t *dirn);
+
+ #if defined(__cplusplus)
+ }
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index d7cccbe..f07de60 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -154,7 +154,6 @@ static void TIFFReadDirectoryFindFieldInfo(TIFF* tif, uint16_t tagid, uint32_t*
+
+ static int EstimateStripByteCounts(TIFF* tif, TIFFDirEntry* dir, uint16_t dircount);
+ static void MissingRequired(TIFF*, const char*);
+-static int TIFFCheckDirOffset(TIFF* tif, uint64_t diroff);
+ static int CheckDirCount(TIFF*, TIFFDirEntry*, uint32_t);
+ static uint16_t TIFFFetchDirectory(TIFF* tif, uint64_t diroff, TIFFDirEntry** pdir, uint64_t* nextdiroff);
+ static int TIFFFetchNormalTag(TIFF*, TIFFDirEntry*, int recover);
+@@ -3590,12 +3589,19 @@ TIFFReadDirectory(TIFF* tif)
+ int bitspersample_read = FALSE;
+ int color_channels;
+
+- tif->tif_diroff=tif->tif_nextdiroff;
+- if (!TIFFCheckDirOffset(tif,tif->tif_nextdiroff))
+- return 0; /* last offset or bad offset (IFD looping) */
+- (*tif->tif_cleanup)(tif); /* cleanup any previous compression state */
+- tif->tif_curdir++;
+- nextdiroff = tif->tif_nextdiroff;
++ if (tif->tif_nextdiroff == 0) {
++ /* In this special case, tif_diroff needs also to be set to 0. */
++ tif->tif_diroff = tif->tif_nextdiroff;
++ return 0; /* last offset, thus no checking necessary */
++ }
++
++ nextdiroff = tif->tif_nextdiroff;
++ /* tif_curdir++ and tif_nextdiroff should only be updated after SUCCESSFUL reading of the directory. Otherwise, invalid IFD offsets could corrupt the IFD list. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, tif->tif_curdir + 1, nextdiroff)) {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Didn't read next directory due to IFD looping at offset 0x%"PRIx64" (%"PRIu64") to offset 0x%"PRIx64" (%"PRIu64")", tif->tif_diroff, tif->tif_diroff, nextdiroff, nextdiroff);
++ return 0; /* bad offset (IFD looping) */
++ }
+ dircount=TIFFFetchDirectory(tif,nextdiroff,&dir,&tif->tif_nextdiroff);
+ if (!dircount)
+ {
+@@ -3603,6 +3609,11 @@ TIFFReadDirectory(TIFF* tif)
+ "Failed to read directory at offset %" PRIu64, nextdiroff);
+ return 0;
+ }
++ /* Set global values after a valid directory has been fetched.
++ * tif_diroff is already set to nextdiroff in TIFFFetchDirectory() in the beginning. */
++ tif->tif_curdir++;
++ (*tif->tif_cleanup)(tif); /* cleanup any previous compression state */
++
+ TIFFReadDirectoryCheckOrder(tif,dir,dircount);
+
+ /*
+@@ -4687,53 +4698,127 @@ MissingRequired(TIFF* tif, const char* tagname)
+ }
+
+ /*
+- * Check the directory offset against the list of already seen directory
+- * offsets. This is a trick to prevent IFD looping. The one can create TIFF
+- * file with looped directory pointers. We will maintain a list of already
+- * seen directories and check every IFD offset against that list.
++ * Check the directory number and offset against the list of already seen
++ * directory numbers and offsets. This is a trick to prevent IFD looping.
++ * One can create a TIFF file with looped directory pointers. We will
++ * maintain a list of already seen directories and check every IFD offset
++ * and its IFD number against that list. However, the offset of an IFD number
++ * can change - e.g. when writing updates to file.
++ * Returns 1 if all is OK; 0 if the last directory or an IFD loop is encountered,
++ * or an error has occurred.
+ */
+-static int
+-TIFFCheckDirOffset(TIFF* tif, uint64_t diroff)
++int
++_TIFFCheckDirNumberAndOffset(TIFF *tif, uint16_t dirn, uint64_t diroff)
+ {
+ uint16_t n;
+
+ if (diroff == 0) /* no more directories */
+ return 0;
+ if (tif->tif_dirnumber == 65535) {
+- TIFFErrorExt(tif->tif_clientdata, "TIFFCheckDirOffset",
+- "Cannot handle more than 65535 TIFF directories");
+- return 0;
++ TIFFErrorExt(tif->tif_clientdata, "_TIFFCheckDirNumberAndOffset",
++ "Cannot handle more than 65535 TIFF directories");
++ return 0;
+ }
+
+- for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlist; n++) {
+- if (tif->tif_dirlist[n] == diroff)
+- return 0;
++ /* Check if offset is already in the list:
++ * - yes: check, if offset is at the same IFD number - if not, it is an IFD loop
++ * - no: add to list or update offset at that IFD number
++ */
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistdirn && tif->tif_dirlistoff; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ if (tif->tif_dirlistdirn[n] == dirn) {
++ return 1;
++ } else {
++ TIFFWarningExt(tif->tif_clientdata, "_TIFFCheckDirNumberAndOffset",
++ "TIFF directory %"PRIu16" has IFD looping to directory %"PRIu16" at offset 0x%"PRIx64" (%"PRIu64")",
++ dirn-1, tif->tif_dirlistdirn[n], diroff, diroff);
++ return 0;
++ }
++ }
++ }
++ /* Check if offset of an IFD has been changed and update offset of that IFD number. */
++ if (dirn < tif->tif_dirnumber && tif->tif_dirlistdirn && tif->tif_dirlistoff) {
++ /* tif_dirlistdirn can have IFD numbers dirn in random order */
++ for (n = 0; n < tif->tif_dirnumber; n++) {
++ if (tif->tif_dirlistdirn[n] == dirn) {
++ tif->tif_dirlistoff[n] = diroff;
++ return 1;
++ }
++ }
+ }
+
++ /* Add IFD offset and dirn to IFD directory list */
+ tif->tif_dirnumber++;
+
+- if (tif->tif_dirlist == NULL || tif->tif_dirnumber > tif->tif_dirlistsize) {
+- uint64_t* new_dirlist;
+-
++ if (tif->tif_dirlistoff == NULL || tif->tif_dirlistdirn == NULL || tif->tif_dirnumber > tif->tif_dirlistsize) {
++ uint64_t *new_dirlist;
+ /*
+ * XXX: Reduce memory allocation granularity of the dirlist
+ * array.
+ */
+- new_dirlist = (uint64_t*)_TIFFCheckRealloc(tif, tif->tif_dirlist,
+- tif->tif_dirnumber, 2 * sizeof(uint64_t), "for IFD list");
++ if (tif->tif_dirnumber >= 32768)
++ tif->tif_dirlistsize = 65535;
++ else
++ tif->tif_dirlistsize = 2 * tif->tif_dirnumber;
++
++ new_dirlist = (uint64_t *)_TIFFCheckRealloc(tif, tif->tif_dirlistoff,
++ tif->tif_dirlistsize, sizeof(uint64_t), "for IFD offset list");
+ if (!new_dirlist)
+ return 0;
+- if( tif->tif_dirnumber >= 32768 )
+- tif->tif_dirlistsize = 65535;
+- else
+- tif->tif_dirlistsize = 2 * tif->tif_dirnumber;
+- tif->tif_dirlist = new_dirlist;
++ tif->tif_dirlistoff = new_dirlist;
++ new_dirlist = (uint64_t *)_TIFFCheckRealloc(tif, tif->tif_dirlistdirn,
++ tif->tif_dirlistsize, sizeof(uint16_t), "for IFD dirnumber list");
++ if (!new_dirlist)
++ return 0;
++ tif->tif_dirlistdirn = (uint16_t *)new_dirlist;
+ }
+
+- tif->tif_dirlist[tif->tif_dirnumber - 1] = diroff;
++ tif->tif_dirlistoff[tif->tif_dirnumber - 1] = diroff;
++ tif->tif_dirlistdirn[tif->tif_dirnumber - 1] = dirn;
+
+ return 1;
+-}
++} /* --- _TIFFCheckDirNumberAndOffset() ---*/
++
++/*
++ * Retrieve the matching IFD directory number of a given IFD offset
++ * from the list of directories already seen.
++ * Returns 1 if the offset was in the list and the directory number
++ * can be returned.
++ * Returns 0 otherwise, or if an error occurred.
++ */
++int
++_TIFFGetDirNumberFromOffset(TIFF *tif, uint64_t diroff, uint16_t* dirn)
++{
++ uint16_t n;
++
++ if (diroff == 0) /* no more directories */
++ return 0;
++ if (tif->tif_dirnumber == 65535) {
++ TIFFErrorExt(tif->tif_clientdata, "_TIFFGetDirNumberFromOffset",
++ "Cannot handle more than 65535 TIFF directories");
++ return 0;
++ }
++
++ /* Check if offset is already in the list and return matching directory number.
++ * Otherwise update IFD list using TIFFNumberOfDirectories()
++ * and search again in IFD list.
++ */
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistoff && tif->tif_dirlistdirn; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ *dirn = tif->tif_dirlistdirn[n];
++ return 1;
++ }
++ }
++ TIFFNumberOfDirectories(tif);
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistoff && tif->tif_dirlistdirn; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ *dirn = tif->tif_dirlistdirn[n];
++ return 1;
++ }
++ }
++ return 0;
++} /*--- _TIFFGetDirNumberFromOffset() ---*/
++
+
+ /*
+ * Check the count field of a directory entry against a known value. The
+diff --git a/libtiff/tif_open.c b/libtiff/tif_open.c
+index 9724162..f047c73 100644
+--- a/libtiff/tif_open.c
++++ b/libtiff/tif_open.c
+@@ -354,7 +354,8 @@ TIFFClientOpen(
+ if (!TIFFDefaultDirectory(tif))
+ goto bad;
+ tif->tif_diroff = 0;
+- tif->tif_dirlist = NULL;
++ tif->tif_dirlistoff = NULL;
++ tif->tif_dirlistdirn = NULL;
+ tif->tif_dirlistsize = 0;
+ tif->tif_dirnumber = 0;
+ return (tif);
+diff --git a/libtiff/tiffiop.h b/libtiff/tiffiop.h
+index c1d0276..9459fe8 100644
+--- a/libtiff/tiffiop.h
++++ b/libtiff/tiffiop.h
+@@ -117,7 +117,8 @@ struct tiff {
+ #define TIFF_CHOPPEDUPARRAYS 0x4000000U /* set when allocChoppedUpStripArrays() has modified strip array */
+ uint64_t tif_diroff; /* file offset of current directory */
+ uint64_t tif_nextdiroff; /* file offset of following directory */
+- uint64_t* tif_dirlist; /* list of offsets to already seen directories to prevent IFD looping */
++ uint64_t* tif_dirlistoff; /* list of offsets to already seen directories to prevent IFD looping */
++ uint16_t* tif_dirlistdirn; /* list of directory numbers to already seen directories to prevent IFD looping */
+ uint16_t tif_dirlistsize; /* number of entries in offset list */
+ uint16_t tif_dirnumber; /* number of already seen directories */
+ TIFFDirectory tif_dir; /* internal rep of current directory */
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-48281.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-48281.patch
new file mode 100644
index 0000000000..4f8dc35251
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-48281.patch
@@ -0,0 +1,26 @@
+From 97d65859bc29ee334012e9c73022d8a8e55ed586 Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Sat, 21 Jan 2023 15:58:10 +0000
+Subject: [PATCH] tiffcrop: Correct simple copy paste error. Fix #488.
+
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.2.0-1+deb11u4.debian.tar.xz]
+CVE: CVE-2022-48281
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ tools/tiffcrop.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: tiff-4.2.0/tools/tiffcrop.c
+===================================================================
+--- tiff-4.2.0.orig/tools/tiffcrop.c
++++ tiff-4.2.0/tools/tiffcrop.c
+@@ -7516,7 +7516,7 @@ processCropSelections(struct image_data
+ crop_buff = (unsigned char *)limitMalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+- prev_cropsize = seg_buffs[0].size;
++ prev_cropsize = seg_buffs[i].size;
+ if (prev_cropsize < cropsize)
+ {
+ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-0795_0796_0797_0798_0799.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-0795_0796_0797_0798_0799.patch
new file mode 100644
index 0000000000..498d5ec8ab
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-0795_0796_0797_0798_0799.patch
@@ -0,0 +1,162 @@
+From 7808740e100ba30ffb791044f3b14dec3e85ed6f Mon Sep 17 00:00:00 2001
+From: Markus Koschany <apo@debian.org>
+Date: Tue, 21 Feb 2023 14:26:43 +0100
+Subject: [PATCH] CVE-2023-0795
+
+This is also the fix for CVE-2023-0796, CVE-2023-0797, CVE-2023-0798,
+CVE-2023-0799.
+
+Bug-Debian: https://bugs.debian.org/1031632
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/afaabc3e50d4e5d80a94143f7e3c997e7e410f68
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2023-0795 CVE-2023-0796 CVE-2023-0797 CVE-2023-0798 CVE-2023-0799
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 51 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 30 insertions(+), 21 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index adf0f84..deba170 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -269,7 +269,6 @@ struct region {
+ uint32_t width; /* width in pixels */
+ uint32_t length; /* length in pixels */
+ uint32_t buffsize; /* size of buffer needed to hold the cropped region */
+- unsigned char *buffptr; /* address of start of the region */
+ };
+
+ /* Cropping parameters from command line and image data
+@@ -524,7 +523,7 @@ static int rotateContigSamples24bits(uint16_t, uint16_t, uint16_t, uint32_t,
+ static int rotateContigSamples32bits(uint16_t, uint16_t, uint16_t, uint32_t,
+ uint32_t, uint32_t, uint8_t *, uint8_t *);
+ static int rotateImage(uint16_t, struct image_data *, uint32_t *, uint32_t *,
+- unsigned char **);
++ unsigned char **, int);
+ static int mirrorImage(uint16_t, uint16_t, uint16_t, uint32_t, uint32_t,
+ unsigned char *);
+ static int invertImage(uint16_t, uint16_t, uint16_t, uint32_t, uint32_t,
+@@ -5219,7 +5218,6 @@ initCropMasks (struct crop_mask *cps)
+ cps->regionlist[i].width = 0;
+ cps->regionlist[i].length = 0;
+ cps->regionlist[i].buffsize = 0;
+- cps->regionlist[i].buffptr = NULL;
+ cps->zonelist[i].position = 0;
+ cps->zonelist[i].total = 0;
+ }
+@@ -6511,8 +6509,13 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ (uint16_t) (image->adjustments & ROTATE_ANY));
+ return (-1);
+ }
+-
+- if (rotateImage(rotation, image, &image->width, &image->length, work_buff_ptr))
++
++ /* Dummy variable in order not to switch two times the
++ * image->width,->length within rotateImage(),
++ * but switch xres, yres there. */
++ uint32_t width = image->width;
++ uint32_t length = image->length;
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, TRUE))
+ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+@@ -6580,7 +6583,6 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ /* These should not be needed for composite images */
+ crop->regionlist[i].width = crop_width;
+ crop->regionlist[i].length = crop_length;
+- crop->regionlist[i].buffptr = crop_buff;
+
+ src_rowsize = ((img_width * bps * spp) + 7) / 8;
+ dst_rowsize = (((crop_width * bps * count) + 7) / 8);
+@@ -6817,7 +6819,6 @@ extractSeparateRegion(struct image_data *image, struct crop_mask *crop,
+
+ crop->regionlist[region].width = crop_width;
+ crop->regionlist[region].length = crop_length;
+- crop->regionlist[region].buffptr = crop_buff;
+
+ src = read_buff;
+ dst = crop_buff;
+@@ -7695,7 +7696,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff))
++ &crop->combined_length, &crop_buff, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %"PRIu32" degrees", crop->rotation);
+@@ -7805,7 +7806,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ * ToDo: Therefore rotateImage() and its usage has to be reworked (e.g. like mirrorImage()) !!
+ */
+ if (rotateImage(crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff))
++ &crop->regionlist[i].length, &crop_buff, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %"PRIu16" degrees", crop->rotation);
+@@ -7937,7 +7938,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr))
++ &crop->combined_length, crop_buff_ptr, TRUE))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %"PRIu16" degrees", crop->rotation);
+@@ -8600,7 +8601,7 @@ rotateContigSamples32bits(uint16_t rotation, uint16_t spp, uint16_t bps, uint32_
+ /* Rotate an image by a multiple of 90 degrees clockwise */
+ static int
+ rotateImage(uint16_t rotation, struct image_data *image, uint32_t *img_width,
+- uint32_t *img_length, unsigned char **ibuff_ptr)
++ uint32_t *img_length, unsigned char **ibuff_ptr, int rot_image_params)
+ {
+ int shift_width;
+ uint32_t bytes_per_pixel, bytes_per_sample;
+@@ -8791,11 +8792,15 @@ rotateImage(uint16_t rotation, struct image_data *image, uint32_t *img_width,
+
+ *img_width = length;
+ *img_length = width;
+- image->width = length;
+- image->length = width;
+- res_temp = image->xres;
+- image->xres = image->yres;
+- image->yres = res_temp;
++ /* Only toggle image parameters if whole input image is rotated. */
++ if (rot_image_params)
++ {
++ image->width = length;
++ image->length = width;
++ res_temp = image->xres;
++ image->xres = image->yres;
++ image->yres = res_temp;
++ }
+ break;
+
+ case 270: if ((bps % 8) == 0) /* byte aligned data */
+@@ -8868,11 +8873,15 @@ rotateImage(uint16_t rotation, struct image_data *image, uint32_t *img_width,
+
+ *img_width = length;
+ *img_length = width;
+- image->width = length;
+- image->length = width;
+- res_temp = image->xres;
+- image->xres = image->yres;
+- image->yres = res_temp;
++ /* Only toggle image parameters if whole input image is rotated. */
++ if (rot_image_params)
++ {
++ image->width = length;
++ image->length = width;
++ res_temp = image->xres;
++ image->xres = image->yres;
++ image->yres = res_temp;
++ }
+ break;
+ default:
+ break;
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-0800_0801_0802_0803_0804.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-0800_0801_0802_0803_0804.patch
new file mode 100644
index 0000000000..8372bc35f2
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-0800_0801_0802_0803_0804.patch
@@ -0,0 +1,128 @@
+From 82a7fbb1fa7228499ffeb3a57a1d106a9626d57c Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Sun, 5 Feb 2023 15:53:15 +0000
+Subject: [PATCH] tiffcrop: added check for assumption on composite images
+ (fixes #496)
+
+tiffcrop: For composite images with more than one region, the combined_length or combined_width always needs to be equal, respectively. Otherwise, even the first section/region copy action might cause buffer overrun. This is now checked before the first copy action.
+
+Closes #496, #497, #498, #500, #501.
+
+Upstream-Status: Backport [import from fedora https://src.fedoraproject.org/rpms/libtiff/c/91856895aadf3cce6353f40c2feef9bf0b486440 ]
+CVE: CVE-2023-0800 CVE-2023-0801 CVE-2023-0802 CVE-2023-0803 CVE-2023-0804
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ tools/tiffcrop.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 66 insertions(+), 2 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 84e26ac6..480b927c 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5329,18 +5329,39 @@
+
+ crop->regionlist[i].buffsize = buffsize;
+ crop->bufftotal += buffsize;
++ /* For composite images with more than one region, the
++ * combined_length or combined_width always needs to be equal,
++ * respectively.
++ * Otherwise, even the first section/region copy
++ * action might cause buffer overrun. */
+ if (crop->img_mode == COMPOSITE_IMAGES)
+ {
+ switch (crop->edge_ref)
+ {
+ case EDGE_LEFT:
+ case EDGE_RIGHT:
++ if (i > 0 && zlength != crop->combined_length)
++ {
++ TIFFError(
++ "computeInputPixelOffsets",
++ "Only equal length regions can be combined for "
++ "-E left or right");
++ return (-1);
++ }
+ crop->combined_length = zlength;
+ crop->combined_width += zwidth;
+ break;
+ case EDGE_BOTTOM:
+ case EDGE_TOP: /* width from left, length from top */
+ default:
++ if (i > 0 && zwidth != crop->combined_width)
++ {
++ TIFFError("computeInputPixelOffsets",
++ "Only equal width regions can be "
++ "combined for -E "
++ "top or bottom");
++ return (-1);
++ }
+ crop->combined_width = zwidth;
+ crop->combined_length += zlength;
+ break;
+@@ -6546,6 +6567,46 @@
+ crop->combined_width = 0;
+ crop->combined_length = 0;
+
++ /* If there is more than one region, check beforehand whether all the width
++ * and length values of the regions are the same, respectively. */
++ switch (crop->edge_ref)
++ {
++ default:
++ case EDGE_TOP:
++ case EDGE_BOTTOM:
++ for (i = 1; i < crop->selections; i++)
++ {
++ uint32_t crop_width0 =
++ crop->regionlist[i - 1].x2 - crop->regionlist[i - 1].x1 + 1;
++ uint32_t crop_width1 =
++ crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++ if (crop_width0 != crop_width1)
++ {
++ TIFFError("extractCompositeRegions",
++ "Only equal width regions can be combined for -E "
++ "top or bottom");
++ return (1);
++ }
++ }
++ break;
++ case EDGE_LEFT:
++ case EDGE_RIGHT:
++ for (i = 1; i < crop->selections; i++)
++ {
++ uint32_t crop_length0 =
++ crop->regionlist[i - 1].y2 - crop->regionlist[i - 1].y1 + 1;
++ uint32_t crop_length1 =
++ crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
++ if (crop_length0 != crop_length1)
++ {
++ TIFFError("extractCompositeRegions",
++ "Only equal length regions can be combined for "
++ "-E left or right");
++ return (1);
++ }
++ }
++ }
++
+ for (i = 0; i < crop->selections; i++)
+ {
+ /* rows, columns, width, length are expressed in pixels */
+@@ -6570,7 +6631,8 @@
+ default:
+ case EDGE_TOP:
+ case EDGE_BOTTOM:
+- if ((i > 0) && (crop_width != crop->regionlist[i - 1].width))
++ if ((crop->selections > i + 1) &&
++ (crop_width != crop->regionlist[i + 1].width))
+ {
+ TIFFError ("extractCompositeRegions",
+ "Only equal width regions can be combined for -E top or bottom");
+@@ -6651,7 +6713,8 @@
+ break;
+ case EDGE_LEFT: /* splice the pieces of each row together, side by side */
+ case EDGE_RIGHT:
+- if ((i > 0) && (crop_length != crop->regionlist[i - 1].length))
++ if ((crop->selections > i + 1) &&
++ (crop_length != crop->regionlist[i + 1].length))
+ {
+ TIFFError ("extractCompositeRegions",
+ "Only equal length regions can be combined for -E left or right");
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-1916.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-1916.patch
new file mode 100644
index 0000000000..6722781a3a
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-1916.patch
@@ -0,0 +1,99 @@
+From 848434a81c443f59ec90d41218eba6e48a450a11 Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Thu, 16 Mar 2023 16:16:54 +0800
+Subject: [PATCH] Fix heap-buffer-overflow in function extractImageSection
+
+CVE: CVE-2023-1916
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/848434a81c443f59ec90d41218eba6e48a450a11 https://gitlab.com/libtiff/libtiff/-/merge_requests/535]
+Signed-off-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 44 ++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 40 insertions(+), 4 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 05ba4d2..8a08536 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5700,6 +5700,15 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ crop->combined_width += (uint32_t)zwidth;
+ else
+ crop->combined_width = (uint32_t)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_BOTTOM: /* width from left, zones from bottom to top */
+ zwidth = offsets.crop_width;
+@@ -5735,6 +5744,15 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ else
+ crop->combined_length = (uint32_t)zlength;
+ crop->combined_width = (uint32_t)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_RIGHT: /* zones from right to left, length from top */
+ zlength = offsets.crop_length;
+@@ -5772,6 +5790,15 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ crop->combined_width += (uint32_t)zwidth;
+ else
+ crop->combined_width = (uint32_t)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_TOP: /* width from left, zones from top to bottom */
+ default:
+@@ -5818,7 +5845,16 @@ getCropOffsets(struct image_data *image, struct crop_mask *crop, struct dump_opt
+ else
+ crop->combined_length = (uint32_t)zlength;
+ crop->combined_width = (uint32_t)zwidth;
+- break;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
++ break;
+ } /* end switch statement */
+
+ buffsize = (uint32_t)
+@@ -7016,9 +7052,9 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ * regardless of the way the data are organized in the input file.
+ * Furthermore, bytes and bits are arranged in buffer according to COMPRESSION=1 and FILLORDER=1
+ */
+- img_rowsize = (((img_width * spp * bps) + 7) / 8); /* row size in full bytes of source image */
+- full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
+- trailing_bits = (sect_width * spp * bps) % 8; /* trailing bits within the last byte of destination buffer */
++ img_rowsize = (((img_width * spp * bps) + 7) / 8); /* row size in full bytes of source image */
++ full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
++ trailing_bits = (sect_width * spp * bps) % 8; /* trailing bits within the last byte of destination buffer */
+
+ #ifdef DEVELMODE
+ TIFFError ("", "First row: %"PRIu32", last row: %"PRIu32", First col: %"PRIu32", last col: %"PRIu32"\n",
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-25433.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-25433.patch
new file mode 100644
index 0000000000..285aa3d1c4
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-25433.patch
@@ -0,0 +1,195 @@
+From 9c22495e5eeeae9e00a1596720c969656bb8d678 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 3 Feb 2023 15:31:31 +0100
+Subject: [PATCH] CVE-2023-25433
+
+tiffcrop: correctly update buffer size after rotateImage()
+Fix #520: rotateImage() sets up a new buffer and calculates its size
+individually. Therefore, seg_buffs[] size needs to be updated accordingly.
+Before this fix, the seg_buffs buffer size was calculated with a different
+formula than within rotateImage().
+
+Closes #520.
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/9c22495e5eeeae9e00a1596720c969656bb8d678 && https://gitlab.com/libtiff/libtiff/-/commit/688012dca2c39033aa2dc7bcea9796787cfd1b44]
+CVE: CVE-2023-25433
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 78 +++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 60 insertions(+), 18 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index eee26bf..cbd24cc 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -523,7 +523,7 @@ static int rotateContigSamples24bits(uint16_t, uint16_t, uint16_t, uint32_t,
+ static int rotateContigSamples32bits(uint16_t, uint16_t, uint16_t, uint32_t,
+ uint32_t, uint32_t, uint8_t *, uint8_t *);
+ static int rotateImage(uint16_t, struct image_data *, uint32_t *, uint32_t *,
+- unsigned char **, int);
++ unsigned char **, size_t *);
+ static int mirrorImage(uint16_t, uint16_t, uint16_t, uint32_t, uint32_t,
+ unsigned char *);
+ static int invertImage(uint16_t, uint16_t, uint16_t, uint32_t, uint32_t,
+@@ -6515,7 +6515,7 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ * but switch xres, yres there. */
+ uint32_t width = image->width;
+ uint32_t length = image->length;
+- if (rotateImage(rotation, image, &width, &length, work_buff_ptr, TRUE))
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL))
+ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+@@ -7695,16 +7695,19 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
++ /* rotateImage() set up a new buffer and calculates its size
++ * individually. Therefore, seg_buffs size needs to be updated
++ * accordingly. */
++ size_t rot_buf_size = 0;
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff, FALSE))
++ &crop->combined_length, &crop_buff, &rot_buf_size))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %"PRIu32" degrees", crop->rotation);
+ return (-1);
+ }
+ seg_buffs[0].buffer = crop_buff;
+- seg_buffs[0].size = (((crop->combined_width * image->bps + 7 ) / 8)
+- * image->spp) * crop->combined_length;
++ seg_buffs[0].size = rot_buf_size;
+ }
+ }
+ else /* Separated Images */
+@@ -7804,9 +7807,13 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ {
+ /* rotateImage() changes image->width, ->length, ->xres and ->yres, what it schouldn't do here, when more than one section is processed.
+ * ToDo: Therefore rotateImage() and its usage has to be reworked (e.g. like mirrorImage()) !!
+- */
+- if (rotateImage(crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff, FALSE))
++ * Furthermore, rotateImage() set up a new buffer and calculates
++ * its size individually. Therefore, seg_buffs size needs to be
++ * updated accordingly. */
++ size_t rot_buf_size = 0;
++ if (rotateImage(
++ crop->rotation, image, &crop->regionlist[i].width,
++ &crop->regionlist[i].length, &crop_buff, &rot_buf_size))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %"PRIu16" degrees", crop->rotation);
+@@ -7817,8 +7824,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ crop->combined_width = total_width;
+ crop->combined_length = total_length;
+ seg_buffs[i].buffer = crop_buff;
+- seg_buffs[i].size = (((crop->regionlist[i].width * image->bps + 7 ) / 8)
+- * image->spp) * crop->regionlist[i].length;
++ seg_buffs[i].size = rot_buf_size;
+ }
+ } /* for crop->selections loop */
+ } /* Separated Images (else case) */
+@@ -7827,7 +7833,6 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ /* Copy the crop section of the data from the current image into a buffer
+ * and adjust the IFD values to reflect the new size. If no cropping is
+- * required, use the original read buffer as the crop buffer.
+ *
+ * There is quite a bit of redundancy between this routine and the more
+ * specialized processCropSelections, but this provides
+@@ -7938,7 +7943,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr, TRUE))
++ &crop->combined_length, crop_buff_ptr, NULL))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %"PRIu16" degrees", crop->rotation);
+@@ -8600,14 +8605,16 @@ rotateContigSamples32bits(uint16_t rotation, uint16_t spp, uint16_t bps, uint32_
+
+ /* Rotate an image by a multiple of 90 degrees clockwise */
+ static int
+-rotateImage(uint16_t rotation, struct image_data *image, uint32_t *img_width,
+- uint32_t *img_length, unsigned char **ibuff_ptr, int rot_image_params)
++rotateImage(uint16_t rotation, struct image_data *image,
++ uint32_t *img_width,uint32_t *img_length,
++ unsigned char **ibuff_ptr, size_t *rot_buf_size)
+ {
+ int shift_width;
+ uint32_t bytes_per_pixel, bytes_per_sample;
+ uint32_t row, rowsize, src_offset, dst_offset;
+ uint32_t i, col, width, length;
+- uint32_t colsize, buffsize, col_offset, pix_offset;
++ uint32_t colsize, col_offset, pix_offset;
++ tmsize_t buffsize;
+ unsigned char *ibuff;
+ unsigned char *src;
+ unsigned char *dst;
+@@ -8620,12 +8627,41 @@ rotateImage(uint16_t rotation, struct image_data *image, uint32_t *img_width,
+ spp = image->spp;
+ bps = image->bps;
+
++ if ((spp != 0 && bps != 0 &&
++ width > (uint32_t)((UINT32_MAX - 7) / spp / bps)) ||
++ (spp != 0 && bps != 0 &&
++ length > (uint32_t)((UINT32_MAX - 7) / spp / bps)))
++ {
++ TIFFError("rotateImage", "Integer overflow detected.");
++ return (-1);
++ }
++
+ rowsize = ((bps * spp * width) + 7) / 8;
+ colsize = ((bps * spp * length) + 7) / 8;
+ if ((colsize * width) > (rowsize * length))
+- buffsize = (colsize + 1) * width;
++{
++ if (((tmsize_t)colsize + 1) != 0 &&
++ (tmsize_t)width > ((TIFF_TMSIZE_T_MAX - NUM_BUFF_OVERSIZE_BYTES) /
++ ((tmsize_t)colsize + 1)))
++ {
++ TIFFError("rotateImage",
++ "Integer overflow when calculating buffer size.");
++ return (-1);
++ }
++ buffsize = ((tmsize_t)colsize + 1) * width;
++ }
+ else
+- buffsize = (rowsize + 1) * length;
++ {
++ if (((tmsize_t)rowsize + 1) != 0 &&
++ (tmsize_t)length > ((TIFF_TMSIZE_T_MAX - NUM_BUFF_OVERSIZE_BYTES) /
++ ((tmsize_t)rowsize + 1)))
++ {
++ TIFFError("rotateImage",
++ "Integer overflow when calculating buffer size.");
++ return (-1);
++ }
++ buffsize = (rowsize + 1) * length;
++ }
+
+ bytes_per_sample = (bps + 7) / 8;
+ bytes_per_pixel = ((bps * spp) + 7) / 8;
+@@ -8648,11 +8684,17 @@ rotateImage(uint16_t rotation, struct image_data *image, uint32_t *img_width,
+ /* Add 3 padding bytes for extractContigSamplesShifted32bits */
+ if (!(rbuff = (unsigned char *)limitMalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+- TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize + NUM_BUFF_OVERSIZE_BYTES);
++ TIFFError("rotateImage",
++ "Unable to allocate rotation buffer of %" TIFF_SSIZE_FORMAT
++ " bytes ",
++ buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
+ _TIFFmemset(rbuff, '\0', buffsize + NUM_BUFF_OVERSIZE_BYTES);
+
++ if (rot_buf_size != NULL)
++ *rot_buf_size = buffsize;
++
+ ibuff = *ibuff_ptr;
+ switch (rotation)
+ {
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-25434-CVE-2023-25435.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-25434-CVE-2023-25435.patch
new file mode 100644
index 0000000000..e214277504
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-25434-CVE-2023-25435.patch
@@ -0,0 +1,94 @@
+From 69818e2f2d246e6631ac2a2da692c3706b849c38 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sun, 29 Jan 2023 11:09:26 +0100
+Subject: [PATCH] CVE-2023-25434 & CVE-2023-25435
+
+tiffcrop: Amend rotateImage() not to toggle the input (main)
+image width and length parameters when only cropped image sections are
+rotated. Remove buffptr from the region structure because it is never used.
+
+Closes #492 #493 #494 #495 #499 #518 #519
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/69818e2f2d246e6631ac2a2da692c3706b849c38]
+CVE: CVE-2023-25434 & CVE-2023-25435
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 27 ++++++++++++++++-----------
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index cbd24cc..b811fbb 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -523,7 +523,7 @@ static int rotateContigSamples24bits(uint16_t, uint16_t, uint16_t, uint32_t,
+ static int rotateContigSamples32bits(uint16_t, uint16_t, uint16_t, uint32_t,
+ uint32_t, uint32_t, uint8_t *, uint8_t *);
+ static int rotateImage(uint16_t, struct image_data *, uint32_t *, uint32_t *,
+- unsigned char **, size_t *);
++ unsigned char **, size_t *, int);
+ static int mirrorImage(uint16_t, uint16_t, uint16_t, uint32_t, uint32_t,
+ unsigned char *);
+ static int invertImage(uint16_t, uint16_t, uint16_t, uint32_t, uint32_t,
+@@ -6513,10 +6513,11 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ /* Dummy variable in order not to switch two times the
+ * image->width,->length within rotateImage(),
+ * but switch xres, yres there. */
+- uint32_t width = image->width;
+- uint32_t length = image->length;
+- if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL))
+- {
++ uint32_t width = image->width;
++ uint32_t length = image->length;
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL,
++ TRUE))
++ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+ }
+@@ -7700,7 +7701,8 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ * accordingly. */
+ size_t rot_buf_size = 0;
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff, &rot_buf_size))
++ &crop->combined_length, &crop_buff, &rot_buf_size,
++ FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %"PRIu32" degrees", crop->rotation);
+@@ -7811,9 +7813,10 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ * its size individually. Therefore, seg_buffs size needs to be
+ * updated accordingly. */
+ size_t rot_buf_size = 0;
+- if (rotateImage(
+- crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff, &rot_buf_size))
++ if (rotateImage(crop->rotation, image,
++ &crop->regionlist[i].width,
++ &crop->regionlist[i].length, &crop_buff,
++ &rot_buf_size, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %"PRIu16" degrees", crop->rotation);
+@@ -7943,7 +7946,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr, NULL))
++ &crop->combined_length, crop_buff_ptr, NULL, TRUE))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %"PRIu16" degrees", crop->rotation);
+@@ -8607,7 +8610,9 @@ rotateContigSamples32bits(uint16_t rotation, uint16_t spp, uint16_t bps, uint32_
+ static int
+ rotateImage(uint16_t rotation, struct image_data *image,
+ uint32_t *img_width,uint32_t *img_length,
+- unsigned char **ibuff_ptr, size_t *rot_buf_size)
++ unsigned char **ibuff_ptr, size_t *rot_buf_size,
++ int rot_image_params)
++
+ {
+ int shift_width;
+ uint32_t bytes_per_pixel, bytes_per_sample;
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-26965.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-26965.patch
new file mode 100644
index 0000000000..2162493e34
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-26965.patch
@@ -0,0 +1,97 @@
+From ec8ef90c1f573c9eb1f17d6a056aa0015f184acf Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Tue, 14 Feb 2023 20:43:43 +0100
+Subject: [PATCH] tiffcrop: Do not reuse input buffer for subsequent images.
+ Fix issue 527
+
+Reuse of read_buff within loadImage() from a previous image is quite unsafe, because other functions (like rotateImage() etc.) reallocate that buffer with a different size without updating the local prev_readsize value.
+
+Closes #527
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/ec8ef90c1f573c9eb1f17d6a056aa0015f184acf]
+CVE: CVE-2023-26965
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 47 +++++++++++++++--------------------------------
+ 1 file changed, 15 insertions(+), 32 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index b811fbb..ce77c74 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -6066,9 +6066,7 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ uint32_t tw = 0, tl = 0; /* Tile width and length */
+ tmsize_t tile_rowsize = 0;
+ unsigned char *read_buff = NULL;
+- unsigned char *new_buff = NULL;
+ int readunit = 0;
+- static tmsize_t prev_readsize = 0;
+
+ TIFFGetFieldDefaulted(in, TIFFTAG_BITSPERSAMPLE, &bps);
+ TIFFGetFieldDefaulted(in, TIFFTAG_SAMPLESPERPIXEL, &spp);
+@@ -6361,47 +6359,32 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ }
+
+ read_buff = *read_ptr;
+- /* +3 : add a few guard bytes since reverseSamples16bits() can read a bit */
+- /* outside buffer */
+- if (!read_buff)
+- {
+- if( buffsize > 0xFFFFFFFFU - 3 )
++ /* +3 : add a few guard bytes since reverseSamples16bits() can read a bit
++ * outside buffer */
++ * Reuse of read_buff from a previous image is quite unsafe, because other
++ * functions (like rotateImage() etc.) reallocate that buffer with a
++ * different size without updating the local prev_readsize value. */
++ if (read_buff)
+ {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+- return (-1);
++ _TIFFfree(read_buff);
+ }
+- read_buff = (unsigned char *)limitMalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+- }
+- else
++ if (buffsize > 0xFFFFFFFFU - 3)
+ {
+- if (prev_readsize < buffsize)
+- {
+- if( buffsize > 0xFFFFFFFFU - 3 )
+- {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+- return (-1);
+- }
+- new_buff = _TIFFrealloc(read_buff, buffsize + NUM_BUFF_OVERSIZE_BYTES);
+- if (!new_buff)
+- {
+- free (read_buff);
+- read_buff = (unsigned char *)limitMalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+- }
+- else
+- read_buff = new_buff;
+- }
++ TIFFError("loadImage", "Required read buffer size too large");
++ return (-1);
+ }
+- if (!read_buff)
++ read_buff =
++ (unsigned char *)limitMalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
++ if (!read_buff)
+ {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+- return (-1);
++ TIFFError("loadImage", "Unable to allocate read buffer");
++ return (-1);
+ }
+
+ read_buff[buffsize] = 0;
+ read_buff[buffsize+1] = 0;
+ read_buff[buffsize+2] = 0;
+
+- prev_readsize = buffsize;
+ *read_ptr = read_buff;
+
+ /* N.B. The read functions used copy separate plane data into a buffer as interleaved
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-26966.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-26966.patch
new file mode 100644
index 0000000000..85764304f9
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-26966.patch
@@ -0,0 +1,35 @@
+From b0e1c25dd1d065200c8d8f59ad0afe014861a1b9 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Thu, 16 Feb 2023 12:03:16 +0100
+Subject: [PATCH] tif_luv: Check and correct for NaN data in uv_encode().
+
+Closes #530
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/b0e1c25dd1d065200c8d8f59ad0afe014861a1b9]
+CVE: CVE-2023-26966
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_luv.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/libtiff/tif_luv.c b/libtiff/tif_luv.c
+index 13765ea..40b2719 100644
+--- a/libtiff/tif_luv.c
++++ b/libtiff/tif_luv.c
+@@ -908,6 +908,13 @@ uv_encode(double u, double v, int em) /* encode (u',v') coordinates */
+ {
+ register int vi, ui;
+
++ /* check for NaN */
++ if (u != u || v != v)
++ {
++ u = U_NEU;
++ v = V_NEU;
++ }
++
+ if (v < UV_VSTART)
+ return oog_encode(u, v);
+ vi = tiff_itrunc((v - UV_VSTART)*(1./UV_SQSIZ), em);
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-2908.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-2908.patch
new file mode 100644
index 0000000000..cf94fd23d8
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-2908.patch
@@ -0,0 +1,33 @@
+From 8c0859a80444c90b8dfb862a9f16de74e16f0a9e Mon Sep 17 00:00:00 2001
+From: xiaoxiaoafeifei <lliangliang2007@163.com>
+Date: Fri, 21 Apr 2023 13:01:34 +0000
+Subject: [PATCH] countInkNamesString(): fix `UndefinedBehaviorSanitizer`:
+ applying zero offset to null pointer
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/9bd48f0dbd64fb94dc2b5b05238fde0bfdd4ff3f]
+CVE: CVE-2023-2908
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_dir.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 349dfe4..1402c8e 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -145,10 +145,10 @@ static uint16_t
+ countInkNamesString(TIFF *tif, uint32_t slen, const char *s)
+ {
+ uint16_t i = 0;
+- const char *ep = s + slen;
+- const char *cp = s;
+
+ if (slen > 0) {
++ const char *ep = s + slen;
++ const char *cp = s;
+ do {
+ for (; cp < ep && *cp != '\0'; cp++) {}
+ if (cp >= ep)
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3316.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3316.patch
new file mode 100644
index 0000000000..1aa4ba45ac
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3316.patch
@@ -0,0 +1,59 @@
+From d63de61b1ec3385f6383ef9a1f453e4b8b11d536 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 3 Feb 2023 17:38:55 +0100
+Subject: [PATCH] TIFFClose(): avoid NULL pointer dereferencing. Fixes #515
+
+Closes #515
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/d63de61b1ec3385f6383ef9a1f453e4b8b11d536]
+CVE: CVE-2023-3316
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_close.c | 11 +++++++----
+ tools/tiffcrop.c | 5 ++++-
+ 2 files changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/libtiff/tif_close.c b/libtiff/tif_close.c
+index 674518a..0fe7af4 100644
+--- a/libtiff/tif_close.c
++++ b/libtiff/tif_close.c
+@@ -118,13 +118,16 @@ TIFFCleanup(TIFF* tif)
+ */
+
+ void
+-TIFFClose(TIFF* tif)
++TIFFClose(TIFF *tif)
+ {
+- TIFFCloseProc closeproc = tif->tif_closeproc;
+- thandle_t fd = tif->tif_clientdata;
++ if (tif != NULL)
++ {
++ TIFFCloseProc closeproc = tif->tif_closeproc;
++ thandle_t fd = tif->tif_clientdata;
+
+ TIFFCleanup(tif);
+- (void) (*closeproc)(fd);
++ (void)(*closeproc)(fd);
++ }
+ }
+
+ /* vim: set ts=8 sts=8 sw=8 noet: */
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index ce77c74..cd49660 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2548,7 +2548,10 @@ main(int argc, char* argv[])
+ }
+ }
+
+- TIFFClose(out);
++ if (out != NULL)
++ {
++ TIFFClose(out);
++ }
+
+ return (0);
+ } /* end main */
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3576.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3576.patch
new file mode 100644
index 0000000000..b17dd72170
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3576.patch
@@ -0,0 +1,35 @@
+From 881a070194783561fd209b7c789a4e75566f7f37 Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Tue, 7 Mar 2023 15:02:08 +0800
+Subject: [PATCH] Fix memory leak in tiffcrop.c
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/881a070194783561fd209b7c789a4e75566f7f37]
+CVE: CVE-2023-3576
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/tiffcrop.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index cd49660..0d02f56 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -7839,8 +7839,13 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+
+ read_buff = *read_buff_ptr;
+
++ /* Memory is freed before crop_buff_ptr is overwritten */
++ if (*crop_buff_ptr != NULL)
++ {
++ _TIFFfree(*crop_buff_ptr);
++ }
++
+ /* process full image, no crop buffer needed */
+- crop_buff = read_buff;
+ *crop_buff_ptr = read_buff;
+ crop->combined_width = image->width;
+ crop->combined_length = image->length;
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3618.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3618.patch
new file mode 100644
index 0000000000..4179145722
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-3618.patch
@@ -0,0 +1,47 @@
+From b5c7d4c4e03333ac16b5cfb11acaaeaa493334f8 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 5 May 2023 19:43:46 +0200
+Subject: [PATCH] Consider error return of writeSelections(). Fixes #553
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/b5c7d4c4e03333ac16b5cfb11acaaeaa493334f8]
+CVE: CVE-2023-3618
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 0d02f56..8cbeb68 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2459,9 +2459,15 @@ main(int argc, char* argv[])
+ { /* Whole image or sections not based on output page size */
+ if (crop.selections > 0)
+ {
+- writeSelections(in, &out, &crop, &image, &dump, seg_buffs,
+- mp, argv[argc - 1], &next_page, total_pages);
+- }
++ if (writeSelections(in, &out, &crop, &image, &dump,
++ seg_buffs, mp, argv[argc - 1],
++ &next_page, total_pages))
++ {
++ TIFFError("main",
++ "Unable to write new image selections");
++ exit(EXIT_FAILURE);
++ }
++ }
+ else /* One file all images and sections */
+ {
+ if (update_output_file (&out, mp, crop.exp_mode, argv[argc - 1],
+@@ -7842,7 +7848,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ /* Memory is freed before crop_buff_ptr is overwritten */
+ if (*crop_buff_ptr != NULL)
+ {
+- _TIFFfree(*crop_buff_ptr);
++ _TIFFfree(*crop_buff_ptr);
+ }
+
+ /* process full image, no crop buffer needed */
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-40745.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-40745.patch
new file mode 100644
index 0000000000..cb4656fd46
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-40745.patch
@@ -0,0 +1,34 @@
+From 4fc16f649fa2875d5c388cf2edc295510a247ee5 Mon Sep 17 00:00:00 2001
+From: Arie Haenel <arie.haenel@jct.ac.il>
+Date: Wed, 19 Jul 2023 19:34:25 +0000
+Subject: [PATCH] tiffcp: fix memory corruption (overflow) on hostile images
+ (fixes #591)
+
+Upstream-Status: Backport from [https://gitlab.com/libtiff/libtiff/-/commit/4fc16f649fa2875d5c388cf2edc295510a247ee5]
+CVE: CVE-2023-40745
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ tools/tiffcp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 57eef90..34b6ef2 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -1577,6 +1577,13 @@ DECLAREreadFunc(readSeparateTilesIntoBuffer)
+ TIFFError(TIFFFileName(in), "Error, cannot handle that much samples per tile row (Tile Width * Samples/Pixel)");
+ return 0;
+ }
++
++ if ( (imagew - tilew * spp) > INT_MAX ){
++ TIFFError(TIFFFileName(in),
++ "Error, image raster scan line size is too large");
++ return 0;
++ }
++
+ iskew = imagew - tilew*spp;
+ tilebuf = limitMalloc(tilesize);
+ if (tilebuf == 0)
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-41175.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-41175.patch
new file mode 100644
index 0000000000..06645bed68
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-41175.patch
@@ -0,0 +1,69 @@
+From 6e2dac5f904496d127c92ddc4e56eccfca25c2ee Mon Sep 17 00:00:00 2001
+From: Arie Haenel <arie.haenel@jct.ac.il>
+Date: Wed, 19 Jul 2023 19:40:01 +0000
+Subject: [PATCH] raw2tiff: fix integer overflow and bypass of the check (fixes #592)
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/6e2dac5f904496d127c92ddc4e56eccfca25c2ee]
+CVE: CVE-2023-41175
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/raw2tiff.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+diff --git a/tools/raw2tiff.c b/tools/raw2tiff.c
+index dfee715..253c023 100644
+--- a/tools/raw2tiff.c
++++ b/tools/raw2tiff.c
+@@ -36,6 +36,7 @@
+ #include <sys/types.h>
+ #include <math.h>
+ #include <ctype.h>
++#include <limits.h>
+
+ #ifdef HAVE_UNISTD_H
+ # include <unistd.h>
+@@ -101,6 +102,7 @@ main(int argc, char* argv[])
+ int fd;
+ char *outfilename = NULL;
+ TIFF *out;
++ uint32_t temp_limit_check = 0; /* temp for integer overflow checking*/
+
+ uint32_t row, col, band;
+ int c;
+@@ -212,6 +214,33 @@ main(int argc, char* argv[])
+ if (guessSize(fd, dtype, hdr_size, nbands, swab, &width, &length) < 0)
+ return EXIT_FAILURE;
+
++ /* check for integer overflow in */
++ /* hdr_size + (*width) * (*length) * nbands * depth */
++
++ if ((width == 0) || (length == 0) ){
++ fprintf(stderr, "Too large nbands value specified.\n");
++ return (EXIT_FAILURE);
++ }
++
++ temp_limit_check = nbands * depth;
++
++ if ( !temp_limit_check || length > ( UINT_MAX / temp_limit_check ) ) {
++ fprintf(stderr, "Too large length size specified.\n");
++ return (EXIT_FAILURE);
++ }
++ temp_limit_check = temp_limit_check * length;
++
++ if ( !temp_limit_check || width > ( UINT_MAX / temp_limit_check ) ) {
++ fprintf(stderr, "Too large width size specified.\n");
++ return (EXIT_FAILURE);
++ }
++ temp_limit_check = temp_limit_check * width;
++
++ if ( !temp_limit_check || hdr_size > ( UINT_MAX - temp_limit_check ) ) {
++ fprintf(stderr, "Too large header size specified.\n");
++ return (EXIT_FAILURE);
++ }
++
+ if (outfilename == NULL)
+ outfilename = argv[optind+1];
+ out = TIFFOpen(outfilename, "w");
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-52356.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-52356.patch
new file mode 100644
index 0000000000..4eb7d79c8f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-52356.patch
@@ -0,0 +1,54 @@
+CVE: CVE-2023-52356
+Upstream-Status: Backport [upstream : https://gitlab.com/libtiff/libtiff/-/commit/51558511bdbbcffdce534db21dbaf5d54b31638a
+ubuntu : http://archive.ubuntu.com/ubuntu/pool/main/t/tiff/tiff_4.3.0-6ubuntu0.8.debian.tar.xz ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFErrorExt instead of TIFFErrorExtR (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 51558511bdbbcffdce534db21dbaf5d54b31638a Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 15:58:41 +0100
+Subject: [PATCH] TIFFReadRGBAStrip/TIFFReadRGBATile: add more validation of
+ col/row (fixes #622)
+
+---
+ libtiff/tif_getimage.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+
+--- tiff-4.3.0.orig/libtiff/tif_getimage.c
++++ tiff-4.3.0/libtiff/tif_getimage.c
+@@ -2942,6 +2942,13 @@ TIFFReadRGBAStripExt(TIFF* tif, uint32_t
+ }
+
+ if (TIFFRGBAImageOK(tif, emsg) && TIFFRGBAImageBegin(&img, tif, stop_on_error, emsg)) {
++ if (row >= img.height)
++ {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
++ "Invalid row passed to TIFFReadRGBAStrip().");
++ TIFFRGBAImageEnd(&img);
++ return (0);
++ }
+
+ img.row_offset = row;
+ img.col_offset = 0;
+@@ -3018,6 +3025,14 @@ TIFFReadRGBATileExt(TIFF* tif, uint32_t
+ return( 0 );
+ }
+
++ if (col >= img.width || row >= img.height)
++ {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
++ "Invalid row/col passed to TIFFReadRGBATile().");
++ TIFFRGBAImageEnd(&img);
++ return (0);
++ }
++
+ /*
+ * The TIFFRGBAImageGet() function doesn't allow us to get off the
+ * edge of the image, even to fill an otherwise valid tile. So we
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6228.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6228.patch
new file mode 100644
index 0000000000..f15cc96e19
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6228.patch
@@ -0,0 +1,31 @@
+From 1e7d217a323eac701b134afc4ae39b6bdfdbc96a Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Wed, 17 Jan 2024 06:38:24 +0000
+Subject: [PATCH] codec of input image is available, independently from codec
+ check of output image and return with error if not.
+
+Fixes #606.
+
+CVE: CVE-2023-6228
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/1e7d217a323eac701b134afc4ae39b6bdfdbc96a]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ tools/tiffcp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 34b6ef2..17c6524 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -724,6 +724,8 @@ tiffcp(TIFF* in, TIFF* out)
+ else
+ CopyField(TIFFTAG_COMPRESSION, compression);
+ TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression);
++ if (!TIFFIsCODECConfigured(input_compression))
++ return FALSE;
+ TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric);
+ if (input_compression == COMPRESSION_JPEG) {
+ /* Force conversion to RGB */
+--
+2.40.0
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-1.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-1.patch
new file mode 100644
index 0000000000..453df897ac
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-1.patch
@@ -0,0 +1,178 @@
+CVE: CVE-2023-6277
+Upstream-Status: Backport [upstream : https://gitlab.com/libtiff/libtiff/-/commit/5320c9d89c054fa805d037d84c57da874470b01a
+ubuntu : http://archive.ubuntu.com/ubuntu/pool/main/t/tiff/tiff_4.3.0-6ubuntu0.8.debian.tar.xz ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . calling _TIFFfree(data) instead of _TIFFfreeExt(tif, data) (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 5320c9d89c054fa805d037d84c57da874470b01a Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 31 Oct 2023 15:43:29 +0000
+Subject: [PATCH] Prevent some out-of-memory attacks
+
+Some small fuzzer files fake large amounts of data and provoke out-of-memory situations. For non-compressed data content / tags, out-of-memory can be prevented by comparing with the file size.
+
+At image reading, data size of some tags / data structures (StripByteCounts, StripOffsets, StripArray, TIFF directory) is compared with file size to prevent provoked out-of-memory attacks.
+
+See issue https://gitlab.com/libtiff/libtiff/-/issues/614#note_1602683857
+---
+ libtiff/tif_dirread.c | 92 ++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 90 insertions(+), 2 deletions(-)
+
+--- tiff-4.3.0.orig/libtiff/tif_dirread.c
++++ tiff-4.3.0/libtiff/tif_dirread.c
+@@ -866,6 +866,21 @@ static enum TIFFReadDirEntryErr TIFFRead
+ datasize=(*count)*typesize;
+ assert((tmsize_t)datasize>0);
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of requested memory is not greater than file size.
++ */
++ uint64_t filesize = TIFFGetFileSize(tif);
++ if (datasize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
++ "Requested memory size for tag %d (0x%x) %" PRIu32
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, tag not read",
++ direntry->tdir_tag, direntry->tdir_tag, datasize,
++ filesize);
++ return (TIFFReadDirEntryErrAlloc);
++ }
++
+ if( isMapped(tif) && datasize > (uint64_t)tif->tif_size )
+ return TIFFReadDirEntryErrIo;
+
+@@ -4593,6 +4608,20 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ if( !_TIFFFillStrilesInternal( tif, 0 ) )
+ return -1;
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of requested memory is not greater than file size. */
++ uint64_t filesize = TIFFGetFileSize(tif);
++ uint64_t allocsize = (uint64_t)td->td_nstrips * sizeof(uint64_t);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Requested memory size for StripByteCounts of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return -1;
++ }
++
+ if (td->td_stripbytecount_p)
+ _TIFFfree(td->td_stripbytecount_p);
+ td->td_stripbytecount_p = (uint64_t*)
+@@ -4603,9 +4632,7 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+
+ if (td->td_compression != COMPRESSION_NONE) {
+ uint64_t space;
+- uint64_t filesize;
+ uint16_t n;
+- filesize = TIFFGetFileSize(tif);
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+ space=sizeof(TIFFHeaderClassic)+2+dircount*12+4;
+ else
+@@ -4913,6 +4940,20 @@ TIFFFetchDirectory(TIFF* tif, uint64_t d
+ dircount16 = (uint16_t)dircount64;
+ dirsize = 20;
+ }
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64_t filesize = TIFFGetFileSize(tif);
++ uint64_t allocsize = (uint64_t)dircount16 * dirsize;
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
+ if (origdir == NULL)
+@@ -5016,6 +5057,20 @@ TIFFFetchDirectory(TIFF* tif, uint64_t d
+ "Sanity check on directory count failed, zero tag directories not supported");
+ return 0;
+ }
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64_t filesize = TIFFGetFileSize(tif);
++ uint64_t allocsize = (uint64_t)dircount16 * dirsize;
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize,
+ "to read TIFF directory");
+@@ -5059,6 +5114,8 @@ TIFFFetchDirectory(TIFF* tif, uint64_t d
+ }
+ }
+ }
++ /* No check against filesize needed here because "dir" should have same size
++ * than "origdir" checked above. */
+ dir = (TIFFDirEntry*)_TIFFCheckMalloc(tif, dircount16,
+ sizeof(TIFFDirEntry),
+ "to read TIFF directory");
+@@ -5853,6 +5910,20 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEn
+ return(0);
+ }
+
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64_t filesize = TIFFGetFileSize(tif);
++ uint64_t allocsize = (uint64_t)nstrips * sizeof(uint64_t);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Requested memory size for StripArray of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ _TIFFfree(data);
++ return (0);
++ }
+ resizeddata=(uint64_t*)_TIFFCheckMalloc(tif, nstrips, sizeof(uint64_t), "for strip array");
+ if (resizeddata==0) {
+ _TIFFfree(data);
+@@ -5948,6 +6019,23 @@ static void allocChoppedUpStripArrays(TI
+ }
+ bytecount = last_offset + last_bytecount - offset;
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of StripByteCount and StripOffset tags is not greater than
++ * file size.
++ */
++ uint64_t allocsize = (uint64_t)nstrips * sizeof(uint64_t) * 2;
++ uint64_t filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
++ "Requested memory size for StripByteCount and "
++ "StripOffsets %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return;
++ }
++
+ newcounts = (uint64_t*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64_t),
+ "for chopped \"StripByteCounts\" array");
+ newoffsets = (uint64_t*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64_t),
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-2.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-2.patch
new file mode 100644
index 0000000000..ad39c1c4dd
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-2.patch
@@ -0,0 +1,151 @@
+CVE: CVE-2023-6277
+Upstream-Status: Backport [upstream : https://gitlab.com/libtiff/libtiff/-/commit/0b025324711213a75e38b52f7e7ba60235f108aa
+ubuntu : http://archive.ubuntu.com/ubuntu/pool/main/t/tiff/tiff_4.3.0-6ubuntu0.8.debian.tar.xz ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 0b025324711213a75e38b52f7e7ba60235f108aa Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 19:47:22 +0100
+Subject: [PATCH] tif_dirread.c: only issue TIFFGetFileSize() for large enough
+ RAM requests
+
+Amends 5320c9d89c054fa805d037d84c57da874470b01a
+
+This fixes a performance regression caught by the GDAL regression test
+suite.
+---
+ libtiff/tif_dirread.c | 83 +++++++++++++++++++++++++------------------
+ 1 file changed, 48 insertions(+), 35 deletions(-)
+
+--- tiff-4.3.0.orig/libtiff/tif_dirread.c
++++ tiff-4.3.0/libtiff/tif_dirread.c
+@@ -866,19 +866,22 @@ static enum TIFFReadDirEntryErr TIFFRead
+ datasize=(*count)*typesize;
+ assert((tmsize_t)datasize>0);
+
+- /* Before allocating a huge amount of memory for corrupted files, check if
+- * size of requested memory is not greater than file size.
+- */
+- uint64_t filesize = TIFFGetFileSize(tif);
+- if (datasize > filesize)
++ if (datasize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
+- "Requested memory size for tag %d (0x%x) %" PRIu32
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated, tag not read",
+- direntry->tdir_tag, direntry->tdir_tag, datasize,
+- filesize);
+- return (TIFFReadDirEntryErrAlloc);
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size.
++ */
++ const uint64_t filesize = TIFFGetFileSize(tif);
++ if (datasize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
++ "Requested memory size for tag %d (0x%x) %" PRIu32
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated, tag not read",
++ direntry->tdir_tag, direntry->tdir_tag, datasize,
++ filesize);
++ return (TIFFReadDirEntryErrAlloc);
++ }
+ }
+
+ if( isMapped(tif) && datasize > (uint64_t)tif->tif_size )
+@@ -4608,18 +4611,22 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ if( !_TIFFFillStrilesInternal( tif, 0 ) )
+ return -1;
+
+- /* Before allocating a huge amount of memory for corrupted files, check if
+- * size of requested memory is not greater than file size. */
+- uint64_t filesize = TIFFGetFileSize(tif);
+- uint64_t allocsize = (uint64_t)td->td_nstrips * sizeof(uint64_t);
+- if (allocsize > filesize)
++ const uint64_t allocsize = (uint64_t)td->td_nstrips * sizeof(uint64_t);
++ uint64_t filesize = 0;
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, module,
+- "Requested memory size for StripByteCounts of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- return -1;
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for StripByteCounts of %" PRIu64
++ " is greater than filesize %" PRIu64 ". Memory not allocated",
++ allocsize, filesize);
++ return -1;
++ }
+ }
+
+ if (td->td_stripbytecount_p)
+@@ -4666,11 +4673,13 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ return -1;
+ space+=datasize;
+ }
++ if (filesize == 0)
++ filesize = TIFFGetFileSize(tif);
+ if( filesize < space )
+- /* we should perhaps return in error ? */
+- space = filesize;
+- else
+- space = filesize - space;
++ /* we should perhaps return in error ? */
++ space = filesize;
++ else
++ space = filesize - space;
+ if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
+ space /= td->td_samplesperpixel;
+ for (strip = 0; strip < td->td_nstrips; strip++)
+@@ -4940,19 +4949,23 @@ TIFFFetchDirectory(TIFF* tif, uint64_t d
+ dircount16 = (uint16_t)dircount64;
+ dirsize = 20;
+ }
+- /* Before allocating a huge amount of memory for corrupted files, check
+- * if size of requested memory is not greater than file size. */
+- uint64_t filesize = TIFFGetFileSize(tif);
+- uint64_t allocsize = (uint64_t)dircount16 * dirsize;
+- if (allocsize > filesize)
++ const uint64_t allocsize = (uint64_t)dircount16 * dirsize;
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(
+- tif->tif_clientdata, module,
+- "Requested memory size for TIFF directory of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated, TIFF directory not read",
+- allocsize, filesize);
+- return 0;
++ /* Before allocating a huge amount of memory for corrupted files,
++ * check if size of requested memory is not greater than file size.
++ */
++ const uint64_t filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-3.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-3.patch
new file mode 100644
index 0000000000..71eba2f34e
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-3.patch
@@ -0,0 +1,46 @@
+CVE: CVE-2023-6277
+Upstream-Status: Backport [upstream : https://gitlab.com/libtiff/libtiff/-/commit/de7bfd7d4377c266f81849579f696fa1ad5ba6c3
+ubuntu : http://archive.ubuntu.com/ubuntu/pool/main/t/tiff/tiff_4.3.0-6ubuntu0.8.debian.tar.xz ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+Backport of:
+
+From de7bfd7d4377c266f81849579f696fa1ad5ba6c3 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 20:13:45 +0100
+Subject: [PATCH] TIFFFetchDirectory(): remove useless allocsize vs filesize
+ check
+
+CoverityScan rightly points out that the max value for dircount16 * dirsize
+is 4096 * 20. That's small enough not to do any check
+---
+ libtiff/tif_dirread.c | 18 ------------------
+ 1 file changed, 18 deletions(-)
+
+--- tiff-4.3.0.orig/libtiff/tif_dirread.c
++++ tiff-4.3.0/libtiff/tif_dirread.c
+@@ -4949,24 +4949,6 @@ TIFFFetchDirectory(TIFF* tif, uint64_t d
+ dircount16 = (uint16_t)dircount64;
+ dirsize = 20;
+ }
+- const uint64_t allocsize = (uint64_t)dircount16 * dirsize;
+- if (allocsize > 100 * 1024 * 1024)
+- {
+- /* Before allocating a huge amount of memory for corrupted files,
+- * check if size of requested memory is not greater than file size.
+- */
+- const uint64_t filesize = TIFFGetFileSize(tif);
+- if (allocsize > filesize)
+- {
+- TIFFWarningExt(
+- tif->tif_clientdata, module,
+- "Requested memory size for TIFF directory of %" PRIu64
+- " is greater than filesize %" PRIu64
+- ". Memory not allocated, TIFF directory not read",
+- allocsize, filesize);
+- return 0;
+- }
+- }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
+ if (origdir == NULL)
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-4.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-4.patch
new file mode 100644
index 0000000000..61f48726e4
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2023-6277-4.patch
@@ -0,0 +1,93 @@
+CVE: CVE-2023-6277
+Upstream-Status: Backport [upstream : https://gitlab.com/libtiff/libtiff/-/commit/dbb825a8312f30e63a06c272010967d51af5c35a
+ubuntu : http://archive.ubuntu.com/ubuntu/pool/main/t/tiff/tiff_4.3.0-6ubuntu0.8.debian.tar.xz ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . calling _TIFFfree(data) instead of _TIFFfreeExt(tif, data) (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From dbb825a8312f30e63a06c272010967d51af5c35a Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 21:30:58 +0100
+Subject: [PATCH] tif_dirread.c: only issue TIFFGetFileSize() for large enough
+ RAM requests
+
+---
+ libtiff/tif_dirread.c | 54 +++++++++++++++++++++++++------------------
+ 1 file changed, 31 insertions(+), 23 deletions(-)
+
+--- tiff-4.3.0.orig/libtiff/tif_dirread.c
++++ tiff-4.3.0/libtiff/tif_dirread.c
+@@ -5905,19 +5905,24 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEn
+ return(0);
+ }
+
+- /* Before allocating a huge amount of memory for corrupted files, check
+- * if size of requested memory is not greater than file size. */
+- uint64_t filesize = TIFFGetFileSize(tif);
+- uint64_t allocsize = (uint64_t)nstrips * sizeof(uint64_t);
+- if (allocsize > filesize)
++ const uint64_t allocsize = (uint64_t)nstrips * sizeof(uint64_t);
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, module,
+- "Requested memory size for StripArray of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- _TIFFfree(data);
+- return (0);
++ /* Before allocating a huge amount of memory for corrupted files,
++ * check if size of requested memory is not greater than file size.
++ */
++ const uint64_t filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for StripArray of %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ _TIFFfree(data);
++ return (0);
++ }
+ }
+ resizeddata=(uint64_t*)_TIFFCheckMalloc(tif, nstrips, sizeof(uint64_t), "for strip array");
+ if (resizeddata==0) {
+@@ -6018,17 +6023,20 @@ static void allocChoppedUpStripArrays(TI
+ * size of StripByteCount and StripOffset tags is not greater than
+ * file size.
+ */
+- uint64_t allocsize = (uint64_t)nstrips * sizeof(uint64_t) * 2;
+- uint64_t filesize = TIFFGetFileSize(tif);
+- if (allocsize > filesize)
++ const uint64_t allocsize = (uint64_t)nstrips * sizeof(uint64_t) * 2;
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
+- "Requested memory size for StripByteCount and "
+- "StripOffsets %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- return;
++ const uint64_t filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
++ "Requested memory size for StripByteCount and "
++ "StripOffsets %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return;
++ }
+ }
+
+ newcounts = (uint64_t*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64_t),
diff --git a/meta/recipes-multimedia/libtiff/tiff/b258ed69a485a9cfb299d9f060eb2a46c54e5903.patch b/meta/recipes-multimedia/libtiff/tiff/b258ed69a485a9cfb299d9f060eb2a46c54e5903.patch
new file mode 100644
index 0000000000..83d5db7fc6
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/b258ed69a485a9cfb299d9f060eb2a46c54e5903.patch
@@ -0,0 +1,46 @@
+From fb89eab3ed46bbb0276bdee05b570455f6a27d2f Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sun, 6 Feb 2022 19:52:17 +0100
+Subject: [PATCH] Move the crop_width and crop_length computation after the
+ sanity check to avoid warnings when built with
+ -fsanitize=unsigned-integer-overflow.
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/b258ed69a485a9cfb299d9f060eb2a46c54e5903?merge_request_iid=294]
+
+Signed-off-by: Teoh Jay Shen <jay.shen.teoh@intel.com>
+
+CVE: CVE-2022-2868
+
+---
+ tools/tiffcrop.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 0ef5bb2..99e4208 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5389,15 +5389,13 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ off->endx = endx;
+ off->endy = endy;
+
+- crop_width = endx - startx + 1;
+- crop_length = endy - starty + 1;
+-
+ if (endx + 1 <= startx)
+ {
+ TIFFError("computeInputPixelOffsets",
+ "Invalid left/right margins and /or image crop width requested");
+ return (-1);
+ }
++ crop_width = endx - startx + 1;
+ if (crop_width > image->width)
+ crop_width = image->width;
+
+@@ -5407,6 +5405,7 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ "Invalid top/bottom margins and /or image crop length requested");
+ return (-1);
+ }
++ crop_length = endy - starty + 1;
+ if (crop_length > image->length)
+ crop_length = image->length;
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch b/meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch
index 74f9649fdf..5a84491711 100644
--- a/meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch
+++ b/meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch
@@ -1,4 +1,4 @@
-From eecb0712f4c3a5b449f70c57988260a667ddbdef Mon Sep 17 00:00:00 2001
+From 895867b72bd6c46da79de1a07d0993cd104e92cd Mon Sep 17 00:00:00 2001
From: Even Rouault <even.rouault@spatialys.com>
Date: Sun, 6 Feb 2022 13:08:38 +0100
Subject: [PATCH] TIFFFetchStripThing(): avoid calling memcpy() with a null
@@ -12,10 +12,10 @@ CVE: CVE-2022-0561
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
-index 23194ced..50ebf8ac 100644
+index ae52ad4..d654a1c 100644
--- a/libtiff/tif_dirread.c
+++ b/libtiff/tif_dirread.c
-@@ -5777,8 +5777,9 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEntry* dir, uint32_t nstrips, uint64_t** l
+@@ -5766,8 +5766,9 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEntry* dir, uint32_t nstrips, uint64_t** l
_TIFFfree(data);
return(0);
}
@@ -27,6 +27,3 @@ index 23194ced..50ebf8ac 100644
_TIFFfree(data);
data=resizeddata;
}
---
-GitLab
-
diff --git a/meta/recipes-multimedia/libtiff/tiff_4.3.0.bb b/meta/recipes-multimedia/libtiff/tiff_4.3.0.bb
index 7a5e4816a6..b4af179e76 100644
--- a/meta/recipes-multimedia/libtiff/tiff_4.3.0.bb
+++ b/meta/recipes-multimedia/libtiff/tiff_4.3.0.bb
@@ -18,6 +18,41 @@ SRC_URI = "http://download.osgeo.org/libtiff/tiff-${PV}.tar.gz \
file://0004-TIFFFetchNormalTag-avoid-calling-memcpy-with-a-null-.patch \
file://0005-fix-the-FPE-in-tiffcrop-393.patch \
file://0006-fix-heap-buffer-overflow-in-tiffcp-278.patch \
+ file://0001-fix-the-FPE-in-tiffcrop-415-427-and-428.patch \
+ file://CVE-2022-1354.patch \
+ file://CVE-2022-1355.patch \
+ file://CVE-2022-34526.patch \
+ file://CVE-2022-2869.patch \
+ file://CVE-2022-2867.patch \
+ file://b258ed69a485a9cfb299d9f060eb2a46c54e5903.patch \
+ file://0001-tiffcrop-Fix-issue-330-and-some-more-from-320-to-349.patch \
+ file://CVE-2022-2953.patch \
+ file://CVE-2022-3970.patch \
+ file://0001-Revised-handling-of-TIFFTAG_INKNAMES-and-related-TIF.patch \
+ file://0001-tiffcrop-S-option-Make-decision-simpler.patch \
+ file://0001-tiffcrop-disable-incompatibility-of-Z-X-Y-z-options-.patch \
+ file://0001-tiffcrop-subroutines-require-a-larger-buffer-fixes-2.patch \
+ file://CVE-2022-48281.patch \
+ file://CVE-2023-0800_0801_0802_0803_0804.patch \
+ file://CVE-2023-0795_0796_0797_0798_0799.patch \
+ file://CVE-2023-25433.patch \
+ file://CVE-2023-25434-CVE-2023-25435.patch \
+ file://CVE-2023-26965.patch \
+ file://CVE-2023-2908.patch \
+ file://CVE-2023-3316.patch \
+ file://CVE-2023-3576.patch \
+ file://CVE-2023-3618.patch \
+ file://CVE-2023-26966.patch \
+ file://CVE-2022-40090.patch \
+ file://CVE-2023-1916.patch \
+ file://CVE-2023-40745.patch \
+ file://CVE-2023-41175.patch \
+ file://CVE-2023-6228.patch \
+ file://CVE-2023-52356.patch \
+ file://CVE-2023-6277-1.patch \
+ file://CVE-2023-6277-2.patch \
+ file://CVE-2023-6277-3.patch \
+ file://CVE-2023-6277-4.patch \
"
SRC_URI[sha256sum] = "0e46e5acb087ce7d1ac53cf4f56a09b221537fc86dfc5daaad1c2e89e1b37ac8"
@@ -31,7 +66,6 @@ CVE_CHECK_IGNORE += "CVE-2015-7313"
# These issues only affect libtiff post-4.3.0 but before 4.4.0,
# caused by 3079627e and fixed by b4e79bfa.
CVE_CHECK_IGNORE += "CVE-2022-1622 CVE-2022-1623"
-
# Issue is in jbig which we don't enable
CVE_CHECK_IGNORE += "CVE-2022-1210"
@@ -47,6 +81,7 @@ PACKAGECONFIG[jbig] = "--enable-jbig,--disable-jbig,jbig,"
PACKAGECONFIG[jpeg] = "--enable-jpeg,--disable-jpeg,jpeg,"
PACKAGECONFIG[zlib] = "--enable-zlib,--disable-zlib,zlib,"
PACKAGECONFIG[lzma] = "--enable-lzma,--disable-lzma,xz,"
+PACKAGECONFIG[webp] = "--enable-webp,--disable-webp,libwebp,"
# Convert single-strip uncompressed images to multiple strips of specified
# size (default: 8192) to reduce memory usage
diff --git a/meta/recipes-multimedia/pulseaudio/pulseaudio.inc b/meta/recipes-multimedia/pulseaudio/pulseaudio.inc
index 821ce7d1df..61d5bb00ba 100644
--- a/meta/recipes-multimedia/pulseaudio/pulseaudio.inc
+++ b/meta/recipes-multimedia/pulseaudio/pulseaudio.inc
@@ -61,7 +61,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=0e5cd938de1a7a53ea5adac38cc10c39 \
"
# libtool is needed for libltdl, used in module loading.
-DEPENDS = "libatomic-ops libsndfile1 libtool"
+DEPENDS = "m4-native libatomic-ops libsndfile1 libtool"
# optional
DEPENDS += "udev alsa-lib glib-2.0"
DEPENDS += "speexdsp libxml-parser-perl-native libcap"
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch b/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch
new file mode 100644
index 0000000000..895d01ea7d
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch
@@ -0,0 +1,60 @@
+From a486d800b60d0af4cc0836bf7ed8f21e12974129 Mon Sep 17 00:00:00 2001
+From: James Zern <jzern@google.com>
+Date: Wed, 22 Feb 2023 22:15:47 -0800
+Subject: [PATCH] EncodeAlphaInternal: clear result->bw on error
+
+This avoids a double free should the function fail prior to
+VP8BitWriterInit() and a previous trial result's buffer carried over.
+Previously in ApplyFiltersAndEncode() trial.bw (with a previous
+iteration's buffer) would be freed, followed by best.bw pointing to the
+same buffer.
+
+Since:
+187d379d add a fallback to ALPHA_NO_COMPRESSION
+
+In addition, check the return value of VP8BitWriterInit() in this
+function.
+
+Bug: webp:603
+Change-Id: Ic258381ee26c8c16bc211d157c8153831c8c6910
+
+CVE: CVE-2023-1999
+
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/a486d800b60d0af4cc0836bf7ed8f21e12974129]
+
+Signed-off-by: Soumya <soumya.sambu@windriver.com>
+---
+ src/enc/alpha_enc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/enc/alpha_enc.c b/src/enc/alpha_enc.c
+index f7c0269..7d20558 100644
+--- a/src/enc/alpha_enc.c
++++ b/src/enc/alpha_enc.c
+@@ -13,6 +13,7 @@
+
+ #include <assert.h>
+ #include <stdlib.h>
++#include <string.h>
+
+ #include "src/enc/vp8i_enc.h"
+ #include "src/dsp/dsp.h"
+@@ -148,6 +149,7 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
+ }
+ } else {
+ VP8LBitWriterWipeOut(&tmp_bw);
++ memset(&result->bw, 0, sizeof(result->bw));
+ return 0;
+ }
+ }
+@@ -162,7 +164,7 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
+ header = method | (filter << 2);
+ if (reduce_levels) header |= ALPHA_PREPROCESSED_LEVELS << 4;
+
+- VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size);
++ if (!VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size)) ok = 0;
+ ok = ok && VP8BitWriterAppend(&result->bw, &header, ALPHA_HEADER_LEN);
+ ok = ok && VP8BitWriterAppend(&result->bw, output, output_size);
+
+--
+2.40.0
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch
new file mode 100644
index 0000000000..e623569352
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch
@@ -0,0 +1,366 @@
+From 902bc9190331343b2017211debcec8d2ab87e17a Mon Sep 17 00:00:00 2001
+From: Vincent Rabaud <vrabaud@google.com>
+Date: Thu, 7 Sep 2023 21:16:03 +0200
+Subject: [PATCH 1/2] Fix OOB write in BuildHuffmanTable.
+
+First, BuildHuffmanTable is called to check if the data is valid.
+If it is and the table is not big enough, more memory is allocated.
+
+This will make sure that valid (but unoptimized because of unbalanced
+codes) streams are still decodable.
+
+Bug: chromium:1479274
+Change-Id: I31c36dbf3aa78d35ecf38706b50464fd3d375741
+
+CVE: CVE-2023-4863
+
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/902bc9190331343b2017211debcec8d2ab87e17a]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/dec/vp8l_dec.c | 46 ++++++++++---------
+ src/dec/vp8li_dec.h | 2 +-
+ src/utils/huffman_utils.c | 97 +++++++++++++++++++++++++++++++--------
+ src/utils/huffman_utils.h | 27 +++++++++--
+ 4 files changed, 129 insertions(+), 43 deletions(-)
+
+diff --git a/src/dec/vp8l_dec.c b/src/dec/vp8l_dec.c
+index 1348055..186b0b2 100644
+--- a/src/dec/vp8l_dec.c
++++ b/src/dec/vp8l_dec.c
+@@ -253,11 +253,11 @@ static int ReadHuffmanCodeLengths(
+ int symbol;
+ int max_symbol;
+ int prev_code_len = DEFAULT_CODE_LENGTH;
+- HuffmanCode table[1 << LENGTHS_TABLE_BITS];
++ HuffmanTables tables;
+
+- if (!VP8LBuildHuffmanTable(table, LENGTHS_TABLE_BITS,
+- code_length_code_lengths,
+- NUM_CODE_LENGTH_CODES)) {
++ if (!VP8LHuffmanTablesAllocate(1 << LENGTHS_TABLE_BITS, &tables) ||
++ !VP8LBuildHuffmanTable(&tables, LENGTHS_TABLE_BITS,
++ code_length_code_lengths, NUM_CODE_LENGTH_CODES)) {
+ goto End;
+ }
+
+@@ -277,7 +277,7 @@ static int ReadHuffmanCodeLengths(
+ int code_len;
+ if (max_symbol-- == 0) break;
+ VP8LFillBitWindow(br);
+- p = &table[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
++ p = &tables.curr_segment->start[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
+ VP8LSetBitPos(br, br->bit_pos_ + p->bits);
+ code_len = p->value;
+ if (code_len < kCodeLengthLiterals) {
+@@ -300,6 +300,7 @@ static int ReadHuffmanCodeLengths(
+ ok = 1;
+
+ End:
++ VP8LHuffmanTablesDeallocate(&tables);
+ if (!ok) dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
+ return ok;
+ }
+@@ -307,7 +308,8 @@ static int ReadHuffmanCodeLengths(
+ // 'code_lengths' is pre-allocated temporary buffer, used for creating Huffman
+ // tree.
+ static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec,
+- int* const code_lengths, HuffmanCode* const table) {
++ int* const code_lengths,
++ HuffmanTables* const table) {
+ int ok = 0;
+ int size = 0;
+ VP8LBitReader* const br = &dec->br_;
+@@ -362,8 +364,7 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ VP8LMetadata* const hdr = &dec->hdr_;
+ uint32_t* huffman_image = NULL;
+ HTreeGroup* htree_groups = NULL;
+- HuffmanCode* huffman_tables = NULL;
+- HuffmanCode* huffman_table = NULL;
++ HuffmanTables* huffman_tables = &hdr->huffman_tables_;
+ int num_htree_groups = 1;
+ int num_htree_groups_max = 1;
+ int max_alphabet_size = 0;
+@@ -372,6 +373,10 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ int* mapping = NULL;
+ int ok = 0;
+
++ // Check the table has been 0 initialized (through InitMetadata).
++ assert(huffman_tables->root.start == NULL);
++ assert(huffman_tables->curr_segment == NULL);
++
+ if (allow_recursion && VP8LReadBits(br, 1)) {
+ // use meta Huffman codes.
+ const int huffman_precision = VP8LReadBits(br, 3) + 2;
+@@ -434,16 +439,15 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+
+ code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
+ sizeof(*code_lengths));
+- huffman_tables = (HuffmanCode*)WebPSafeMalloc(num_htree_groups * table_size,
+- sizeof(*huffman_tables));
+ htree_groups = VP8LHtreeGroupsNew(num_htree_groups);
+
+- if (htree_groups == NULL || code_lengths == NULL || huffman_tables == NULL) {
++ if (htree_groups == NULL || code_lengths == NULL ||
++ !VP8LHuffmanTablesAllocate(num_htree_groups * table_size,
++ huffman_tables)) {
+ dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
+ goto Error;
+ }
+
+- huffman_table = huffman_tables;
+ for (i = 0; i < num_htree_groups_max; ++i) {
+ // If the index "i" is unused in the Huffman image, just make sure the
+ // coefficients are valid but do not store them.
+@@ -468,19 +472,20 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ int max_bits = 0;
+ for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
+ int alphabet_size = kAlphabetSize[j];
+- htrees[j] = huffman_table;
+ if (j == 0 && color_cache_bits > 0) {
+ alphabet_size += (1 << color_cache_bits);
+ }
+- size = ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_table);
++ size =
++ ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_tables);
++ htrees[j] = huffman_tables->curr_segment->curr_table;
+ if (size == 0) {
+ goto Error;
+ }
+ if (is_trivial_literal && kLiteralMap[j] == 1) {
+- is_trivial_literal = (huffman_table->bits == 0);
++ is_trivial_literal = (htrees[j]->bits == 0);
+ }
+- total_size += huffman_table->bits;
+- huffman_table += size;
++ total_size += htrees[j]->bits;
++ huffman_tables->curr_segment->curr_table += size;
+ if (j <= ALPHA) {
+ int local_max_bits = code_lengths[0];
+ int k;
+@@ -515,14 +520,13 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ hdr->huffman_image_ = huffman_image;
+ hdr->num_htree_groups_ = num_htree_groups;
+ hdr->htree_groups_ = htree_groups;
+- hdr->huffman_tables_ = huffman_tables;
+
+ Error:
+ WebPSafeFree(code_lengths);
+ WebPSafeFree(mapping);
+ if (!ok) {
+ WebPSafeFree(huffman_image);
+- WebPSafeFree(huffman_tables);
++ VP8LHuffmanTablesDeallocate(huffman_tables);
+ VP8LHtreeGroupsFree(htree_groups);
+ }
+ return ok;
+@@ -1358,7 +1362,7 @@ static void ClearMetadata(VP8LMetadata* const hdr) {
+ assert(hdr != NULL);
+
+ WebPSafeFree(hdr->huffman_image_);
+- WebPSafeFree(hdr->huffman_tables_);
++ VP8LHuffmanTablesDeallocate(&hdr->huffman_tables_);
+ VP8LHtreeGroupsFree(hdr->htree_groups_);
+ VP8LColorCacheClear(&hdr->color_cache_);
+ VP8LColorCacheClear(&hdr->saved_color_cache_);
+@@ -1673,7 +1677,7 @@ int VP8LDecodeImage(VP8LDecoder* const dec) {
+
+ if (dec == NULL) return 0;
+
+- assert(dec->hdr_.huffman_tables_ != NULL);
++ assert(dec->hdr_.huffman_tables_.root.start != NULL);
+ assert(dec->hdr_.htree_groups_ != NULL);
+ assert(dec->hdr_.num_htree_groups_ > 0);
+
+diff --git a/src/dec/vp8li_dec.h b/src/dec/vp8li_dec.h
+index 72b2e86..32540a4 100644
+--- a/src/dec/vp8li_dec.h
++++ b/src/dec/vp8li_dec.h
+@@ -51,7 +51,7 @@ typedef struct {
+ uint32_t* huffman_image_;
+ int num_htree_groups_;
+ HTreeGroup* htree_groups_;
+- HuffmanCode* huffman_tables_;
++ HuffmanTables huffman_tables_;
+ } VP8LMetadata;
+
+ typedef struct VP8LDecoder VP8LDecoder;
+diff --git a/src/utils/huffman_utils.c b/src/utils/huffman_utils.c
+index 0cba0fb..9efd628 100644
+--- a/src/utils/huffman_utils.c
++++ b/src/utils/huffman_utils.c
+@@ -177,21 +177,24 @@ static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+ if (num_open < 0) {
+ return 0;
+ }
+- if (root_table == NULL) continue;
+ for (; count[len] > 0; --count[len]) {
+ HuffmanCode code;
+ if ((key & mask) != low) {
+- table += table_size;
++ if (root_table != NULL) table += table_size;
+ table_bits = NextTableBitSize(count, len, root_bits);
+ table_size = 1 << table_bits;
+ total_size += table_size;
+ low = key & mask;
+- root_table[low].bits = (uint8_t)(table_bits + root_bits);
+- root_table[low].value = (uint16_t)((table - root_table) - low);
++ if (root_table != NULL) {
++ root_table[low].bits = (uint8_t)(table_bits + root_bits);
++ root_table[low].value = (uint16_t)((table - root_table) - low);
++ }
++ }
++ if (root_table != NULL) {
++ code.bits = (uint8_t)(len - root_bits);
++ code.value = (uint16_t)sorted[symbol++];
++ ReplicateValue(&table[key >> root_bits], step, table_size, code);
+ }
+- code.bits = (uint8_t)(len - root_bits);
+- code.value = (uint16_t)sorted[symbol++];
+- ReplicateValue(&table[key >> root_bits], step, table_size, code);
+ key = GetNextKey(key, len);
+ }
+ }
+@@ -211,25 +214,83 @@ static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+ ((1 << MAX_CACHE_BITS) + NUM_LITERAL_CODES + NUM_LENGTH_CODES)
+ // Cut-off value for switching between heap and stack allocation.
+ #define SORTED_SIZE_CUTOFF 512
+-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
++int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
+ const int code_lengths[], int code_lengths_size) {
+- int total_size;
++ const int total_size =
++ BuildHuffmanTable(NULL, root_bits, code_lengths, code_lengths_size, NULL);
+ assert(code_lengths_size <= MAX_CODE_LENGTHS_SIZE);
+- if (root_table == NULL) {
+- total_size = BuildHuffmanTable(NULL, root_bits,
+- code_lengths, code_lengths_size, NULL);
+- } else if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
++ if (total_size == 0 || root_table == NULL) return total_size;
++
++ if (root_table->curr_segment->curr_table + total_size >=
++ root_table->curr_segment->start + root_table->curr_segment->size) {
++ // If 'root_table' does not have enough memory, allocate a new segment.
++ // The available part of root_table->curr_segment is left unused because we
++ // need a contiguous buffer.
++ const int segment_size = root_table->curr_segment->size;
++ struct HuffmanTablesSegment* next =
++ (HuffmanTablesSegment*)WebPSafeMalloc(1, sizeof(*next));
++ if (next == NULL) return 0;
++ // Fill the new segment.
++ // We need at least 'total_size' but if that value is small, it is better to
++ // allocate a big chunk to prevent more allocations later. 'segment_size' is
++ // therefore chosen (any other arbitrary value could be chosen).
++ next->size = total_size > segment_size ? total_size : segment_size;
++ next->start =
++ (HuffmanCode*)WebPSafeMalloc(next->size, sizeof(*next->start));
++ if (next->start == NULL) {
++ WebPSafeFree(next);
++ return 0;
++ }
++ next->curr_table = next->start;
++ next->next = NULL;
++ // Point to the new segment.
++ root_table->curr_segment->next = next;
++ root_table->curr_segment = next;
++ }
++ if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
+ // use local stack-allocated array.
+ uint16_t sorted[SORTED_SIZE_CUTOFF];
+- total_size = BuildHuffmanTable(root_table, root_bits,
+- code_lengths, code_lengths_size, sorted);
+- } else { // rare case. Use heap allocation.
++ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
++ code_lengths, code_lengths_size, sorted);
++ } else { // rare case. Use heap allocation.
+ uint16_t* const sorted =
+ (uint16_t*)WebPSafeMalloc(code_lengths_size, sizeof(*sorted));
+ if (sorted == NULL) return 0;
+- total_size = BuildHuffmanTable(root_table, root_bits,
+- code_lengths, code_lengths_size, sorted);
++ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
++ code_lengths, code_lengths_size, sorted);
+ WebPSafeFree(sorted);
+ }
+ return total_size;
+ }
++
++int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables) {
++ // Have 'segment' point to the first segment for now, 'root'.
++ HuffmanTablesSegment* const root = &huffman_tables->root;
++ huffman_tables->curr_segment = root;
++ // Allocate root.
++ root->start = (HuffmanCode*)WebPSafeMalloc(size, sizeof(*root->start));
++ if (root->start == NULL) return 0;
++ root->curr_table = root->start;
++ root->next = NULL;
++ root->size = size;
++ return 1;
++}
++
++void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables) {
++ HuffmanTablesSegment *current, *next;
++ if (huffman_tables == NULL) return;
++ // Free the root node.
++ current = &huffman_tables->root;
++ next = current->next;
++ WebPSafeFree(current->start);
++ current->start = NULL;
++ current->next = NULL;
++ current = next;
++ // Free the following nodes.
++ while (current != NULL) {
++ next = current->next;
++ WebPSafeFree(current->start);
++ WebPSafeFree(current);
++ current = next;
++ }
++}
+diff --git a/src/utils/huffman_utils.h b/src/utils/huffman_utils.h
+index 13b7ad1..98415c5 100644
+--- a/src/utils/huffman_utils.h
++++ b/src/utils/huffman_utils.h
+@@ -43,6 +43,29 @@ typedef struct {
+ // or non-literal symbol otherwise
+ } HuffmanCode32;
+
++// Contiguous memory segment of HuffmanCodes.
++typedef struct HuffmanTablesSegment {
++ HuffmanCode* start;
++ // Pointer to where we are writing into the segment. Starts at 'start' and
++ // cannot go beyond 'start' + 'size'.
++ HuffmanCode* curr_table;
++ // Pointer to the next segment in the chain.
++ struct HuffmanTablesSegment* next;
++ int size;
++} HuffmanTablesSegment;
++
++// Chained memory segments of HuffmanCodes.
++typedef struct HuffmanTables {
++ HuffmanTablesSegment root;
++ // Currently processed segment. At first, this is 'root'.
++ HuffmanTablesSegment* curr_segment;
++} HuffmanTables;
++
++// Allocates a HuffmanTables with 'size' contiguous HuffmanCodes. Returns 0 on
++// memory allocation error, 1 otherwise.
++int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables);
++void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables);
++
+ #define HUFFMAN_PACKED_BITS 6
+ #define HUFFMAN_PACKED_TABLE_SIZE (1u << HUFFMAN_PACKED_BITS)
+
+@@ -78,9 +101,7 @@ void VP8LHtreeGroupsFree(HTreeGroup* const htree_groups);
+ // the huffman table.
+ // Returns built table size or 0 in case of error (invalid tree or
+ // memory error).
+-// If root_table is NULL, it returns 0 if a lookup cannot be built, something
+-// > 0 otherwise (but not the table size).
+-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
++int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
+ const int code_lengths[], int code_lengths_size);
+
+ #ifdef __cplusplus
+--
+2.40.0
+
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch
new file mode 100644
index 0000000000..231894e882
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch
@@ -0,0 +1,53 @@
+From 95ea5226c870449522240ccff26f0b006037c520 Mon Sep 17 00:00:00 2001
+From: Vincent Rabaud <vrabaud@google.com>
+Date: Mon, 11 Sep 2023 16:06:08 +0200
+Subject: [PATCH 2/2] Fix invalid incremental decoding check.
+
+The first condition is only necessary if we have not read enough
+(enough being defined by src_last, not src_end which is the end
+of the image).
+The second condition now fits the comment below: "if not
+incremental, and we are past the end of buffer".
+
+BUG=oss-fuzz:62136
+
+Change-Id: I0700f67c62db8e1c02c2e429a069a71e606a5e4f
+
+CVE: CVE-2023-4863
+
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/95ea5226c870449522240ccff26f0b006037c520]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/dec/vp8l_dec.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/src/dec/vp8l_dec.c b/src/dec/vp8l_dec.c
+index 186b0b2..59a9e64 100644
+--- a/src/dec/vp8l_dec.c
++++ b/src/dec/vp8l_dec.c
+@@ -1241,9 +1241,20 @@ static int DecodeImageData(VP8LDecoder* const dec, uint32_t* const data,
+ }
+
+ br->eos_ = VP8LIsEndOfStream(br);
+- if (dec->incremental_ && br->eos_ && src < src_end) {
++ // In incremental decoding:
++ // br->eos_ && src < src_last: if 'br' reached the end of the buffer and
++ // 'src_last' has not been reached yet, there is not enough data. 'dec' has to
++ // be reset until there is more data.
++ // !br->eos_ && src < src_last: this cannot happen as either the buffer is
++ // fully read, either enough has been read to reach 'src_last'.
++ // src >= src_last: 'src_last' is reached, all is fine. 'src' can actually go
++ // beyond 'src_last' in case the image is cropped and an LZ77 goes further.
++ // The buffer might have been enough or there is some left. 'br->eos_' does
++ // not matter.
++ assert(!dec->incremental_ || (br->eos_ && src < src_last) || src >= src_last);
++ if (dec->incremental_ && br->eos_ && src < src_last) {
+ RestoreState(dec);
+- } else if (!br->eos_) {
++ } else if ((dec->incremental_ && src >= src_last) || !br->eos_) {
+ // Process the remaining rows corresponding to last row-block.
+ if (process_func != NULL) {
+ process_func(dec, row > last_row ? last_row : row);
+--
+2.40.0
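The hunk above replaces the check against src_end with one against src_last and documents the possible states. As a quick cross-check of that reasoning, here is a stand-alone restatement of the two conditions (hypothetical helper names, plain offsets instead of pointers; not libwebp code):

  #include <cassert>

  // Mirrors the patched check in DecodeImageData(), simplified for clarity.
  static bool NeedMoreData(bool incremental, bool eos, long src, long src_last) {
    // Incremental decode hit the end of the available buffer before reaching
    // src_last: state is restored and the decoder waits for more input.
    return incremental && eos && src < src_last;
  }

  static bool ProcessLastRows(bool incremental, bool eos, long src, long src_last) {
    // Either the incremental decode reached src_last (it may even overshoot it
    // when an LZ77 copy runs past a cropped image), or the bit reader has not
    // exhausted the buffer; in both cases the final row-block can be processed.
    return (incremental && src >= src_last) || !eos;
  }

  int main() {
    assert(NeedMoreData(true, true, 10, 20));       // truncated incremental input
    assert(ProcessLastRows(true, true, 25, 20));    // reached or passed src_last
    assert(!NeedMoreData(false, true, 10, 20));     // non-incremental: no restore
    assert(!ProcessLastRows(false, true, 10, 20));  // left to the error path
    return 0;
  }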
diff --git a/meta/recipes-multimedia/webp/libwebp_1.2.2.bb b/meta/recipes-multimedia/webp/libwebp_1.2.4.bb
index 281cff1bf2..a6cdc0c510 100644
--- a/meta/recipes-multimedia/webp/libwebp_1.2.2.bb
+++ b/meta/recipes-multimedia/webp/libwebp_1.2.4.bb
@@ -13,8 +13,12 @@ LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://COPYING;md5=6e8dee932c26f2dab503abf70c96d8bb \
file://PATENTS;md5=c6926d0cb07d296f886ab6e0cc5a85b7"
-SRC_URI = "http://downloads.webmproject.org/releases/webp/${BP}.tar.gz"
-SRC_URI[sha256sum] = "7656532f837af5f4cec3ff6bafe552c044dc39bf453587bd5b77450802f4aee6"
+SRC_URI = "http://downloads.webmproject.org/releases/webp/${BP}.tar.gz \
+ file://CVE-2023-1999.patch \
+ file://CVE-2023-4863-0001.patch \
+ file://CVE-2023-4863-0002.patch \
+ "
+SRC_URI[sha256sum] = "7bf5a8a28cc69bcfa8cb214f2c3095703c6b73ac5fba4d5480c205331d9494df"
UPSTREAM_CHECK_URI = "http://downloads.webmproject.org/releases/webp/index.html"
diff --git a/meta/recipes-rt/rt-tests/files/rt_bmark.py b/meta/recipes-rt/rt-tests/files/rt_bmark.py
index 3b84447a0f..2a4eed412f 100755
--- a/meta/recipes-rt/rt-tests/files/rt_bmark.py
+++ b/meta/recipes-rt/rt-tests/files/rt_bmark.py
@@ -265,7 +265,7 @@ cmd = ("cyclictest",
"-d", str(interval_delta),
"-l", str(loop_count)
)
-rex = re.compile(b"C:\s*(\d+).*Min:\s*(\d+).*Avg:\s*(\d+).*Max:\s*(\d+)")
+rex = re.compile(r"C:\s*(\d+).*Min:\s*(\d+).*Avg:\s*(\d+).*Max:\s*(\d+)")
def run_cyclictest_once():
res = subprocess.check_output(cmd)
diff --git a/meta/recipes-sato/webkit/libwpe_1.12.0.bb b/meta/recipes-sato/webkit/libwpe_1.12.3.bb
index ac4ee3eb23..77ca517ef7 100644
--- a/meta/recipes-sato/webkit/libwpe_1.12.0.bb
+++ b/meta/recipes-sato/webkit/libwpe_1.12.3.bb
@@ -11,7 +11,7 @@ inherit cmake features_check pkgconfig
REQUIRED_DISTRO_FEATURES = "opengl"
SRC_URI = "https://wpewebkit.org/releases/${BPN}-${PV}.tar.xz"
-SRC_URI[sha256sum] = "e8eeca228a6b4c36294cfb63f7d3ba9ada47a430904a5a973b3c99c96a44c18c"
+SRC_URI[sha256sum] = "b84fdbfbc849ce4fdf084bb28b58e5463b1b4b6cc8f200dc77b41f8545d5329d"
# This is a tweak of upstream-version-is-even needed because
# upstream directory contains tarballs for other components as well.
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2022-32888.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-32888.patch
new file mode 100644
index 0000000000..1a6b685450
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-32888.patch
@@ -0,0 +1,41 @@
+CVE: CVE-2022-32888
+Upstream-Status: Backport [https://github.com/WebKit/WebKit/commit/a3dd7dc]
+
+[1]: https://support.apple.com/en-us/HT213446
+[2]: https://bugs.webkit.org/show_bug.cgi?id=242047
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From a3dd7dc5f60b87a7cfd14c372e40ebd339076763 Mon Sep 17 00:00:00 2001
+From: Yusuke Suzuki <ysuzuki@apple.com>
+Date: Mon, 27 Jun 2022 21:34:55 -0700
+Subject: [PATCH] [JSC] Drop wasm stale assertion
+ https://bugs.webkit.org/show_bug.cgi?id=242047 rdar://95866655
+
+Reviewed by Mark Lam.
+
+This patch drops stale assertion in addDelegateToUnreachable.
+
+* Source/JavaScriptCore/wasm/WasmLLIntGenerator.cpp:
+(JSC::Wasm::LLIntGenerator::addDelegateToUnreachable):
+
+Canonical link: https://commits.webkit.org/251902@main
+---
+ Source/JavaScriptCore/wasm/WasmLLIntGenerator.cpp | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/Source/JavaScriptCore/wasm/WasmLLIntGenerator.cpp b/Source/JavaScriptCore/wasm/WasmLLIntGenerator.cpp
+index 39fb39b3331f..d0d2b9725991 100644
+--- a/Source/JavaScriptCore/wasm/WasmLLIntGenerator.cpp
++++ b/Source/JavaScriptCore/wasm/WasmLLIntGenerator.cpp
+@@ -1182,7 +1182,6 @@ auto LLIntGenerator::addDelegateToUnreachable(ControlType& target, ControlType&
+
+ ControlTry& tryData = std::get<ControlTry>(data);
+ m_codeBlock->addExceptionHandler({ HandlerType::Delegate, tryData.m_try->location(), delegateLabel->location(), 0, m_tryDepth, targetDepth });
+- checkConsistency();
+ return { };
+ }
+
+--
+2.34.1
+
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2022-32923.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-32923.patch
new file mode 100644
index 0000000000..60342a14f8
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-32923.patch
@@ -0,0 +1,435 @@
+CVE: CVE-2022-32923
+Upstream-Status: Backport [https://github.com/WebKit/WebKit/commit/ef76e31]
+
+[1]: https://support.apple.com/en-us/HT213495
+[2]: https://bugs.webkit.org/show_bug.cgi?id=242964
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From ef76e31a2a066c3d65a9c94a9e2cd88133260c1f Mon Sep 17 00:00:00 2001
+From: Yusuke Suzuki <ysuzuki@apple.com>
+Date: Wed, 20 Jul 2022 19:30:48 -0700
+Subject: [PATCH] [JSC] BackwardsPropagationPhase should carry NaN / Infinity
+ handling https://bugs.webkit.org/show_bug.cgi?id=242964 rdar://96791603
+
+Reviewed by Mark Lam.
+
+For correctness, we should carry NaN / Infinity handling to make it more clear in the code generation site.
+
+* Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp:
+(JSC::DFG::BackwardsPropagationPhase::propagate):
+* Source/JavaScriptCore/dfg/DFGFixupPhase.cpp:
+(JSC::DFG::FixupPhase::fixupArithDivInt32):
+(JSC::DFG::FixupPhase::fixupArithDiv):
+* Source/JavaScriptCore/dfg/DFGGraph.h:
+* Source/JavaScriptCore/dfg/DFGNode.h:
+* Source/JavaScriptCore/dfg/DFGNodeFlags.cpp:
+(JSC::DFG::dumpNodeFlags):
+* Source/JavaScriptCore/dfg/DFGNodeFlags.h:
+(JSC::DFG::bytecodeCanIgnoreNaNAndInfinity):
+(JSC::DFG::nodeCanSpeculateInt32ForDiv):
+* Source/JavaScriptCore/dfg/DFGNodeType.h:
+
+Canonical link: https://commits.webkit.org/252675@main
+---
+ .../dfg/DFGBackwardsPropagationPhase.cpp | 51 +++++++++++--------
+ Source/JavaScriptCore/dfg/DFGFixupPhase.cpp | 6 ++-
+ Source/JavaScriptCore/dfg/DFGGraph.h | 11 ++++
+ Source/JavaScriptCore/dfg/DFGNode.h | 12 +++--
+ Source/JavaScriptCore/dfg/DFGNodeFlags.cpp | 10 ++--
+ Source/JavaScriptCore/dfg/DFGNodeFlags.h | 37 +++++++++++---
+ Source/JavaScriptCore/dfg/DFGNodeType.h | 3 +-
+ 7 files changed, 91 insertions(+), 39 deletions(-)
+
+diff --git a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp
+index 306ea5d6b974..83a08aff7c20 100644
+--- a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp
++++ b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp
+@@ -272,7 +272,7 @@ private:
+ case ValueBitNot:
+ case ArithBitNot: {
+ flags |= NodeBytecodeUsesAsInt;
+- flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther);
++ flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity | NodeBytecodeUsesAsOther);
+ flags &= ~NodeBytecodeUsesAsArrayIndex;
+ node->child1()->mergeFlags(flags);
+ break;
+@@ -291,7 +291,7 @@ private:
+ case BitURShift:
+ case ArithIMul: {
+ flags |= NodeBytecodeUsesAsInt;
+- flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther);
++ flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity | NodeBytecodeUsesAsOther);
+ flags &= ~NodeBytecodeUsesAsArrayIndex;
+ node->child1()->mergeFlags(flags);
+ node->child2()->mergeFlags(flags);
+@@ -308,9 +308,9 @@ private:
+
+ case StringSlice: {
+ node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
+- node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
++ node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex | NodeBytecodeNeedsNaNOrInfinity);
+ if (node->child3())
+- node->child3()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
++ node->child3()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex | NodeBytecodeNeedsNaNOrInfinity);
+ break;
+ }
+
+@@ -320,11 +320,11 @@ private:
+ if (node->numChildren() == 2)
+ m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsValue);
+ else if (node->numChildren() == 3) {
+- m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
++ m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex | NodeBytecodeNeedsNaNOrInfinity);
+ m_graph.varArgChild(node, 2)->mergeFlags(NodeBytecodeUsesAsValue);
+ } else if (node->numChildren() == 4) {
+- m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
+- m_graph.varArgChild(node, 2)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
++ m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex | NodeBytecodeNeedsNaNOrInfinity);
++ m_graph.varArgChild(node, 2)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex | NodeBytecodeNeedsNaNOrInfinity);
+ m_graph.varArgChild(node, 3)->mergeFlags(NodeBytecodeUsesAsValue);
+ }
+ break;
+@@ -345,6 +345,7 @@ private:
+ flags |= NodeBytecodeUsesAsNumber;
+ if (!m_allowNestedOverflowingAdditions)
+ flags |= NodeBytecodeUsesAsNumber;
++ flags |= NodeBytecodeNeedsNaNOrInfinity;
+
+ node->child1()->mergeFlags(flags);
+ node->child2()->mergeFlags(flags);
+@@ -359,6 +360,7 @@ private:
+ flags |= NodeBytecodeUsesAsNumber;
+ if (!m_allowNestedOverflowingAdditions)
+ flags |= NodeBytecodeUsesAsNumber;
++ flags |= NodeBytecodeNeedsNaNOrInfinity;
+
+ node->child1()->mergeFlags(flags);
+ node->child2()->mergeFlags(flags);
+@@ -366,7 +368,7 @@ private:
+ }
+
+ case ArithClz32: {
+- flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther | ~NodeBytecodeUsesAsArrayIndex);
++ flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity | NodeBytecodeUsesAsOther | ~NodeBytecodeUsesAsArrayIndex);
+ flags |= NodeBytecodeUsesAsInt;
+ node->child1()->mergeFlags(flags);
+ break;
+@@ -380,6 +382,7 @@ private:
+ flags |= NodeBytecodeUsesAsNumber;
+ if (!m_allowNestedOverflowingAdditions)
+ flags |= NodeBytecodeUsesAsNumber;
++ flags |= NodeBytecodeNeedsNaNOrInfinity;
+
+ node->child1()->mergeFlags(flags);
+ node->child2()->mergeFlags(flags);
+@@ -387,6 +390,7 @@ private:
+ }
+
+ case ArithNegate: {
++ // negation does not care about NaN, Infinity, -Infinity are converted into 0 if the result is evaluated under the integer context.
+ flags &= ~NodeBytecodeUsesAsOther;
+
+ node->child1()->mergeFlags(flags);
+@@ -401,6 +405,7 @@ private:
+ flags |= NodeBytecodeUsesAsNumber;
+ if (!m_allowNestedOverflowingAdditions)
+ flags |= NodeBytecodeUsesAsNumber;
++ flags |= NodeBytecodeNeedsNaNOrInfinity;
+
+ node->child1()->mergeFlags(flags);
+ break;
+@@ -421,7 +426,7 @@ private:
+
+ node->mergeFlags(flags);
+
+- flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero;
++ flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity;
+ flags &= ~NodeBytecodeUsesAsOther;
+
+ node->child1()->mergeFlags(flags);
+@@ -431,7 +436,13 @@ private:
+
+ case ValueDiv:
+ case ArithDiv: {
+- flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero;
++ // ArithDiv / ValueDiv need to have NodeBytecodeUsesAsNumber even if it is used in the context of integer.
++ // For example,
++ // ((@x / @y) + @z) | 0
++ // In this context, (@x / @y) can have integer context at first, but the result can be different if div
++ // generates NaN. Div and Mod are operations that can produce NaN / Infinity though only taking binary Int32 operands.
++ // Thus, we always need to check for overflow since it can affect downstream calculations.
++ flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity;
+ flags &= ~NodeBytecodeUsesAsOther;
+
+ node->child1()->mergeFlags(flags);
+@@ -441,7 +452,7 @@ private:
+
+ case ValueMod:
+ case ArithMod: {
+- flags |= NodeBytecodeUsesAsNumber;
++ flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity;
+ flags &= ~NodeBytecodeUsesAsOther;
+
+ node->child1()->mergeFlags(flags);
+@@ -452,7 +463,7 @@ private:
+ case EnumeratorGetByVal:
+ case GetByVal: {
+ m_graph.varArgChild(node, 0)->mergeFlags(NodeBytecodeUsesAsValue);
+- m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
++ m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeNeedsNaNOrInfinity | NodeBytecodeUsesAsArrayIndex);
+ break;
+ }
+
+@@ -461,13 +472,13 @@ private:
+ // Negative zero is not observable. NaN versus undefined are only observable
+ // in that you would get a different exception message. So, like, whatever: we
+ // claim here that NaN v. undefined is observable.
+- node->child1()->mergeFlags(NodeBytecodeUsesAsInt | NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsArrayIndex);
++ node->child1()->mergeFlags(NodeBytecodeUsesAsInt | NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeNeedsNaNOrInfinity | NodeBytecodeUsesAsArrayIndex);
+ break;
+ }
+
+ case ToString:
+ case CallStringConstructor: {
+- node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
++ node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeNeedsNaNOrInfinity);
+ break;
+ }
+
+@@ -487,15 +498,15 @@ private:
+ case CompareBelowEq:
+ case CompareEq:
+ case CompareStrictEq: {
+- node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
+- node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
++ node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeNeedsNaNOrInfinity);
++ node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeNeedsNaNOrInfinity);
+ break;
+ }
+
+ case PutByValDirect:
+ case PutByVal: {
+ m_graph.varArgChild(node, 0)->mergeFlags(NodeBytecodeUsesAsValue);
+- m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
++ m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex | NodeBytecodeNeedsNaNOrInfinity);
+ m_graph.varArgChild(node, 2)->mergeFlags(NodeBytecodeUsesAsValue);
+ break;
+ }
+@@ -508,20 +519,20 @@ private:
+ // then -0 and 0 are treated the same. We don't need NodeBytecodeUsesAsOther
+ // because if all of the cases are integers then NaN and undefined are
+ // treated the same (i.e. they will take default).
+- node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsInt);
++ node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsInt | NodeBytecodeNeedsNaNOrInfinity);
+ break;
+ case SwitchChar: {
+ // We don't need NodeBytecodeNeedsNegZero because if the cases are all strings
+ // then -0 and 0 are treated the same. We don't need NodeBytecodeUsesAsOther
+ // because if all of the cases are single-character strings then NaN
+ // and undefined are treated the same (i.e. they will take default).
+- node->child1()->mergeFlags(NodeBytecodeUsesAsNumber);
++ node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNaNOrInfinity);
+ break;
+ }
+ case SwitchString:
+ // We don't need NodeBytecodeNeedsNegZero because if the cases are all strings
+ // then -0 and 0 are treated the same.
+- node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
++ node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeNeedsNaNOrInfinity);
+ break;
+ case SwitchCell:
+ // There is currently no point to being clever here since this is used for switching
+diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+index e8bee58ada15..b679539de2e6 100644
+--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
++++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+@@ -81,7 +81,9 @@ private:
+ if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7IDIVSupported()) {
+ fixIntOrBooleanEdge(leftChild);
+ fixIntOrBooleanEdge(rightChild);
+- if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
++ // We need to be careful about skipping overflow check because div / mod can generate non integer values
++ // from (Int32, Int32) inputs. For now, we always check non-zero divisor.
++ if (bytecodeCanTruncateInteger(node->arithNodeFlags()) && bytecodeCanIgnoreNaNAndInfinity(node->arithNodeFlags()) && bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
+ node->setArithMode(Arith::Unchecked);
+ else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
+ node->setArithMode(Arith::CheckOverflow);
+@@ -122,7 +124,7 @@ private:
+
+ void fixupArithDiv(Node* node, Edge& leftChild, Edge& rightChild)
+ {
+- if (m_graph.binaryArithShouldSpeculateInt32(node, FixupPass)) {
++ if (m_graph.divShouldSpeculateInt32(node, FixupPass)) {
+ fixupArithDivInt32(node, leftChild, rightChild);
+ return;
+ }
+diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
+index ca566d3a484e..284c87672849 100644
+--- a/Source/JavaScriptCore/dfg/DFGGraph.h
++++ b/Source/JavaScriptCore/dfg/DFGGraph.h
+@@ -373,6 +373,17 @@ public:
+
+ return shouldSpeculateInt52ForAdd(left) && shouldSpeculateInt52ForAdd(right);
+ }
++
++ bool divShouldSpeculateInt32(Node* node, PredictionPass pass)
++ {
++ // Even if inputs are Int32, div can generate NaN or Infinity.
++ // Thus, Overflow in div can be caused by these non integer values as well as actual Int32 overflow.
++ Node* left = node->child1().node();
++ Node* right = node->child2().node();
++
++ return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
++ && nodeCanSpeculateInt32ForDiv(node->arithNodeFlags(), node->sourceFor(pass));
++ }
+
+ bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
+ {
+diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
+index f9ff50658e93..04509a3846ca 100644
+--- a/Source/JavaScriptCore/dfg/DFGNode.h
++++ b/Source/JavaScriptCore/dfg/DFGNode.h
+@@ -3308,21 +3308,25 @@ public:
+ out.printf(", @%u", child3()->index());
+ }
+
+- NodeOrigin origin;
++ NO_UNIQUE_ADDRESS NodeOrigin origin;
+
++private:
++ NO_UNIQUE_ADDRESS NodeType m_op;
++
++ NO_UNIQUE_ADDRESS unsigned m_index { std::numeric_limits<unsigned>::max() };
++
++public:
+ // References to up to 3 children, or links to a variable length set of children.
+ AdjacencyList children;
+
+ private:
+ friend class B3::SparseCollection<Node>;
+
+- unsigned m_index { std::numeric_limits<unsigned>::max() };
+- unsigned m_op : 10; // real type is NodeType
+- unsigned m_flags : 21;
+ // The virtual register number (spill location) associated with this .
+ VirtualRegister m_virtualRegister;
+ // The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects).
+ unsigned m_refCount;
++ NodeFlags m_flags;
+ // The prediction ascribed to this node after propagation.
+ SpeculatedType m_prediction { SpecNone };
+ // Immediate values, accesses type-checked via accessors above.
+diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
+index 88242947f6ef..0c53cd976c5c 100644
+--- a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
++++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
+@@ -74,12 +74,14 @@ void dumpNodeFlags(PrintStream& actualOut, NodeFlags flags)
+ out.print(comma, "VarArgs");
+
+ if (flags & NodeResultMask) {
+- if (!(flags & NodeBytecodeUsesAsNumber) && !(flags & NodeBytecodeNeedsNegZero))
++ if (!(flags & NodeBytecodeUsesAsNumber))
+ out.print(comma, "PureInt");
+- else if (!(flags & NodeBytecodeUsesAsNumber))
+- out.print(comma, "PureInt(w/ neg zero)");
+- else if (!(flags & NodeBytecodeNeedsNegZero))
++ else
+ out.print(comma, "PureNum");
++ if (flags & NodeBytecodeNeedsNegZero)
++ out.print(comma, "NeedsNegZero");
++ if (flags & NodeBytecodeNeedsNaNOrInfinity)
++ out.print(comma, "NeedsNaNOrInfinity");
+ if (flags & NodeBytecodeUsesAsOther)
+ out.print(comma, "UseAsOther");
+ }
+diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.h b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
+index 2ebe3544f601..aa60db7e6ba0 100644
+--- a/Source/JavaScriptCore/dfg/DFGNodeFlags.h
++++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
+@@ -61,18 +61,19 @@ namespace JSC { namespace DFG {
+ #define NodeBytecodeUseBottom 0x00000
+ #define NodeBytecodeUsesAsNumber 0x04000 // The result of this computation may be used in a context that observes fractional, or bigger-than-int32, results.
+ #define NodeBytecodeNeedsNegZero 0x08000 // The result of this computation may be used in a context that observes -0.
+-#define NodeBytecodeUsesAsOther 0x10000 // The result of this computation may be used in a context that distinguishes between NaN and other things (like undefined).
+-#define NodeBytecodeUsesAsInt 0x20000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values.
+-#define NodeBytecodeUsesAsArrayIndex 0x40000 // The result of this computation is known to be used in a context that strongly prefers integer values, to the point that we should avoid using doubles if at all possible.
+-#define NodeBytecodeUsesAsValue (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther)
+-#define NodeBytecodeBackPropMask (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex)
++#define NodeBytecodeNeedsNaNOrInfinity 0x10000 // The result of this computation may be used in a context that observes NaN or Infinity.
++#define NodeBytecodeUsesAsOther 0x20000 // The result of this computation may be used in a context that distinguishes between NaN and other things (like undefined).
++#define NodeBytecodeUsesAsInt 0x40000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values.
++#define NodeBytecodeUsesAsArrayIndex 0x80000 // The result of this computation is known to be used in a context that strongly prefers integer values, to the point that we should avoid using doubles if at all possible.
++#define NodeBytecodeUsesAsValue (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity | NodeBytecodeUsesAsOther)
++#define NodeBytecodeBackPropMask (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeNeedsNaNOrInfinity | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex)
+
+ #define NodeArithFlagsMask (NodeBehaviorMask | NodeBytecodeBackPropMask)
+
+-#define NodeIsFlushed 0x80000 // Computed by CPSRethreadingPhase, will tell you which local nodes are backwards-reachable from a Flush.
++#define NodeIsFlushed 0x100000 // Computed by CPSRethreadingPhase, will tell you which local nodes are backwards-reachable from a Flush.
+
+-#define NodeMiscFlag1 0x100000
+-#define NodeMiscFlag2 0x200000
++#define NodeMiscFlag1 0x200000
++#define NodeMiscFlag2 0x400000
+
+ typedef uint32_t NodeFlags;
+
+@@ -91,6 +92,11 @@ static inline bool bytecodeCanIgnoreNegativeZero(NodeFlags flags)
+ return !(flags & NodeBytecodeNeedsNegZero);
+ }
+
++static inline bool bytecodeCanIgnoreNaNAndInfinity(NodeFlags flags)
++{
++ return !(flags & NodeBytecodeNeedsNaNOrInfinity);
++}
++
+ enum RareCaseProfilingSource {
+ BaselineRareCase, // Comes from slow case counting in the baseline JIT.
+ DFGRareCase, // Comes from OSR exit profiles.
+@@ -147,6 +153,21 @@ static inline bool nodeCanSpeculateInt32(NodeFlags flags, RareCaseProfilingSourc
+ return true;
+ }
+
++static inline bool nodeCanSpeculateInt32ForDiv(NodeFlags flags, RareCaseProfilingSource source)
++{
++ if (nodeMayOverflowInt32(flags, source)) {
++ if (bytecodeUsesAsNumber(flags))
++ return false;
++ if (!bytecodeCanIgnoreNaNAndInfinity(flags))
++ return false;
++ }
++
++ if (nodeMayNegZero(flags, source))
++ return bytecodeCanIgnoreNegativeZero(flags);
++
++ return true;
++}
++
+ static inline bool nodeCanSpeculateInt52(NodeFlags flags, RareCaseProfilingSource source)
+ {
+ if (nodeMayOverflowInt52(flags, source))
+diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
+index 8f885b570665..aad4d559ccf7 100644
+--- a/Source/JavaScriptCore/dfg/DFGNodeType.h
++++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
+@@ -567,7 +567,7 @@ namespace JSC { namespace DFG {
+
+ // This enum generates a monotonically increasing id for all Node types,
+ // and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask).
+-enum NodeType {
++enum NodeType : uint16_t {
+ #define DFG_OP_ENUM(opcode, flags) opcode,
+ FOR_EACH_DFG_OP(DFG_OP_ENUM)
+ #undef DFG_OP_ENUM
+@@ -577,6 +577,7 @@ enum NodeType {
+ #define DFG_OP_COUNT(opcode, flags) + 1
+ constexpr unsigned numberOfNodeTypes = FOR_EACH_DFG_OP(DFG_OP_COUNT);
+ #undef DFG_OP_COUNT
++static_assert(numberOfNodeTypes <= UINT16_MAX);
+
+ // Specifies the default flags for each node.
+ inline NodeFlags defaultFlags(NodeType op)
+--
+2.34.1
+
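The commit above exists because ArithDiv/ArithMod can produce NaN or Infinity from two Int32 operands, and in JavaScript an integer context such as (x / y) | 0 silently converts both to 0, so the compiler may only drop those checks when the new NodeBytecodeNeedsNaNOrInfinity flag says the context ignores them. The following stand-alone sketch of the ToInt32 conversion involved (simplified; not JSC code) makes the folding hazard concrete:

  #include <cmath>
  #include <cstdint>
  #include <cstdio>
  #include <limits>

  // Simplified model of ECMAScript ToInt32, for illustration only.
  static int32_t toInt32(double d) {
    if (std::isnan(d) || std::isinf(d) || d == 0)
      return 0;
    double truncated = std::trunc(d);
    // Wrap into the uint32 range, then reinterpret as signed.
    double wrapped = std::fmod(truncated, 4294967296.0);
    if (wrapped < 0)
      wrapped += 4294967296.0;
    uint32_t u = static_cast<uint32_t>(wrapped);
    return static_cast<int32_t>(u);
  }

  int main() {
    const double inf = std::numeric_limits<double>::infinity();
    const double qnan = std::numeric_limits<double>::quiet_NaN();
    // In JavaScript, (1/0)|0 and (0/0)|0 both evaluate to 0 even though both
    // operands of the division are Int32 values; a plain int32 division would
    // give a different (or undefined) result, which is the difference the
    // NodeBytecodeNeedsNaNOrInfinity flag tracks.
    std::printf("%d %d %d\n", toInt32(inf), toInt32(qnan), toInt32(-2.75));  // 0 0 -2
    return 0;
  }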
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2022-42867.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-42867.patch
new file mode 100644
index 0000000000..bf06809051
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-42867.patch
@@ -0,0 +1,104 @@
+From f67a882170609d15836204a689dc552322fbe653 Mon Sep 17 00:00:00 2001
+From: Yogita Urade <yogita.urade@windriver.com>
+Date: Wed, 7 Jun 2023 08:15:11 +0000
+Subject: [PATCH] RenderElement::updateFillImages
+ should take pointer arguments like other similar functions
+ https://bugs.webkit.org/show_bug.cgi?id=247317 rdar://100273147
+
+Reviewed by Alan Baradlay.
+
+* Source/WebCore/rendering/RenderElement.cpp:
+(WebCore::RenderElement::updateFillImages):
+(WebCore::RenderElement::styleDidChange):
+* Source/WebCore/rendering/RenderElement.h:
+
+Canonical link: https://commits.webkit.org/256215@main
+
+CVE: CVE-2022-42867
+
+Upstream-Status: Backport
+[https://github.com/WebKit/WebKit/commit/091a04e55c801ac6ba13f4b328fbee2eece853fc]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ Source/WebCore/rendering/RenderElement.cpp | 27 ++++++++++++++--------
+ Source/WebCore/rendering/RenderElement.h | 2 +-
+ 2 files changed, 19 insertions(+), 10 deletions(-)
+
+diff --git a/Source/WebCore/rendering/RenderElement.cpp b/Source/WebCore/rendering/RenderElement.cpp
+index da43bf3d..931686b8 100644
+--- a/Source/WebCore/rendering/RenderElement.cpp
++++ b/Source/WebCore/rendering/RenderElement.cpp
+@@ -358,7 +358,7 @@ inline bool RenderElement::shouldRepaintForStyleDifference(StyleDifference diff)
+ return diff == StyleDifference::Repaint || (diff == StyleDifference::RepaintIfTextOrBorderOrOutline && hasImmediateNonWhitespaceTextChildOrBorderOrOutline());
+ }
+
+-void RenderElement::updateFillImages(const FillLayer* oldLayers, const FillLayer& newLayers)
++void RenderElement::updateFillImages(const FillLayer* oldLayers, const FillLayer* newLayers)
+ {
+ auto fillImagesAreIdentical = [](const FillLayer* layer1, const FillLayer* layer2) -> bool {
+ if (layer1 == layer2)
+@@ -379,7 +379,7 @@ void RenderElement::updateFillImages(const FillLayer* oldLayers, const FillLayer
+ };
+
+ auto isRegisteredWithNewFillImages = [&]() -> bool {
+- for (auto* layer = &newLayers; layer; layer = layer->next()) {
++ for (auto* layer = newLayers; layer; layer = layer->next()) {
+ if (layer->image() && !layer->image()->hasClient(*this))
+ return false;
+ }
+@@ -388,11 +388,11 @@ void RenderElement::updateFillImages(const FillLayer* oldLayers, const FillLayer
+
+ // If images have the same characteristics and this element is already registered as a
+ // client to the new images, there is nothing to do.
+- if (fillImagesAreIdentical(oldLayers, &newLayers) && isRegisteredWithNewFillImages())
++ if (fillImagesAreIdentical(oldLayers, newLayers) && isRegisteredWithNewFillImages())
+ return;
+
+ // Add before removing, to avoid removing all clients of an image that is in both sets.
+- for (auto* layer = &newLayers; layer; layer = layer->next()) {
++ for (auto* layer = newLayers; layer; layer = layer->next()) {
+ if (layer->image())
+ layer->image()->addClient(*this);
+ }
+@@ -937,11 +937,20 @@ static inline bool areCursorsEqual(const RenderStyle* a, const RenderStyle* b)
+
+ void RenderElement::styleDidChange(StyleDifference diff, const RenderStyle* oldStyle)
+ {
+- updateFillImages(oldStyle ? &oldStyle->backgroundLayers() : nullptr, m_style.backgroundLayers());
+- updateFillImages(oldStyle ? &oldStyle->maskLayers() : nullptr, m_style.maskLayers());
+- updateImage(oldStyle ? oldStyle->borderImage().image() : nullptr, m_style.borderImage().image());
+- updateImage(oldStyle ? oldStyle->maskBoxImage().image() : nullptr, m_style.maskBoxImage().image());
+- updateShapeImage(oldStyle ? oldStyle->shapeOutside() : nullptr, m_style.shapeOutside());
++ auto registerImages = [this](auto* style, auto* oldStyle) {
++ if (!style && !oldStyle)
++ return;
++ updateFillImages(oldStyle ? &oldStyle->backgroundLayers() : nullptr, style ? &style->backgroundLayers() : nullptr);
++ updateFillImages(oldStyle ? &oldStyle->maskLayers() : nullptr, style ? &style->maskLayers() : nullptr);
++ updateImage(oldStyle ? oldStyle->borderImage().image() : nullptr, style ? style->borderImage().image() : nullptr);
++ updateImage(oldStyle ? oldStyle->maskBoxImage().image() : nullptr, style ? style->maskBoxImage().image() : nullptr);
++ updateShapeImage(oldStyle ? oldStyle->shapeOutside() : nullptr, style ? style->shapeOutside() : nullptr);
++ };
++
++ registerImages(&style(), oldStyle);
++
++ // Are there other pseudo-elements that need the resources to be registered?
++ registerImages(style().getCachedPseudoStyle(PseudoId::FirstLine), oldStyle ? oldStyle->getCachedPseudoStyle(PseudoId::FirstLine) : nullptr);
+
+ SVGRenderSupport::styleChanged(*this, oldStyle);
+
+diff --git a/Source/WebCore/rendering/RenderElement.h b/Source/WebCore/rendering/RenderElement.h
+index f376cecb..d6ba2cdf 100644
+--- a/Source/WebCore/rendering/RenderElement.h
++++ b/Source/WebCore/rendering/RenderElement.h
+@@ -349,7 +349,7 @@ private:
+ bool shouldRepaintForStyleDifference(StyleDifference) const;
+ bool hasImmediateNonWhitespaceTextChildOrBorderOrOutline() const;
+
+- void updateFillImages(const FillLayer*, const FillLayer&);
++ void updateFillImages(const FillLayer*, const FillLayer*);
+ void updateImage(StyleImage*, StyleImage*);
+ void updateShapeImage(const ShapeValue*, const ShapeValue*);
+
+--
+2.35.5
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46691.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46691.patch
new file mode 100644
index 0000000000..ff9df40433
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46691.patch
@@ -0,0 +1,43 @@
+From fd57a49d07c9c285780495344073350182fd7c7c Mon Sep 17 00:00:00 2001
+From: Yijia Huang <hyjorc1@gmail.com>
+Date: Mon, 10 Oct 2022 15:42:34 -0700
+Subject: [PATCH] [JSC] Should model BigInt with side effects
+ https://bugs.webkit.org/show_bug.cgi?id=246291 rdar://100494823
+
+Reviewed by Yusuke Suzuki.
+
+Operations with two BigInt operands have side effects,
+which should not be hoisted from loops.
+
+* Source/JavaScriptCore/dfg/DFGClobberize.cpp:
+(JSC::DFG::doesWrites):
+* Source/JavaScriptCore/dfg/DFGClobberize.h:
+(JSC::DFG::clobberize):
+
+Canonical link: https://commits.webkit.org/255368@main
+
+CVE: CVE-2022-46691
+
+Upstream-Status: Backport
+[https://github.com/WebKit/WebKit/commit/fd57a49d07c9c285780495344073350182fd7c7c]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ Source/JavaScriptCore/dfg/DFGClobberize.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.h b/Source/JavaScriptCore/dfg/DFGClobberize.h
+index 0363ab20dcd8..4b1bcfea1fd7 100644
+--- a/Source/JavaScriptCore/dfg/DFGClobberize.h
++++ b/Source/JavaScriptCore/dfg/DFGClobberize.h
+@@ -811,6 +811,8 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
+ case ValueBitRShift:
+ // FIXME: this use of single-argument isBinaryUseKind would prevent us from specializing (for example) for a HeapBigInt left-operand and a BigInt32 right-operand.
+ if (node->isBinaryUseKind(AnyBigIntUse) || node->isBinaryUseKind(BigInt32Use) || node->isBinaryUseKind(HeapBigIntUse)) {
++ read(World);
++ write(SideState);
+ def(PureValue(node));
+ return;
+ }
+--
+2.40.0
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46699.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46699.patch
new file mode 100644
index 0000000000..0752b9c0e2
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46699.patch
@@ -0,0 +1,136 @@
+From 28686e63de0d3d7270a49b0d6b656467bc4fbf68 Mon Sep 17 00:00:00 2001
+From: Justin Michaud <justin_michaud@apple.com>
+Date: Wed, 9 Nov 2022 19:20:41 -0800
+Subject: [PATCH] Error() ICs should not cache special properties.
+ https://bugs.webkit.org/show_bug.cgi?id=247699
+
+Reviewed by Yusuke Suzuki.
+
+HasOwnProperty/DeleteProperty are not always cacheable for special Error()
+properties like column. These special properties are materialized on-demand
+in materializeErrorInfoIfNeeded, but this function's behaviour can be changed
+by Error.stackTraceLimit without causing a structure transition or firing watchpoints.
+
+That is, we cannot cache property misses, and we cannot assume HasOwnProperty is deterministic
+for a given structure if we are using one of these properties.
+
+* Source/JavaScriptCore/runtime/ErrorInstance.cpp:
+(JSC::ErrorInstance::deleteProperty):
+* Source/JavaScriptCore/runtime/ErrorInstance.h:
+
+Canonical link: https://commits.webkit.org/256519@main
+
+CVE: CVE-2022-46699
+
+Upstream-Status: Backport
+[https://github.com/WebKit/WebKit/commit/28686e63de0d3d7270a49b0d6b656467bc4fbf68]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ JSTests/stress/delete-cache-error.js | 19 ++++++++++++++++++
+ .../get-own-property-slot-cache-error.js | 6 ++++++
+ JSTests/stress/get-property-cache-error.js | 20 +++++++++++++++++++
+ .../JavaScriptCore/runtime/ErrorInstance.cpp | 4 +++-
+ Source/JavaScriptCore/runtime/ErrorInstance.h | 3 ++-
+ 5 files changed, 50 insertions(+), 2 deletions(-)
+ create mode 100644 JSTests/stress/delete-cache-error.js
+ create mode 100644 JSTests/stress/get-own-property-slot-cache-error.js
+ create mode 100644 JSTests/stress/get-property-cache-error.js
+
+diff --git a/JSTests/stress/delete-cache-error.js b/JSTests/stress/delete-cache-error.js
+new file mode 100644
+index 000000000000..d77c09185a13
+--- /dev/null
++++ b/JSTests/stress/delete-cache-error.js
+@@ -0,0 +1,19 @@
++delete Error.stackTraceLimit
++
++// sourceURL is not materialized
++function cacheColumn(o) {
++ delete o.sourceURL
++}
++noInline(cacheColumn)
++
++for (let i = 0; i < 200; ++i) {
++ let e = Error()
++ cacheColumn(e)
++ if (e.sourceURL !== undefined)
++ throw "Test failed on iteration " + i + " " + e.sourceURL
++
++ if (i == 197) {
++ // now it is
++ Error.stackTraceLimit = 10
++ }
++}
+\ No newline at end of file
+diff --git a/JSTests/stress/get-own-property-slot-cache-error.js b/JSTests/stress/get-own-property-slot-cache-error.js
+new file mode 100644
+index 000000000000..f8202213bf79
+--- /dev/null
++++ b/JSTests/stress/get-own-property-slot-cache-error.js
+@@ -0,0 +1,6 @@
++delete Error.stackTraceLimit
++// GetOwnPropertySlot does not materializeErrorInfoIfNeeded because stackString is null.
++Object.hasOwn(Error(), "column")
++Error.stackTraceLimit = 10
++// Now it does
++Object.hasOwn(Error(), "column")
+\ No newline at end of file
+diff --git a/JSTests/stress/get-property-cache-error.js b/JSTests/stress/get-property-cache-error.js
+new file mode 100644
+index 000000000000..b35272ea6fe2
+--- /dev/null
++++ b/JSTests/stress/get-property-cache-error.js
+@@ -0,0 +1,20 @@
++// GetOwnPropertySlot does not materializeErrorInfoIfNeeded because stackString is null.
++delete Error.stackTraceLimit
++expected = undefined
++
++function cacheColumn(o) {
++ return o.column
++}
++noInline(cacheColumn)
++
++for (let i = 0; i < 1000; ++i) {
++ let val = cacheColumn(Error())
++ if (val !== expected)
++ throw "Test failed on iteration " + i + ": " + val
++
++ if (i == 900) {
++ // now it does
++ Error.stackTraceLimit = 10
++ expected = 32
++ }
++}
+\ No newline at end of file
+diff --git a/Source/JavaScriptCore/runtime/ErrorInstance.cpp b/Source/JavaScriptCore/runtime/ErrorInstance.cpp
+index ddf96869e84a..8e5373257d34 100644
+--- a/Source/JavaScriptCore/runtime/ErrorInstance.cpp
++++ b/Source/JavaScriptCore/runtime/ErrorInstance.cpp
+@@ -303,7 +303,9 @@ bool ErrorInstance::deleteProperty(JSCell* cell, JSGlobalObject* globalObject, P
+ {
+ VM& vm = globalObject->vm();
+ ErrorInstance* thisObject = jsCast<ErrorInstance*>(cell);
+- thisObject->materializeErrorInfoIfNeeded(vm, propertyName);
++ bool materializedProperties = thisObject->materializeErrorInfoIfNeeded(vm, propertyName);
++ if (materializedProperties)
++ slot.disableCaching();
+ return Base::deleteProperty(thisObject, globalObject, propertyName, slot);
+ }
+
+diff --git a/Source/JavaScriptCore/runtime/ErrorInstance.h b/Source/JavaScriptCore/runtime/ErrorInstance.h
+index 28807b4ea33e..2afb153a7442 100644
+--- a/Source/JavaScriptCore/runtime/ErrorInstance.h
++++ b/Source/JavaScriptCore/runtime/ErrorInstance.h
+@@ -30,7 +30,8 @@ namespace JSC {
+ class ErrorInstance : public JSNonFinalObject {
+ public:
+ using Base = JSNonFinalObject;
+- static constexpr unsigned StructureFlags = Base::StructureFlags | OverridesGetOwnPropertySlot | OverridesGetOwnSpecialPropertyNames | OverridesPut;
++
++ static constexpr unsigned StructureFlags = Base::StructureFlags | OverridesGetOwnPropertySlot | OverridesGetOwnSpecialPropertyNames | OverridesPut | GetOwnPropertySlotIsImpureForPropertyAbsence;
+ static constexpr bool needsDestruction = true;
+
+ static void destroy(JSCell* cell)
+--
+2.40.0
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46700.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46700.patch
new file mode 100644
index 0000000000..242b8337fa
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-46700.patch
@@ -0,0 +1,67 @@
+From 86fbeb6fcd638e2350b09a43dde355f9830e75da Mon Sep 17 00:00:00 2001
+From: David Degazio <d_degazio@apple.com>
+Date: Tue, 8 Nov 2022 19:54:33 -0800
+Subject: [PATCH] Intl.Locale.prototype.hourCycles leaks empty JSValue to
+ script https://bugs.webkit.org/show_bug.cgi?id=247562 rdar://102031379
+
+Reviewed by Mark Lam.
+
+We currently don't check if IntlLocale::hourCycles returns a null JSArray, which allows it
+to be encoded as an empty JSValue and exposed to user code. This patch throws a TypeError
+when udatpg_open returns a failed status.
+
+* JSTests/stress/intl-locale-invalid-hourCycles.js: Added.
+(main):
+* Source/JavaScriptCore/runtime/IntlLocale.cpp:
+(JSC::IntlLocale::hourCycles):
+
+Canonical link: https://commits.webkit.org/256473@main
+
+CVE: CVE-2022-46700
+
+Upstream-Status: Backport
+[https://github.com/WebKit/WebKit/commit/86fbeb6fcd638e2350b09a43dde355f9830e75da]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ JSTests/stress/intl-locale-invalid-hourCycles.js | 12 ++++++++++++
+ Source/JavaScriptCore/runtime/IntlLocale.cpp | 4 +++-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+ create mode 100644 JSTests/stress/intl-locale-invalid-hourCycles.js
+
+diff --git a/JSTests/stress/intl-locale-invalid-hourCycles.js b/JSTests/stress/intl-locale-invalid-hourCycles.js
+new file mode 100644
+index 000000000000..7b94eb844764
+--- /dev/null
++++ b/JSTests/stress/intl-locale-invalid-hourCycles.js
+@@ -0,0 +1,12 @@
++function main() {
++ const v24 = new Intl.Locale("trimEnd", { 'numberingSystem': "foobar" });
++ let empty = v24.hourCycles;
++ print(empty);
++}
++
++try {
++ main();
++} catch (e) {
++ if (!(e instanceof TypeError))
++ throw e;
++}
+diff --git a/Source/JavaScriptCore/runtime/IntlLocale.cpp b/Source/JavaScriptCore/runtime/IntlLocale.cpp
+index c3c346163a18..bef424727a8a 100644
+--- a/Source/JavaScriptCore/runtime/IntlLocale.cpp
++++ b/Source/JavaScriptCore/runtime/IntlLocale.cpp
+@@ -632,8 +632,10 @@ JSArray* IntlLocale::hourCycles(JSGlobalObject* globalObject)
+
+ UErrorCode status = U_ZERO_ERROR;
+ auto generator = std::unique_ptr<UDateTimePatternGenerator, ICUDeleter<udatpg_close>>(udatpg_open(m_localeID.data(), &status));
+- if (U_FAILURE(status))
++ if (U_FAILURE(status)) {
++ throwTypeError(globalObject, scope, "invalid locale"_s);
+ return nullptr;
++ }
+
+ // Use "j" skeleton and parse pattern to retrieve the configured hour-cycle information.
+ constexpr const UChar skeleton[] = { 'j', 0 };
+--
+2.40.0
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2022-48503.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-48503.patch
new file mode 100644
index 0000000000..b67751736d
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2022-48503.patch
@@ -0,0 +1,225 @@
+From 612c245823a515c8c70c2ad486957bd8a850f0f9 Mon Sep 17 00:00:00 2001
+From: Yusuke Suzuki <ysuzuki@apple.com>
+Date: Tue, 5 Sep 2023 08:40:19 +0000
+Subject: [PATCH] [JSC] Refactor wasm section ordering code
+ https://bugs.webkit.org/show_bug.cgi?id=241931 rdar://83326477
+
+Reviewed by Keith Miller.
+
+This patch refactors existing validateOrder code since it is too adhoc right now.
+
+* Source/JavaScriptCore/wasm/WasmModuleInformation.h:
+(JSC::Wasm::ModuleInformation::dataSegmentsCount const):
+* Source/JavaScriptCore/wasm/WasmSectionParser.cpp:
+(JSC::Wasm::SectionParser::parseData):
+(JSC::Wasm::SectionParser::parseDataCount):
+* Source/JavaScriptCore/wasm/WasmSectionParser.h:
+* Source/JavaScriptCore/wasm/WasmSections.h:
+(JSC::Wasm::orderingNumber):
+(JSC::Wasm::isKnownSection):
+(JSC::Wasm::validateOrder):
+(JSC::Wasm::makeString):
+* Source/JavaScriptCore/wasm/WasmStreamingParser.cpp:
+(JSC::Wasm::StreamingParser::parseSectionPayload):
+(JSC::Wasm::StreamingParser::finalize):
+
+Canonical link: https://commits.webkit.org/251800@main
+
+CVE: CVE-2022-48503
+
+Upstream-Status: Backport [https://github.com/WebKit/WebKit/commit/612c245823a515c8c70c2ad486957bd8a850f0f9]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ .../wasm/WasmModuleInformation.h | 4 +-
+ .../JavaScriptCore/wasm/WasmSectionParser.cpp | 3 ++
+ .../JavaScriptCore/wasm/WasmSectionParser.h | 2 +-
+ Source/JavaScriptCore/wasm/WasmSections.h | 52 +++++++++++--------
+ .../wasm/WasmStreamingParser.cpp | 11 +++-
+ 5 files changed, 45 insertions(+), 27 deletions(-)
+
+diff --git a/Source/JavaScriptCore/wasm/WasmModuleInformation.h b/Source/JavaScriptCore/wasm/WasmModuleInformation.h
+index ae6bbeed..f9f1baf7 100644
+--- a/Source/JavaScriptCore/wasm/WasmModuleInformation.h
++++ b/Source/JavaScriptCore/wasm/WasmModuleInformation.h
+@@ -86,7 +86,7 @@ struct ModuleInformation : public ThreadSafeRefCounted<ModuleInformation> {
+ uint32_t memoryCount() const { return memory ? 1 : 0; }
+ uint32_t tableCount() const { return tables.size(); }
+ uint32_t elementCount() const { return elements.size(); }
+- uint32_t dataSegmentsCount() const { return numberOfDataSegments; }
++ uint32_t dataSegmentsCount() const { return numberOfDataSegments.value_or(0); }
+
+ const TableInformation& table(unsigned index) const { return tables[index]; }
+
+@@ -131,7 +131,7 @@ struct ModuleInformation : public ThreadSafeRefCounted<ModuleInformation> {
+ Vector<CustomSection> customSections;
+ Ref<NameSection> nameSection;
+ BranchHints branchHints;
+- uint32_t numberOfDataSegments { 0 };
++ std::optional<uint32_t> numberOfDataSegments;
+
+ BitVector m_declaredFunctions;
+ BitVector m_declaredExceptions;
+diff --git a/Source/JavaScriptCore/wasm/WasmSectionParser.cpp b/Source/JavaScriptCore/wasm/WasmSectionParser.cpp
+index 5b511811..c55ee3c0 100644
+--- a/Source/JavaScriptCore/wasm/WasmSectionParser.cpp
++++ b/Source/JavaScriptCore/wasm/WasmSectionParser.cpp
+@@ -768,6 +768,8 @@ auto SectionParser::parseData() -> PartialResult
+ uint32_t segmentCount;
+ WASM_PARSER_FAIL_IF(!parseVarUInt32(segmentCount), "can't get Data section's count");
+ WASM_PARSER_FAIL_IF(segmentCount > maxDataSegments, "Data section's count is too big ", segmentCount, " maximum ", maxDataSegments);
++ if (m_info->numberOfDataSegments)
++ WASM_PARSER_FAIL_IF(segmentCount != m_info->numberOfDataSegments.value(), "Data section's count ", segmentCount, " is different from Data Count section's count ", m_info->numberOfDataSegments.value());
+ WASM_PARSER_FAIL_IF(!m_info->data.tryReserveCapacity(segmentCount), "can't allocate enough memory for Data section's ", segmentCount, " segments");
+
+ for (uint32_t segmentNumber = 0; segmentNumber < segmentCount; ++segmentNumber) {
+@@ -847,6 +849,7 @@ auto SectionParser::parseDataCount() -> PartialResult
+ {
+ uint32_t numberOfDataSegments;
+ WASM_PARSER_FAIL_IF(!parseVarUInt32(numberOfDataSegments), "can't get Data Count section's count");
++ WASM_PARSER_FAIL_IF(numberOfDataSegments > maxDataSegments, "Data Count section's count is too big ", numberOfDataSegments , " maximum ", maxDataSegments);
+
+ m_info->numberOfDataSegments = numberOfDataSegments;
+ return { };
+diff --git a/Source/JavaScriptCore/wasm/WasmSectionParser.h b/Source/JavaScriptCore/wasm/WasmSectionParser.h
+index 91fd3ed8..4d7dcbac 100644
+--- a/Source/JavaScriptCore/wasm/WasmSectionParser.h
++++ b/Source/JavaScriptCore/wasm/WasmSectionParser.h
+@@ -44,7 +44,7 @@ public:
+ {
+ }
+
+-#define WASM_SECTION_DECLARE_PARSER(NAME, ID, DESCRIPTION) PartialResult WARN_UNUSED_RETURN parse ## NAME();
++#define WASM_SECTION_DECLARE_PARSER(NAME, ID, ORDERING, DESCRIPTION) PartialResult WARN_UNUSED_RETURN parse ## NAME();
+ FOR_EACH_KNOWN_WASM_SECTION(WASM_SECTION_DECLARE_PARSER)
+ #undef WASM_SECTION_DECLARE_PARSER
+
+diff --git a/Source/JavaScriptCore/wasm/WasmSections.h b/Source/JavaScriptCore/wasm/WasmSections.h
+index bef20701..b422a587 100644
+--- a/Source/JavaScriptCore/wasm/WasmSections.h
++++ b/Source/JavaScriptCore/wasm/WasmSections.h
+@@ -33,20 +33,21 @@ IGNORE_RETURN_TYPE_WARNINGS_BEGIN
+
+ namespace JSC { namespace Wasm {
+
++// macro(Name, ID, OrderingNumber, Description).
+ #define FOR_EACH_KNOWN_WASM_SECTION(macro) \
+- macro(Type, 1, "Function signature declarations") \
+- macro(Import, 2, "Import declarations") \
+- macro(Function, 3, "Function declarations") \
+- macro(Table, 4, "Indirect function table and other tables") \
+- macro(Memory, 5, "Memory attributes") \
+- macro(Global, 6, "Global declarations") \
+- macro(Export, 7, "Exports") \
+- macro(Start, 8, "Start function declaration") \
+- macro(Element, 9, "Elements section") \
+- macro(Code, 10, "Function bodies (code)") \
+- macro(Data, 11, "Data segments") \
+- macro(DataCount, 12, "Data count") \
+- macro(Exception, 13, "Exception declarations") \
++ macro(Type, 1, 1, "Function signature declarations") \
++ macro(Import, 2, 2, "Import declarations") \
++ macro(Function, 3, 3, "Function declarations") \
++ macro(Table, 4, 4, "Indirect function table and other tables") \
++ macro(Memory, 5, 5, "Memory attributes") \
++ macro(Global, 6, 7, "Global declarations") \
++ macro(Export, 7, 8, "Exports") \
++ macro(Start, 8, 9, "Start function declaration") \
++ macro(Element, 9, 10, "Elements section") \
++ macro(Code, 10, 12, "Function bodies (code)") \
++ macro(Data, 11, 13, "Data segments") \
++ macro(DataCount, 12, 11, "Data count") \
++ macro(Exception, 13, 6, "Exception declarations") \
+
+ enum class Section : uint8_t {
+ // It's important that Begin is less than every other section number and that Custom is greater.
+@@ -54,18 +55,29 @@ enum class Section : uint8_t {
+ // Also, Begin is not a real section but is used as a marker for validating the ordering
+ // of sections.
+ Begin = 0,
+-#define DEFINE_WASM_SECTION_ENUM(NAME, ID, DESCRIPTION) NAME = ID,
++#define DEFINE_WASM_SECTION_ENUM(NAME, ID, ORDERING, DESCRIPTION) NAME = ID,
+ FOR_EACH_KNOWN_WASM_SECTION(DEFINE_WASM_SECTION_ENUM)
+ #undef DEFINE_WASM_SECTION_ENUM
+ Custom
+ };
+ static_assert(static_cast<uint8_t>(Section::Begin) < static_cast<uint8_t>(Section::Type), "Begin should come before the first known section.");
+
++inline unsigned orderingNumber(Section section)
++{
++ switch (section) {
++#define ORDERING_OF_SECTION(NAME, ID, ORDERING, DESCRIPTION) case Section::NAME: return ORDERING;
++ FOR_EACH_KNOWN_WASM_SECTION(ORDERING_OF_SECTION)
++#undef VALIDATE_SECTION
++ default:
++ return static_cast<unsigned>(section);
++ }
++}
++
+ template<typename Int>
+ inline bool isKnownSection(Int section)
+ {
+ switch (section) {
+-#define VALIDATE_SECTION(NAME, ID, DESCRIPTION) case static_cast<Int>(Section::NAME): return true;
++#define VALIDATE_SECTION(NAME, ID, ORDERING, DESCRIPTION) case static_cast<Int>(Section::NAME): return true;
+ FOR_EACH_KNOWN_WASM_SECTION(VALIDATE_SECTION)
+ #undef VALIDATE_SECTION
+ default:
+@@ -89,13 +101,7 @@ inline bool decodeSection(uint8_t sectionByte, Section& section)
+ inline bool validateOrder(Section previousKnown, Section next)
+ {
+ ASSERT(isKnownSection(previousKnown) || previousKnown == Section::Begin);
+- if (previousKnown == Section::DataCount && next == Section::Code)
+- return true;
+- if (previousKnown == Section::Exception)
+- return next >= Section::Global;
+- if (next == Section::Exception)
+- return previousKnown <= Section::Memory;
+- return static_cast<uint8_t>(previousKnown) < static_cast<uint8_t>(next);
++ return orderingNumber(previousKnown) < orderingNumber(next);
+ }
+
+ inline const char* makeString(Section section)
+@@ -105,7 +111,7 @@ inline const char* makeString(Section section)
+ return "Begin";
+ case Section::Custom:
+ return "Custom";
+-#define STRINGIFY_SECTION_NAME(NAME, ID, DESCRIPTION) case Section::NAME: return #NAME;
++#define STRINGIFY_SECTION_NAME(NAME, ID, ORDERING, DESCRIPTION) case Section::NAME: return #NAME;
+ FOR_EACH_KNOWN_WASM_SECTION(STRINGIFY_SECTION_NAME)
+ #undef STRINGIFY_SECTION_NAME
+ }
+diff --git a/Source/JavaScriptCore/wasm/WasmStreamingParser.cpp b/Source/JavaScriptCore/wasm/WasmStreamingParser.cpp
+index fa552eff..25e7e32d 100644
+--- a/Source/JavaScriptCore/wasm/WasmStreamingParser.cpp
++++ b/Source/JavaScriptCore/wasm/WasmStreamingParser.cpp
+@@ -161,7 +161,7 @@ auto StreamingParser::parseSectionPayload(Vector<uint8_t>&& data) -> State
+ {
+ SectionParser parser(data.data(), data.size(), m_offset, m_info.get());
+ switch (m_section) {
+-#define WASM_SECTION_PARSE(NAME, ID, DESCRIPTION) \
++#define WASM_SECTION_PARSE(NAME, ID, ORDERING, DESCRIPTION) \
+ case Section::NAME: { \
+ WASM_STREAMING_PARSER_FAIL_IF_HELPER_FAILS(parser.parse ## NAME()); \
+ break; \
+@@ -393,9 +393,18 @@ auto StreamingParser::finalize() -> State
+ m_state = fail("Number of functions parsed (", m_functionCount, ") does not match the number of declared functions (", m_info->functions.size(), ")");
+ break;
+ }
++
++ if (m_info->numberOfDataSegments) {
++ if (UNLIKELY(m_info->data.size() != m_info->numberOfDataSegments.value())) {
++ m_state = fail("Data section's count ", m_info->data.size(), " is different from Data Count section's count ", m_info->numberOfDataSegments.value());
++ break;
++ }
++ }
++
+ if (m_remaining.isEmpty()) {
+ if (UNLIKELY(Options::useEagerWebAssemblyModuleHashing()))
+ m_info->nameSection->setHash(m_hasher.computeHexDigest());
++
+ m_state = State::Finished;
+ m_client.didFinishParsing();
+ } else
+--
+2.40.0
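The ordering column added above decouples a section's wire ID from the order in which sections must appear: DataCount (ID 12) now orders between Element and Code, and Exception (ID 13) slots between Memory and Global, which is exactly what the deleted special cases in validateOrder encoded by hand. A stand-alone check of those ordering numbers (illustrative only; the real table lives in WasmSections.h):

  #include <cassert>
  #include <cstdint>

  // Copy of the ordering numbers from the table in the hunk above.
  static unsigned orderingNumberOf(uint8_t sectionId) {
    switch (sectionId) {
    case 1: return 1;   // Type
    case 2: return 2;   // Import
    case 3: return 3;   // Function
    case 4: return 4;   // Table
    case 5: return 5;   // Memory
    case 6: return 7;   // Global
    case 7: return 8;   // Export
    case 8: return 9;   // Start
    case 9: return 10;  // Element
    case 10: return 12; // Code
    case 11: return 13; // Data
    case 12: return 11; // DataCount
    case 13: return 6;  // Exception
    default: return sectionId;
    }
  }

  int main() {
    // DataCount (id 12) is allowed to precede Code (id 10)...
    assert(orderingNumberOf(12) < orderingNumberOf(10));
    // ...Exception (id 13) sits between Memory (id 5) and Global (id 6)...
    assert(orderingNumberOf(5) < orderingNumberOf(13));
    assert(orderingNumberOf(13) < orderingNumberOf(6));
    // ...and Code must still come before Data.
    assert(orderingNumberOf(10) < orderingNumberOf(11));
    return 0;
  }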
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2023-23529.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2023-23529.patch
new file mode 100644
index 0000000000..f2e9808ab4
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2023-23529.patch
@@ -0,0 +1,65 @@
+CVE: CVE-2023-23529
+Upstream-Status: Backport [https://github.com/WebKit/WebKit/commit/6cc943c]
+
+With help from the WebKit mailing list, backport and rebase the patch to fix
+CVE-2023-23529.
+
+https://lists.webkit.org/pipermail/webkit-gtk/2023-August/003931.html
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 6cc943c3323a1a1368934c812e5e8ec08f54dcd4 Mon Sep 17 00:00:00 2001
+From: Yusuke Suzuki <ysuzuki@apple.com>
+Date: Fri, 17 Feb 2023 10:39:19 -0800
+Subject: [PATCH] Cherry-pick 259548.63@safari-7615-branch (1b2eb138ef92).
+ rdar://105598149
+
+ [JSC] ToThis object folding should check if AbstractValue is always an object
+ https://bugs.webkit.org/show_bug.cgi?id=251944
+ rdar://105175786
+
+ Reviewed by Geoffrey Garen and Mark Lam.
+
+ ToThis can become Identity for strict mode if it is just primitive values or its object does not have toThis function overriding.
+ This is correct, but folding ToThis to Undefined etc. (not Identity) needs to check that an input only contains objects.
+ This patch adds appropriate checks to prevent from converting ToThis(GlobalObject | Int32) to Undefined for example.
+
+ * Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h:
+ (JSC::DFG::isToThisAnIdentity):
+
+ Canonical link: https://commits.webkit.org/259548.63@safari-7615-branch
+
+Canonical link: https://commits.webkit.org/260455@main
+---
+ .../JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
+index 928328ffab826..82481455e651d 100644
+--- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
++++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
+@@ -209,7 +209,8 @@ inline ToThisResult isToThisAnIdentity(VM& vm, ECMAMode ecmaMode, AbstractValue&
+ }
+ }
+
+- if ((ecmaMode.isStrict() || (valueForNode.m_type && !(valueForNode.m_type & ~SpecObject))) && valueForNode.m_structure.isFinite()) {
++ bool onlyObjects = valueForNode.m_type && !(valueForNode.m_type & ~SpecObject);
++ if ((ecmaMode.isStrict() || onlyObjects) && valueForNode.m_structure.isFinite()) {
+ bool allStructuresAreJSScope = !valueForNode.m_structure.isClear();
+ bool overridesToThis = false;
+ valueForNode.m_structure.forEach([&](RegisteredStructure structure) {
+@@ -226,9 +227,13 @@ inline ToThisResult isToThisAnIdentity(VM& vm, ECMAMode ecmaMode, AbstractValue&
+ // If all the structures are JSScope's ones, we know the details of JSScope::toThis() operation.
+ allStructuresAreJSScope &= structure->classInfo()->methodTable.toThis == JSScope::info()->methodTable.toThis;
+ });
++
++ // This is correct for strict mode even if this can have non objects, since the right semantics is Identity.
+ if (!overridesToThis)
+ return ToThisResult::Identity;
+- if (allStructuresAreJSScope) {
++
++ // But this folding is available only if input is always an object.
++ if (onlyObjects && allStructuresAreJSScope) {
+ if (ecmaMode.isStrict())
+ return ToThisResult::Undefined;
+ return ToThisResult::GlobalThis;
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2023-32439.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2023-32439.patch
new file mode 100644
index 0000000000..f8d7b613fa
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/CVE-2023-32439.patch
@@ -0,0 +1,127 @@
+From ebefb9e6b7e7440ab6bb29452f4ac6350bd8b975 Mon Sep 17 00:00:00 2001
+From: Yijia Huang <yijia_huang@apple.com>
+Date: Tue, 26 Sep 2023 09:23:31 +0000
+Subject: [PATCH] Cherry-pick 263909@main (52fe95e5805c).
+ https://bugs.webkit.org/show_bug.cgi?id=256567
+
+ EnumeratorNextUpdateIndexAndMode and HasIndexedProperty should have different heap location kinds
+ https://bugs.webkit.org/show_bug.cgi?id=256567
+ rdar://109089013
+
+ Reviewed by Yusuke Suzuki.
+
+ EnumeratorNextUpdateIndexAndMode and HasIndexedProperty are different DFG nodes. However,
+ they might introduce the same heap location kind in DFGClobberize.h which might lead to
+ hash collision. We should introduce a new locationn kind for EnumeratorNextUpdateIndexAndMode.
+
+ * JSTests/stress/heap-location-collision-dfg-clobberize.js: Added.
+ (foo):
+ * Source/JavaScriptCore/dfg/DFGClobberize.h:
+ (JSC::DFG::clobberize):
+ * Source/JavaScriptCore/dfg/DFGHeapLocation.cpp:
+ (WTF::printInternal):
+ * Source/JavaScriptCore/dfg/DFGHeapLocation.h:
+
+ Canonical link: https://commits.webkit.org/263909@main
+
+Canonical link: https://commits.webkit.org/260527.376@webkitglib/2.40
+
+CVE: CVE-2023-32439
+
+Upstream-Status: Backport [https://github.com/WebKit/WebKit/commit/ebefb9e]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ .../stress/heap-location-collision-dfg-clobberize.js | 12 ++++++++++++
+ Source/JavaScriptCore/dfg/DFGClobberize.h | 7 ++++---
+ Source/JavaScriptCore/dfg/DFGHeapLocation.cpp | 4 ++++
+ Source/JavaScriptCore/dfg/DFGHeapLocation.h | 1 +
+ 4 files changed, 21 insertions(+), 3 deletions(-)
+ create mode 100644 JSTests/stress/heap-location-collision-dfg-clobberize.js
+
+diff --git a/JSTests/stress/heap-location-collision-dfg-clobberize.js b/JSTests/stress/heap-location-collision-dfg-clobberize.js
+new file mode 100644
+index 00000000..ed40601e
+--- /dev/null
++++ b/JSTests/stress/heap-location-collision-dfg-clobberize.js
+@@ -0,0 +1,12 @@
++//@ runDefault("--watchdog=300", "--watchdog-exception-ok")
++const arr = [0];
++
++function foo() {
++ for (let _ in arr) {
++ 0 in arr;
++ while(1);
++ }
++}
++
++
++foo();
+diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.h b/Source/JavaScriptCore/dfg/DFGClobberize.h
+index f96e21d2..af3e864b 100644
+--- a/Source/JavaScriptCore/dfg/DFGClobberize.h
++++ b/Source/JavaScriptCore/dfg/DFGClobberize.h
+@@ -371,6 +371,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
+
+ read(JSObject_butterfly);
+ ArrayMode mode = node->arrayMode();
++ LocationKind locationKind = node->op() == EnumeratorNextUpdateIndexAndMode ? EnumeratorNextUpdateIndexAndModeLoc : HasIndexedPropertyLoc;
+ switch (mode.type()) {
+ case Array::ForceExit: {
+ write(SideState);
+@@ -380,7 +381,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
+ if (mode.isInBounds()) {
+ read(Butterfly_publicLength);
+ read(IndexedInt32Properties);
+- def(HeapLocation(HasIndexedPropertyLoc, IndexedInt32Properties, graph.varArgChild(node, 0), graph.varArgChild(node, 1)), LazyNode(node));
++ def(HeapLocation(locationKind, IndexedInt32Properties, graph.varArgChild(node, 0), graph.varArgChild(node, 1)), LazyNode(node));
+ return;
+ }
+ break;
+@@ -390,7 +391,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
+ if (mode.isInBounds()) {
+ read(Butterfly_publicLength);
+ read(IndexedDoubleProperties);
+- def(HeapLocation(HasIndexedPropertyLoc, IndexedDoubleProperties, graph.varArgChild(node, 0), graph.varArgChild(node, 1)), LazyNode(node));
++ def(HeapLocation(locationKind, IndexedDoubleProperties, graph.varArgChild(node, 0), graph.varArgChild(node, 1)), LazyNode(node));
+ return;
+ }
+ break;
+@@ -400,7 +401,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
+ if (mode.isInBounds()) {
+ read(Butterfly_publicLength);
+ read(IndexedContiguousProperties);
+- def(HeapLocation(HasIndexedPropertyLoc, IndexedContiguousProperties, graph.varArgChild(node, 0), graph.varArgChild(node, 1)), LazyNode(node));
++ def(HeapLocation(locationKind, IndexedContiguousProperties, graph.varArgChild(node, 0), graph.varArgChild(node, 1)), LazyNode(node));
+ return;
+ }
+ break;
+diff --git a/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp b/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp
+index 0661e5b8..698a6d4b 100644
+--- a/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp
++++ b/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp
+@@ -134,6 +134,10 @@ void printInternal(PrintStream& out, LocationKind kind)
+ out.print("HasIndexedPorpertyLoc");
+ return;
+
++ case EnumeratorNextUpdateIndexAndModeLoc:
++ out.print("EnumeratorNextUpdateIndexAndModeLoc");
++ return;
++
+ case IndexedPropertyDoubleLoc:
+ out.print("IndexedPropertyDoubleLoc");
+ return;
+diff --git a/Source/JavaScriptCore/dfg/DFGHeapLocation.h b/Source/JavaScriptCore/dfg/DFGHeapLocation.h
+index 40fb7167..7238491b 100644
+--- a/Source/JavaScriptCore/dfg/DFGHeapLocation.h
++++ b/Source/JavaScriptCore/dfg/DFGHeapLocation.h
+@@ -46,6 +46,7 @@ enum LocationKind {
+ DirectArgumentsLoc,
+ GetterLoc,
+ GlobalVariableLoc,
++ EnumeratorNextUpdateIndexAndModeLoc,
+ HasIndexedPropertyLoc,
+ IndexedPropertyDoubleLoc,
+ IndexedPropertyDoubleSaneChainLoc,
+--
+2.40.0
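
For context on the fix above: DFG's common-subexpression elimination keys cached results on a HeapLocation, so two different node types sharing one location kind can be treated as the same computation. A minimal standalone sketch of that keying idea (not WebKit's actual data structures; the fields and values are illustrative only):

    #include <stdio.h>

    enum location_kind {
        HasIndexedPropertyLoc,
        EnumeratorNextUpdateIndexAndModeLoc   /* the kind added by the patch */
    };

    struct key { enum location_kind kind; int heap; int operand; };

    /* CSE may reuse a cached def only if the keys compare equal. */
    static int same_key(struct key a, struct key b)
    {
        return a.kind == b.kind && a.heap == b.heap && a.operand == b.operand;
    }

    int main(void)
    {
        struct key has_indexed = { HasIndexedPropertyLoc, 1, 7 };
        struct key enum_next   = { EnumeratorNextUpdateIndexAndModeLoc, 1, 7 };

        /* Before the patch both operations used HasIndexedPropertyLoc, so
         * these keys would have compared equal and one node could be handed
         * the other's cached result.  Distinct kinds keep them apart. */
        printf("keys alias: %s\n", same_key(has_indexed, enum_next) ? "yes" : "no");
        return 0;
    }
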
diff --git a/meta/recipes-sato/webkit/webkitgtk_2.36.3.bb b/meta/recipes-sato/webkit/webkitgtk_2.36.8.bb
index 83b6f8a6ee..f4b8456749 100644
--- a/meta/recipes-sato/webkit/webkitgtk_2.36.3.bb
+++ b/meta/recipes-sato/webkit/webkitgtk_2.36.8.bb
@@ -9,15 +9,23 @@ LIC_FILES_CHKSUM = "file://Source/JavaScriptCore/COPYING.LIB;md5=d0c6d6397a5d842
file://Source/WebCore/LICENSE-LGPL-2.1;md5=a778a33ef338abbaf8b8a7c36b6eec80 \
"
-SRC_URI = "https://www.webkitgtk.org/releases/${BPN}-${PV}.tar.xz \
+SRC_URI = "https://www.webkitgtk.org/releases/${BP}.tar.xz \
file://0001-FindGObjectIntrospection.cmake-prefix-variables-obta.patch \
file://0001-Tweak-gtkdoc-settings-so-that-gtkdoc-generation-work.patch \
file://0001-Fix-build-without-opengl-or-es.patch \
file://reproducibility.patch \
file://0001-When-building-introspection-files-do-not-quote-CFLAG.patch \
+ file://CVE-2022-32888.patch \
+ file://CVE-2022-32923.patch \
+ file://CVE-2022-46691.patch \
+ file://CVE-2022-46699.patch \
+ file://CVE-2022-42867.patch \
+ file://CVE-2022-46700.patch \
+ file://CVE-2023-23529.patch \
+ file://CVE-2022-48503.patch \
+ file://CVE-2023-32439.patch \
"
-
-SRC_URI[sha256sum] = "732fcf8c4ec644b8ed28b46ebbd7c1ebab9d9e0afea9bdf5e5d12786afc478d1"
+SRC_URI[sha256sum] = "0ad9fb6bf28308fe3889faf184bd179d13ac1b46835d2136edbab2c133d00437"
inherit cmake pkgconfig gobject-introspection perlnative features_check upstream-version-is-even gtk-doc
diff --git a/meta/recipes-sato/webkit/wpebackend-fdo_1.12.0.bb b/meta/recipes-sato/webkit/wpebackend-fdo_1.14.2.bb
index 4a18467ea4..b3d7b229c8 100644
--- a/meta/recipes-sato/webkit/wpebackend-fdo_1.12.0.bb
+++ b/meta/recipes-sato/webkit/wpebackend-fdo_1.14.2.bb
@@ -13,7 +13,7 @@ inherit meson features_check pkgconfig
REQUIRED_DISTRO_FEATURES = "opengl"
SRC_URI = "https://wpewebkit.org/releases/${BPN}-${PV}.tar.xz"
-SRC_URI[sha256sum] = "6239c9c15523410798d66315de6b491712ab30009ba180f3e0dd076d9b0074ac"
+SRC_URI[sha256sum] = "93c9766ae9864eeaeaee2b0a74f22cbca08df42c1a1bdb55b086f2528e380d38"
# Especially helps compiling with clang which enable this as error when
# using c++11
diff --git a/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch b/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch
deleted file mode 100644
index 6f27876a7f..0000000000
--- a/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch
+++ /dev/null
@@ -1,134 +0,0 @@
-From 6b638fa9afbeb54dfa19378e391465a5284ce1ad Mon Sep 17 00:00:00 2001
-From: Changqing Li <changqing.li@windriver.com>
-Date: Wed, 12 Sep 2018 17:16:36 +0800
-Subject: [PATCH] Fix error handling in gdbm
-
-Only check for gdbm_errno if the return value of the called gdbm_*
-function says so. This fixes apr-util with gdbm 1.14, which does not
-seem to always reset gdbm_errno.
-
-Also make the gdbm driver return error codes starting with
-APR_OS_START_USEERR instead of always returning APR_EGENERAL. This is
-what the berkleydb driver already does.
-
-Also ensure that dsize is 0 if dptr == NULL.
-
-Upstream-Status: Backport [https://svn.apache.org/viewvc?view=revision&amp;revision=1825311]
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
----
- dbm/apr_dbm_gdbm.c | 47 +++++++++++++++++++++++++++++------------------
- 1 file changed, 29 insertions(+), 18 deletions(-)
-
-diff --git a/dbm/apr_dbm_gdbm.c b/dbm/apr_dbm_gdbm.c
-index 749447a..1c86327 100644
---- a/dbm/apr_dbm_gdbm.c
-+++ b/dbm/apr_dbm_gdbm.c
-@@ -36,13 +36,25 @@
- static apr_status_t g2s(int gerr)
- {
- if (gerr == -1) {
-- /* ### need to fix this */
-- return APR_EGENERAL;
-+ if (gdbm_errno == GDBM_NO_ERROR)
-+ return APR_SUCCESS;
-+ return APR_OS_START_USEERR + gdbm_errno;
- }
-
- return APR_SUCCESS;
- }
-
-+static apr_status_t gdat2s(datum d)
-+{
-+ if (d.dptr == NULL) {
-+ if (gdbm_errno == GDBM_NO_ERROR || gdbm_errno == GDBM_ITEM_NOT_FOUND)
-+ return APR_SUCCESS;
-+ return APR_OS_START_USEERR + gdbm_errno;
-+ }
-+
-+ return APR_SUCCESS;
-+}
-+
- static apr_status_t datum_cleanup(void *dptr)
- {
- if (dptr)
-@@ -53,22 +65,15 @@ static apr_status_t datum_cleanup(void *dptr)
-
- static apr_status_t set_error(apr_dbm_t *dbm, apr_status_t dbm_said)
- {
-- apr_status_t rv = APR_SUCCESS;
-
-- /* ### ignore whatever the DBM said (dbm_said); ask it explicitly */
-+ dbm->errcode = dbm_said;
-
-- if ((dbm->errcode = gdbm_errno) == GDBM_NO_ERROR) {
-+ if (dbm_said == APR_SUCCESS)
- dbm->errmsg = NULL;
-- }
-- else {
-- dbm->errmsg = gdbm_strerror(gdbm_errno);
-- rv = APR_EGENERAL; /* ### need something better */
-- }
--
-- /* captured it. clear it now. */
-- gdbm_errno = GDBM_NO_ERROR;
-+ else
-+ dbm->errmsg = gdbm_strerror(dbm_said - APR_OS_START_USEERR);
-
-- return rv;
-+ return dbm_said;
- }
-
- /* --------------------------------------------------------------------------
-@@ -107,7 +112,7 @@ static apr_status_t vt_gdbm_open(apr_dbm_t **pdb, const char *pathname,
- NULL);
-
- if (file == NULL)
-- return APR_EGENERAL; /* ### need a better error */
-+ return APR_OS_START_USEERR + gdbm_errno; /* ### need a better error */
-
- /* we have an open database... return it */
- *pdb = apr_pcalloc(pool, sizeof(**pdb));
-@@ -141,10 +146,12 @@ static apr_status_t vt_gdbm_fetch(apr_dbm_t *dbm, apr_datum_t key,
- if (pvalue->dptr)
- apr_pool_cleanup_register(dbm->pool, pvalue->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pvalue->dsize = 0;
-
- /* store the error info into DBM, and return a status code. Also, note
- that *pvalue should have been cleared on error. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static apr_status_t vt_gdbm_store(apr_dbm_t *dbm, apr_datum_t key,
-@@ -201,9 +208,11 @@ static apr_status_t vt_gdbm_firstkey(apr_dbm_t *dbm, apr_datum_t *pkey)
- if (pkey->dptr)
- apr_pool_cleanup_register(dbm->pool, pkey->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pkey->dsize = 0;
-
- /* store any error info into DBM, and return a status code. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static apr_status_t vt_gdbm_nextkey(apr_dbm_t *dbm, apr_datum_t *pkey)
-@@ -221,9 +230,11 @@ static apr_status_t vt_gdbm_nextkey(apr_dbm_t *dbm, apr_datum_t *pkey)
- if (pkey->dptr)
- apr_pool_cleanup_register(dbm->pool, pkey->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pkey->dsize = 0;
-
- /* store any error info into DBM, and return a status code. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static void vt_gdbm_freedatum(apr_dbm_t *dbm, apr_datum_t data)
---
-2.7.4
-
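
The patch removed above is dropped as part of the 1.6.1 -> 1.6.3 upgrade below, presumably because the upstream fix (r1825311) is already in the new release. Its core idea was to stop collapsing every gdbm failure into APR_EGENERAL and instead shift gdbm_errno into APR's user-error range. A minimal standalone sketch of that mapping, with hypothetical constant values standing in for apr_errno.h and gdbm.h:

    #include <stdio.h>

    /* Hypothetical stand-ins; the real values live in apr_errno.h and gdbm.h. */
    #define APR_SUCCESS            0
    #define APR_OS_START_USEERR    120000
    #define GDBM_NO_ERROR          0
    #define GDBM_ITEM_NOT_FOUND    15

    static int gdbm_errno = GDBM_NO_ERROR;   /* normally provided by libgdbm */

    /* Only consult gdbm_errno when the gdbm call reported failure, and shift
     * the code into APR's user-error range so the caller can recover it. */
    static int g2s(int gerr)
    {
        if (gerr == -1) {
            if (gdbm_errno == GDBM_NO_ERROR)
                return APR_SUCCESS;
            return APR_OS_START_USEERR + gdbm_errno;
        }
        return APR_SUCCESS;
    }

    int main(void)
    {
        gdbm_errno = GDBM_ITEM_NOT_FOUND;
        printf("mapped status: %d\n", g2s(-1));   /* 120015 */
        return 0;
    }

A caller can then recover the original gdbm code by subtracting APR_OS_START_USEERR from the returned status.
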
diff --git a/meta/recipes-support/apr/apr-util_1.6.1.bb b/meta/recipes-support/apr/apr-util_1.6.3.bb
index b851d46351..7c6fcc699b 100644
--- a/meta/recipes-support/apr/apr-util_1.6.1.bb
+++ b/meta/recipes-support/apr/apr-util_1.6.3.bb
@@ -13,11 +13,9 @@ SRC_URI = "${APACHE_MIRROR}/apr/${BPN}-${PV}.tar.gz \
file://configfix.patch \
file://configure_fixes.patch \
file://run-ptest \
- file://0001-Fix-error-handling-in-gdbm.patch \
-"
+ "
-SRC_URI[md5sum] = "bd502b9a8670a8012c4d90c31a84955f"
-SRC_URI[sha256sum] = "b65e40713da57d004123b6319828be7f1273fbc6490e145874ee1177e112c459"
+SRC_URI[sha256sum] = "2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983"
EXTRA_OECONF = "--with-apr=${STAGING_BINDIR_CROSS}/apr-1-config \
--without-odbc \
diff --git a/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch b/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
index abff4e9331..a274f3a16e 100644
--- a/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
+++ b/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
@@ -1,14 +1,15 @@
-From 2bbe20b4f69e84e7a18bc79d382486953f479328 Mon Sep 17 00:00:00 2001
+From 225abf37cd0b49960664b59f08e515a4c4ea5ad0 Mon Sep 17 00:00:00 2001
From: Jeremy Puhlman <jpuhlman@mvista.com>
Date: Thu, 26 Mar 2020 18:30:36 +0000
Subject: [PATCH] Add option to disable timed dependant tests
-The disabled tests rely on timing to pass correctly. On a virtualized
+The disabled tests rely on timing to pass correctly. On a virtualized
system under heavy load, these tests randomly fail because they miss
a timer or other timing related issues.
Upstream-Status: Pending
Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
+
---
configure.in | 6 ++++++
include/apr.h.in | 1 +
@@ -16,10 +17,10 @@ Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
3 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/configure.in b/configure.in
-index d9f32d6..f0c5661 100644
+index bfd488b..3663220 100644
--- a/configure.in
+++ b/configure.in
-@@ -2886,6 +2886,12 @@ AC_ARG_ENABLE(timedlocks,
+@@ -3023,6 +3023,12 @@ AC_ARG_ENABLE(timedlocks,
)
AC_SUBST(apr_has_timedlocks)
@@ -45,10 +46,10 @@ index ee99def..c46a5f4 100644
#define APR_PROCATTR_USER_SET_REQUIRES_PASSWORD @apr_procattr_user_set_requires_password@
diff --git a/test/testlock.c b/test/testlock.c
-index a43f477..6233d0b 100644
+index e3437c1..04e01b9 100644
--- a/test/testlock.c
+++ b/test/testlock.c
-@@ -396,13 +396,13 @@ abts_suite *testlock(abts_suite *suite)
+@@ -535,7 +535,7 @@ abts_suite *testlock(abts_suite *suite)
abts_run_test(suite, threads_not_impl, NULL);
#else
abts_run_test(suite, test_thread_mutex, NULL);
@@ -56,6 +57,8 @@ index a43f477..6233d0b 100644
+#if APR_HAS_TIMEDLOCKS && APR_HAVE_TIME_DEPENDANT_TESTS
abts_run_test(suite, test_thread_timedmutex, NULL);
#endif
+ abts_run_test(suite, test_thread_nestedmutex, NULL);
+@@ -543,7 +543,7 @@ abts_suite *testlock(abts_suite *suite)
abts_run_test(suite, test_thread_rwlock, NULL);
abts_run_test(suite, test_cond, NULL);
abts_run_test(suite, test_timeoutcond, NULL);
@@ -63,7 +66,4 @@ index a43f477..6233d0b 100644
+#if APR_HAS_TIMEDLOCKS && APR_HAVE_TIME_DEPENDANT_TESTS
abts_run_test(suite, test_timeoutmutex, NULL);
#endif
- #endif
---
-2.23.0
-
+ #ifdef WIN32
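
The refresh above keeps the original intent: timing-sensitive tests are only registered when the build opts in, since they fail sporadically on loaded or virtualized builders. A minimal sketch of the guard pattern, with a hypothetical run_test() harness standing in for ABTS and the macros hard-coded for illustration:

    #include <stdio.h>

    /* Illustrative values; the real macros come from the generated apr.h. */
    #define APR_HAS_TIMEDLOCKS            1
    #define APR_HAVE_TIME_DEPENDANT_TESTS 0   /* i.e. --disable-timed-tests */

    static void run_test(const char *name) { printf("running %s\n", name); }

    int main(void)
    {
        run_test("test_thread_mutex");
    #if APR_HAS_TIMEDLOCKS && APR_HAVE_TIME_DEPENDANT_TESTS
        run_test("test_thread_timedmutex");   /* skipped on loaded builders */
    #endif
        run_test("test_thread_rwlock");
        return 0;
    }
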
diff --git a/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch b/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch
new file mode 100644
index 0000000000..a78b16284f
--- /dev/null
+++ b/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch
@@ -0,0 +1,58 @@
+From 316b81c462f065927d7fec56aadd5c8cb94d1cf0 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 26 Aug 2022 00:28:08 -0700
+Subject: [PATCH] configure: Remove runtime test for mmap that can map
+ /dev/zero
+
+This never works for cross-compile; moreover, it ends up disabling
+ac_cv_file__dev_zero, which then results in compiler errors in shared
+mutexes.
+
+Upstream-Status: Inappropriate [Cross-compile specific]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+---
+ configure.in | 30 ------------------------------
+ 1 file changed, 30 deletions(-)
+
+diff --git a/configure.in b/configure.in
+index 3663220..dce9789 100644
+--- a/configure.in
++++ b/configure.in
+@@ -1303,36 +1303,6 @@ AC_CHECK_FUNCS([mmap munmap shm_open shm_unlink shmget shmat shmdt shmctl \
+ APR_CHECK_DEFINE(MAP_ANON, sys/mman.h)
+ AC_CHECK_FILE(/dev/zero)
+
+-# Not all systems can mmap /dev/zero (such as HP-UX). Check for that.
+-if test "$ac_cv_func_mmap" = "yes" &&
+- test "$ac_cv_file__dev_zero" = "yes"; then
+- AC_CACHE_CHECK([for mmap that can map /dev/zero],
+- [ac_cv_mmap__dev_zero],
+- [AC_TRY_RUN([#include <sys/types.h>
+-#include <sys/stat.h>
+-#include <fcntl.h>
+-#ifdef HAVE_SYS_MMAN_H
+-#include <sys/mman.h>
+-#endif
+- int main()
+- {
+- int fd;
+- void *m;
+- fd = open("/dev/zero", O_RDWR);
+- if (fd < 0) {
+- return 1;
+- }
+- m = mmap(0, sizeof(void*), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+- if (m == (void *)-1) { /* aka MAP_FAILED */
+- return 2;
+- }
+- if (munmap(m, sizeof(void*)) < 0) {
+- return 3;
+- }
+- return 0;
+- }], [], [ac_cv_file__dev_zero=no], [ac_cv_file__dev_zero=no])])
+-fi
+-
+ # Now we determine which one is our anonymous shmem preference.
+ haveshmgetanon="0"
+ havemmapzero="0"
diff --git a/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch b/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
index 72e706f966..d63423f3a1 100644
--- a/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
+++ b/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
@@ -1,8 +1,7 @@
-From 5925b20da8bbc34d9bf5a5dca123ef38864d43c6 Mon Sep 17 00:00:00 2001
+From 689a8db96a6d1e1cae9cbfb35d05ac82140a6555 Mon Sep 17 00:00:00 2001
From: Hongxu Jia <hongxu.jia@windriver.com>
Date: Tue, 30 Jan 2018 09:39:06 +0800
-Subject: [PATCH 2/7] apr: Remove workdir path references from installed apr
- files
+Subject: [PATCH] apr: Remove workdir path references from installed apr files
Upstream-Status: Inappropriate [configuration]
@@ -14,20 +13,23 @@ packages at target run time, the workdir path caused confusion.
Rebase to 1.6.3
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+
---
- apr-config.in | 26 ++------------------------
- 1 file changed, 2 insertions(+), 24 deletions(-)
+ apr-config.in | 32 ++------------------------------
+ 1 file changed, 2 insertions(+), 30 deletions(-)
diff --git a/apr-config.in b/apr-config.in
-index 84b4073..bbbf651 100644
+index bed47ca..47874e5 100644
--- a/apr-config.in
+++ b/apr-config.in
-@@ -152,14 +152,7 @@ while test $# -gt 0; do
+@@ -164,16 +164,7 @@ while test $# -gt 0; do
flags="$flags $LDFLAGS"
;;
--includes)
- if test "$location" = "installed"; then
flags="$flags -I$includedir $EXTRA_INCLUDES"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -I$APR_TARGET_DIR/$includedir $EXTRA_INCLUDES"
- elif test "$location" = "source"; then
- flags="$flags -I$APR_SOURCE_DIR/include $EXTRA_INCLUDES"
- else
@@ -37,13 +39,15 @@ index 84b4073..bbbf651 100644
;;
--srcdir)
echo $APR_SOURCE_DIR
-@@ -181,29 +174,14 @@ while test $# -gt 0; do
+@@ -197,33 +188,14 @@ while test $# -gt 0; do
exit 0
;;
--link-ld)
- if test "$location" = "installed"; then
- ### avoid using -L if libdir is a "standard" location like /usr/lib
- flags="$flags -L$libdir -l${APR_LIBNAME}"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -L$APR_TARGET_DIR/$libdir -l${APR_LIBNAME}"
- else
- ### this surely can't work since the library is in .libs?
- flags="$flags -L$APR_BUILD_DIR -l${APR_LIBNAME}"
@@ -62,6 +66,8 @@ index 84b4073..bbbf651 100644
- # Since the user is specifying they are linking with libtool, we
- # *know* that -R will be recognized by libtool.
- flags="$flags -L$libdir -R$libdir -l${APR_LIBNAME}"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -L${APR_TARGET_DIR}/$libdir -l${APR_LIBNAME}"
- else
- flags="$flags $LA_FILE"
- fi
@@ -69,6 +75,3 @@ index 84b4073..bbbf651 100644
;;
--shlib-path-var)
echo "$SHLIBPATH_VAR"
---
-1.8.3.1
-
diff --git a/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch b/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch
deleted file mode 100644
index 4dd53bd8eb..0000000000
--- a/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From d5028c10f156c224475b340cfb1ba025d6797243 Mon Sep 17 00:00:00 2001
-From: Hongxu Jia <hongxu.jia@windriver.com>
-Date: Fri, 2 Feb 2018 15:51:42 +0800
-Subject: [PATCH 3/7] Makefile.in/configure.in: support cross compiling
-
-While cross compiling, the tools/gen_test_char could not
-be executed at build time, use AX_PROG_CC_FOR_BUILD to
-build native tools/gen_test_char
-
-Upstream-Status: Submitted [https://github.com/apache/apr/pull/8]
-
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
----
- Makefile.in | 10 +++-------
- configure.in | 3 +++
- 2 files changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/Makefile.in b/Makefile.in
-index 5fb760e..8675f90 100644
---- a/Makefile.in
-+++ b/Makefile.in
-@@ -46,7 +46,7 @@ LT_VERSION = @LT_VERSION@
-
- CLEAN_TARGETS = apr-config.out apr.exp exports.c export_vars.c .make.dirs \
- build/apr_rules.out tools/gen_test_char@EXEEXT@ \
-- tools/gen_test_char.o tools/gen_test_char.lo \
-+ tools/gen_test_char.o \
- include/private/apr_escape_test_char.h
- DISTCLEAN_TARGETS = config.cache config.log config.status \
- include/apr.h include/arch/unix/apr_private.h \
-@@ -131,13 +131,9 @@ check: $(TARGET_LIB)
- etags:
- etags `find . -name '*.[ch]'`
-
--OBJECTS_gen_test_char = tools/gen_test_char.lo $(LOCAL_LIBS)
--tools/gen_test_char.lo: tools/gen_test_char.c
-+tools/gen_test_char@EXEEXT@: tools/gen_test_char.c
- $(APR_MKDIR) tools
-- $(LT_COMPILE)
--
--tools/gen_test_char@EXEEXT@: $(OBJECTS_gen_test_char)
-- $(LINK_PROG) $(OBJECTS_gen_test_char) $(ALL_LIBS)
-+ $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $< -o $@
-
- include/private/apr_escape_test_char.h: tools/gen_test_char@EXEEXT@
- $(APR_MKDIR) include/private
-diff --git a/configure.in b/configure.in
-index 719f331..361120f 100644
---- a/configure.in
-+++ b/configure.in
-@@ -183,6 +183,9 @@ dnl can only be used once within a configure script, so this prevents a
- dnl preload section from invoking the macro to get compiler info.
- AC_PROG_CC
-
-+dnl Check build CC for gen_test_char compiling which is executed at build time.
-+AX_PROG_CC_FOR_BUILD
-+
- dnl AC_PROG_SED is only avaliable in recent autoconf versions.
- dnl Use AC_CHECK_PROG instead if AC_PROG_SED is not present.
- ifdef([AC_PROG_SED],
---
-1.8.3.1
-
diff --git a/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch b/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch
deleted file mode 100644
index d1a2ebe881..0000000000
--- a/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From 49661ea3858cf8494926cccf57d3e8c6dcb47117 Mon Sep 17 00:00:00 2001
-From: Dengke Du <dengke.du@windriver.com>
-Date: Wed, 14 Dec 2016 18:13:08 +0800
-Subject: [PATCH] apr: fix off_t size doesn't match in glibc when cross
- compiling
-
-In configure.in, it contains the following:
-
- APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], off_t, 8)
-
-the macro "APR_CHECK_SIZEOF_EXTENDED" was defined in build/apr_common.m4,
-it use the "AC_TRY_RUN" macro, this macro let the off_t to 8, when cross
-compiling enable.
-
-So it was hardcoded for cross compiling, we should detect it dynamic based on
-the sysroot's glibc. We change it to the following:
-
- AC_CHECK_SIZEOF(off_t)
-
-The same for the following hardcoded types for cross compiling:
-
- pid_t 8
- ssize_t 8
- size_t 8
- off_t 8
-
-Change the above correspondingly.
-
-Signed-off-by: Dengke Du <dengke.du@windriver.com>
-
-Upstream-Status: Pending
-
----
- configure.in | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/configure.in b/configure.in
-index 27b8539..fb408d1 100644
---- a/configure.in
-+++ b/configure.in
-@@ -1801,7 +1801,7 @@ else
- socklen_t_value="int"
- fi
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], pid_t, 8)
-+AC_CHECK_SIZEOF(pid_t)
-
- if test "$ac_cv_sizeof_pid_t" = "$ac_cv_sizeof_short"; then
- pid_t_fmt='#define APR_PID_T_FMT "hd"'
-@@ -1873,7 +1873,7 @@ APR_CHECK_TYPES_FMT_COMPATIBLE(size_t, unsigned long, lu, [size_t_fmt="lu"], [
- APR_CHECK_TYPES_FMT_COMPATIBLE(size_t, unsigned int, u, [size_t_fmt="u"])
- ])
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], ssize_t, 8)
-+AC_CHECK_SIZEOF(ssize_t)
-
- dnl the else cases below should no longer occur;
- AC_MSG_CHECKING([which format to use for apr_ssize_t])
-@@ -1891,7 +1891,7 @@ fi
-
- ssize_t_fmt="#define APR_SSIZE_T_FMT \"$ssize_t_fmt\""
-
--APR_CHECK_SIZEOF_EXTENDED([#include <stddef.h>], size_t, 8)
-+AC_CHECK_SIZEOF(size_t)
-
- # else cases below should no longer occur;
- AC_MSG_CHECKING([which format to use for apr_size_t])
-@@ -1909,7 +1909,7 @@ fi
-
- size_t_fmt="#define APR_SIZE_T_FMT \"$size_t_fmt\""
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], off_t, 8)
-+AC_CHECK_SIZEOF(off_t)
-
- if test "${ac_cv_sizeof_off_t}${apr_cv_use_lfs64}" = "4yes"; then
- # Enable LFS
diff --git a/meta/recipes-support/apr/apr/CVE-2021-35940.patch b/meta/recipes-support/apr/apr/CVE-2021-35940.patch
deleted file mode 100644
index 00befdacee..0000000000
--- a/meta/recipes-support/apr/apr/CVE-2021-35940.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-
-SECURITY: CVE-2021-35940 (cve.mitre.org)
-
-Restore fix for CVE-2017-12613 which was missing in 1.7.x branch, though
-was addressed in 1.6.x in 1.6.3 and later via r1807976.
-
-The fix was merged back to 1.7.x in r1891198.
-
-Since this was a regression in 1.7.0, a new CVE name has been assigned
-to track this, CVE-2021-35940.
-
-Thanks to Iveta Cesalova <icesalov redhat.com> for reporting this issue.
-
-https://svn.apache.org/viewvc?view=revision&revision=1891198
-
-Upstream-Status: Backport
-CVE: CVE-2021-35940
-Signed-off-by: Armin Kuster <akuster@mvista.com>
-
-
-Index: time/unix/time.c
-===================================================================
---- a/time/unix/time.c (revision 1891197)
-+++ b/time/unix/time.c (revision 1891198)
-@@ -142,6 +142,9 @@
- static const int dayoffset[12] =
- {306, 337, 0, 31, 61, 92, 122, 153, 184, 214, 245, 275};
-
-+ if (xt->tm_mon < 0 || xt->tm_mon >= 12)
-+ return APR_EBADDATE;
-+
- /* shift new year to 1st March in order to make leap year calc easy */
-
- if (xt->tm_mon < 2)
-Index: time/win32/time.c
-===================================================================
---- a/time/win32/time.c (revision 1891197)
-+++ b/time/win32/time.c (revision 1891198)
-@@ -54,6 +54,9 @@
- static const int dayoffset[12] =
- {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
-
-+ if (tm->wMonth < 1 || tm->wMonth > 12)
-+ return APR_EBADDATE;
-+
- /* Note; the caller is responsible for filling in detailed tm_usec,
- * tm_gmtoff and tm_isdst data when applicable.
- */
-@@ -228,6 +231,9 @@
- static const int dayoffset[12] =
- {306, 337, 0, 31, 61, 92, 122, 153, 184, 214, 245, 275};
-
-+ if (xt->tm_mon < 0 || xt->tm_mon >= 12)
-+ return APR_EBADDATE;
-+
- /* shift new year to 1st March in order to make leap year calc easy */
-
- if (xt->tm_mon < 2)
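
This patch is dropped because the 1.7.0 -> 1.7.2 upgrade below ships the upstream fix. The fix itself is a bounds check on the month field before it indexes the 12-entry dayoffset table; a self-contained sketch (APR_EBADDATE's value here is a stand-in, not the real apr_errno.h constant):

    #include <stdio.h>

    #define APR_SUCCESS   0
    #define APR_EBADDATE  20008   /* illustrative value only */

    /* Validate tm_mon before using it as an index, so a corrupt
     * apr_time_exp_t cannot cause an out-of-bounds read. */
    static int days_before_month(int tm_mon, long *out)
    {
        static const int dayoffset[12] =
            {306, 337, 0, 31, 61, 92, 122, 153, 184, 214, 245, 275};

        if (tm_mon < 0 || tm_mon >= 12)
            return APR_EBADDATE;

        *out = dayoffset[tm_mon];
        return APR_SUCCESS;
    }

    int main(void)
    {
        long days = 0;
        int rv = days_before_month(1, &days);
        printf("month 1  -> rv=%d, days=%ld\n", rv, days);
        rv = days_before_month(13, &days);
        printf("month 13 -> rv=%d (APR_EBADDATE)\n", rv);
        return 0;
    }
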
diff --git a/meta/recipes-support/apr/apr/autoconf270.patch b/meta/recipes-support/apr/apr/autoconf270.patch
deleted file mode 100644
index 9f7b5c624c..0000000000
--- a/meta/recipes-support/apr/apr/autoconf270.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-With autoconf 2.70 confdefs.h is already included. Including it twice generates
-compiler warnings and since this macros is to error on warnings, it breaks.
-
-Fix by not including the file.
-
-Upstream-Status: Pending
-RP - 2021/1/28
-
-Index: apr-1.7.0/build/apr_common.m4
-===================================================================
---- apr-1.7.0.orig/build/apr_common.m4
-+++ apr-1.7.0/build/apr_common.m4
-@@ -505,8 +505,7 @@ AC_DEFUN([APR_TRY_COMPILE_NO_WARNING],
- fi
- AC_COMPILE_IFELSE(
- [AC_LANG_SOURCE(
-- [#include "confdefs.h"
-- ]
-+ []
- [[$1]]
- [int main(int argc, const char *const *argv) {]
- [[$2]]
diff --git a/meta/recipes-support/apr/apr/libtoolize_check.patch b/meta/recipes-support/apr/apr/libtoolize_check.patch
index 740792e6b0..80ce43caa4 100644
--- a/meta/recipes-support/apr/apr/libtoolize_check.patch
+++ b/meta/recipes-support/apr/apr/libtoolize_check.patch
@@ -1,6 +1,7 @@
+From 17835709bc55657b7af1f7c99b3f572b819cf97e Mon Sep 17 00:00:00 2001
From: Helmut Grohne <helmut@subdivi.de>
-Subject: check for libtoolize rather than libtool
-Last-Update: 2014-09-19
+Date: Tue, 7 Feb 2023 07:04:00 +0000
+Subject: [PATCH] check for libtoolize rather than libtool
libtool is now in package libtool-bin, but apr only needs libtoolize.
@@ -8,14 +9,22 @@ Upstream-Status: Pending [ from debian: https://sources.debian.org/data/main/a/a
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
---- apr.orig/build/buildcheck.sh
-+++ apr/build/buildcheck.sh
-@@ -39,11 +39,11 @@ fi
+---
+ build/buildcheck.sh | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/build/buildcheck.sh b/build/buildcheck.sh
+index 44921b5..08bc8a8 100755
+--- a/build/buildcheck.sh
++++ b/build/buildcheck.sh
+@@ -39,13 +39,11 @@ fi
# ltmain.sh (GNU libtool 1.1361 2004/01/02 23:10:52) 1.5a
# output is multiline from 1.5 onwards
-# Require libtool 1.4 or newer
--libtool=`build/PrintPath glibtool1 glibtool libtool libtool15 libtool14`
+-if test -z "$libtool"; then
+- libtool=`build/PrintPath glibtool1 glibtool libtool libtool15 libtool14`
+-fi
-lt_pversion=`$libtool --version 2>/dev/null|sed -e 's/([^)]*)//g;s/^[^0-9]*//;s/[- ].*//g;q'`
+# Require libtoolize 1.4 or newer
+libtoolize=`build/PrintPath glibtoolize1 glibtoolize libtoolize libtoolize15 libtoolize14`
diff --git a/meta/recipes-support/apr/apr_1.7.0.bb b/meta/recipes-support/apr/apr_1.7.2.bb
index 9c826d4380..c9059c9921 100644
--- a/meta/recipes-support/apr/apr_1.7.0.bb
+++ b/meta/recipes-support/apr/apr_1.7.2.bb
@@ -16,19 +16,15 @@ BBCLASSEXTEND = "native nativesdk"
SRC_URI = "${APACHE_MIRROR}/apr/${BPN}-${PV}.tar.bz2 \
file://run-ptest \
file://0002-apr-Remove-workdir-path-references-from-installed-ap.patch \
- file://0003-Makefile.in-configure.in-support-cross-compiling.patch \
file://0004-Fix-packet-discards-HTTP-redirect.patch \
file://0005-configure.in-fix-LTFLAGS-to-make-it-work-with-ccache.patch \
- file://0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch \
file://0007-explicitly-link-libapr-against-phtread-to-make-gold-.patch \
file://libtoolize_check.patch \
file://0001-Add-option-to-disable-timed-dependant-tests.patch \
- file://autoconf270.patch \
- file://CVE-2021-35940.patch \
+ file://0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch \
"
-SRC_URI[md5sum] = "7a14a83d664e87599ea25ff4432e48a7"
-SRC_URI[sha256sum] = "e2e148f0b2e99b8e5c6caa09f6d4fb4dd3e83f744aa72a952f94f5a14436f7ea"
+SRC_URI[sha256sum] = "75e77cc86776c030c0a5c408dfbd0bf2a0b75eed5351e52d5439fa1e5509a43e"
inherit autotools-brokensep lib_package binconfig multilib_header ptest multilib_script
@@ -36,17 +32,30 @@ OE_BINCONFIG_EXTRA_MANGLE = " -e 's:location=source:location=installed:'"
# Added to fix some issues with cmake. Refer to https://github.com/bmwcarit/meta-ros/issues/68#issuecomment-19896928
CACHED_CONFIGUREVARS += "apr_cv_mutex_recursive=yes"
-
+# Enable largefile
+CACHED_CONFIGUREVARS += "apr_cv_use_lfs64=yes"
+# Additional AC_TRY_RUN tests which will need to be cached for cross compile
+CACHED_CONFIGUREVARS += "apr_cv_epoll=yes epoll_create1=yes apr_cv_sock_cloexec=yes \
+ ac_cv_struct_rlimit=yes \
+ ac_cv_func_sem_open=yes \
+ apr_cv_process_shared_works=yes \
+ apr_cv_mutex_robust_shared=yes \
+ "
# Also suppress trying to use sctp.
#
CACHED_CONFIGUREVARS += "ac_cv_header_netinet_sctp_h=no ac_cv_header_netinet_sctp_uio_h=no"
-CACHED_CONFIGUREVARS += "ac_cv_sizeof_struct_iovec=yes"
+# ac_cv_sizeof_struct_iovec is deduced using a runtime check, which will fail during cross-compile
+CACHED_CONFIGUREVARS += "${@['ac_cv_sizeof_struct_iovec=16','ac_cv_sizeof_struct_iovec=8'][d.getVar('SITEINFO_BITS') != '32']}"
+
CACHED_CONFIGUREVARS += "ac_cv_file__dev_zero=yes"
+CACHED_CONFIGUREVARS:append:libc-musl = " ac_cv_strerror_r_rc_int=yes"
PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}"
+PACKAGECONFIG:append:libc-musl = " xsi-strerror"
PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6,"
PACKAGECONFIG[timed-tests] = "--enable-timed-tests,--disable-timed-tests,"
+PACKAGECONFIG[xsi-strerror] = "ac_cv_strerror_r_rc_int=yes,ac_cv_strerror_r_rc_int=no,"
do_configure:prepend() {
# Avoid absolute paths for grep since it causes failures
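
The seeded ac_cv_sizeof_struct_iovec values follow from struct iovec holding a pointer and a size_t, so it is 16 bytes on 64-bit targets and 8 on 32-bit ones; configure cannot discover this itself because the probe is a run test. A quick sketch to confirm the value for a given toolchain:

    #include <stdio.h>
    #include <sys/uio.h>   /* struct iovec: void *iov_base; size_t iov_len; */

    int main(void)
    {
        /* Prints 16 on LP64 targets and 8 on ILP32 targets -- the two values
         * seeded into ac_cv_sizeof_struct_iovec above. */
        printf("sizeof(struct iovec) = %zu\n", sizeof(struct iovec));
        return 0;
    }
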
diff --git a/meta/recipes-support/attr/acl/run-ptest b/meta/recipes-support/attr/acl/run-ptest
index 4312823365..3af75c84fe 100644
--- a/meta/recipes-support/attr/acl/run-ptest
+++ b/meta/recipes-support/attr/acl/run-ptest
@@ -7,4 +7,10 @@
mkdir -p /tmp/acl-ptest/test
cp test/test.* /tmp/acl-ptest/test
+set +e
make test-suite.log
+exitcode=$?
+if [ $exitcode -ne 0 -a -e test-suite.log ]; then
+ cat test-suite.log
+fi
+exit $exitcode
diff --git a/meta/recipes-support/attr/acl_2.3.1.bb b/meta/recipes-support/attr/acl_2.3.1.bb
index aca04a9aac..c2c9ba9069 100644
--- a/meta/recipes-support/attr/acl_2.3.1.bb
+++ b/meta/recipes-support/attr/acl_2.3.1.bb
@@ -62,6 +62,7 @@ RDEPENDS:${PN}-ptest = "acl \
bash \
coreutils \
perl \
+ perl-module-constant \
perl-module-filehandle \
perl-module-getopt-std \
perl-module-posix \
diff --git a/meta/recipes-support/attr/attr.inc b/meta/recipes-support/attr/attr.inc
index a4e38f2b19..56028edb12 100644
--- a/meta/recipes-support/attr/attr.inc
+++ b/meta/recipes-support/attr/attr.inc
@@ -50,6 +50,7 @@ do_install_ptest() {
RDEPENDS:${PN}-ptest = "attr \
coreutils \
+ perl-module-constant \
perl-module-filehandle \
perl-module-getopt-std \
perl-module-posix \
diff --git a/meta/recipes-support/attr/attr/run-ptest b/meta/recipes-support/attr/attr/run-ptest
index f64244f239..3e7a3a17a0 100644
--- a/meta/recipes-support/attr/attr/run-ptest
+++ b/meta/recipes-support/attr/attr/run-ptest
@@ -1,3 +1,10 @@
#!/bin/sh
+set +e
make test-suite.log
+exitcode=$?
+if [ $exitcode -ne 0 -a -e test-suite.log ]; then
+ cat test-suite.log
+fi
+exit $exitcode
+
diff --git a/meta/recipes-support/bmap-tools/bmap-tools_git.bb b/meta/recipes-support/bmap-tools/bmap-tools_git.bb
index 78c51e7731..89b7bf2b93 100644
--- a/meta/recipes-support/bmap-tools/bmap-tools_git.bb
+++ b/meta/recipes-support/bmap-tools/bmap-tools_git.bb
@@ -9,7 +9,7 @@ SECTION = "console/utils"
LICENSE = "GPL-2.0-only"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "git://github.com/intel/${BPN};branch=master;protocol=https"
+SRC_URI = "git://github.com/intel/${BPN};branch=main;protocol=https"
SRCREV = "c0673962a8ec1624b5189dc1d24f33fe4f06785a"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-support/boost/boost/0001-Don-t-skip-install-targets-if-there-s-build-no-in-ur.patch b/meta/recipes-support/boost/boost/0001-Don-t-skip-install-targets-if-there-s-build-no-in-ur.patch
new file mode 100644
index 0000000000..df8b285700
--- /dev/null
+++ b/meta/recipes-support/boost/boost/0001-Don-t-skip-install-targets-if-there-s-build-no-in-ur.patch
@@ -0,0 +1,82 @@
+From 78fd284a42caabe8815cb0870b46e5567872e75b Mon Sep 17 00:00:00 2001
+From: Dmitry <grisumbras@gmail.com>
+Date: Sat, 11 Dec 2021 16:58:23 +0300
+Subject: [PATCH] Don't skip install targets if there's <build>no in ureqs
+ (#113)
+
+---
+ src/tools/stage.jam | 4 ++++
+ test/install_build_no.py | 26 ++++++++++++++++++++++++++
+ test/test_all.py | 1 +
+ 3 files changed, 31 insertions(+)
+ create mode 100755 test/install_build_no.py
+
+Fixes the install of the boost fiber shared libraries, which is broken in 1.78.0
+but works in 1.79.0. Only kirkstone is affected by this.
+
+Upstream-Status: Backport
+
+Signed-off-by: Mikko Rapeli <mikko.rapeli@bmw.de>
+
+diff --git a/tools/build/src/tools/stage.jam b/tools/build/src/tools/stage.jam
+index c5f02e3ba..325129dc8 100644
+--- a/tools/build/src/tools/stage.jam
++++ b/tools/build/src/tools/stage.jam
+@@ -478,6 +478,10 @@ class install-target-class : basic-target
+ return [ sequence.unique $(result2) ] ;
+ }
+
++ rule skip-from-usage-requirements ( )
++ {
++ }
++
+ # Returns true iff 'type' is subtype of some element of 'types-to-include'.
+ #
+ local rule include-type ( type : types-to-include * )
+diff --git a/tools/build/test/install_build_no.py b/tools/build/test/install_build_no.py
+new file mode 100755
+index 000000000..0ccf3c5cc
+--- /dev/null
++++ b/tools/build/test/install_build_no.py
+@@ -0,0 +1,26 @@
++#!/usr/bin/python
++
++# Copyright 2021 Dmitry Arkhipov (grisumbras@gmail.com)
++# Distributed under the Boost Software License, Version 1.0.
++# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
++
++# Check that <build>no in usage-requirements of dependencies does not affect
++# install rule, i.e. a skipped installed target does not affect installation of
++# other targets.
++
++import BoostBuild
++
++t = BoostBuild.Tester()
++
++t.write("a.cpp", "int main() {}\n")
++
++t.write("jamroot.jam", """
++make x : : maker : <build>no ;
++exe a : a.cpp ;
++install install : x a ;
++""")
++
++t.run_build_system()
++t.expect_addition("install/a.exe")
++
++t.cleanup()
+diff --git a/tools/build/test/test_all.py b/tools/build/test/test_all.py
+index b7ef5ad70..9ed729d01 100644
+--- a/tools/build/test/test_all.py
++++ b/tools/build/test/test_all.py
+@@ -250,6 +250,7 @@ tests = ["abs_workdir",
+ "inherit_toolset",
+ "inherited_dependency",
+ "inline",
++ "install_build_no",
+ "libjpeg",
+ "liblzma",
+ "libpng",
+--
+2.20.1
+
diff --git a/meta/recipes-support/boost/boost_1.78.0.bb b/meta/recipes-support/boost/boost_1.78.0.bb
index 58be9dcf12..08364a4c3c 100644
--- a/meta/recipes-support/boost/boost_1.78.0.bb
+++ b/meta/recipes-support/boost/boost_1.78.0.bb
@@ -7,4 +7,5 @@ SRC_URI += "file://boost-CVE-2012-2677.patch \
file://0001-dont-setup-compiler-flags-m32-m64.patch \
file://de657e01635306085488290ea83de541ec393f8b.patch \
file://0001-futex-fix-build-on-32-bit-architectures-using-64-bit.patch \
+ file://0001-Don-t-skip-install-targets-if-there-s-build-no-in-ur.patch \
"
diff --git a/meta/recipes-support/curl/curl/0001-openssl-fix-CN-check-error-code.patch b/meta/recipes-support/curl/curl/0001-openssl-fix-CN-check-error-code.patch
new file mode 100644
index 0000000000..c0a2355e5b
--- /dev/null
+++ b/meta/recipes-support/curl/curl/0001-openssl-fix-CN-check-error-code.patch
@@ -0,0 +1,38 @@
+From 0677924c6ec7e0d68964553fb760f6d407242c54 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 8 Mar 2022 13:38:13 +0100
+Subject: [PATCH] openssl: fix CN check error code
+
+Due to a missing 'else' this returns error too easily.
+
+Regressed in: d15692ebb
+
+Reported-by: Kristoffer Gleditsch
+Fixes #8559
+Closes #8560
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/911714d617c106ed5d553bf003e34ec94ab6a136]
+
+Signed-off-by: Jose Quaresma <jose.quaresma@foundries.io>
+
+---
+ lib/vtls/openssl.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index 616a510..1bafe96 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -1808,7 +1808,8 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
+ memcpy(peer_CN, ASN1_STRING_get0_data(tmp), peerlen);
+ peer_CN[peerlen] = '\0';
+ }
+- result = CURLE_OUT_OF_MEMORY;
++ else
++ result = CURLE_OUT_OF_MEMORY;
+ }
+ }
+ else /* not a UTF8 name */
+--
+2.34.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32205.patch b/meta/recipes-support/curl/curl/CVE-2022-32205.patch
new file mode 100644
index 0000000000..165fd8af47
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32205.patch
@@ -0,0 +1,174 @@
+From a91c22a072cbb32e296f1efba3502f1b7775dfaf Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sun, 26 Jun 2022 11:00:48 +0200
+Subject: [PATCH] cookie: apply limits
+
+- Send no more than 150 cookies per request
+- Cap the max length used for a cookie: header to 8K
+- Cap the max number of received Set-Cookie: headers to 50
+
+Bug: https://curl.se/docs/CVE-2022-32205.html
+CVE-2022-32205
+Reported-by: Harry Sintonen
+Closes #9048
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/48d7064a49148f0394]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/cookie.c | 14 ++++++++++++--
+ lib/cookie.h | 21 +++++++++++++++++++--
+ lib/http.c | 13 +++++++++++--
+ lib/urldata.h | 1 +
+ 4 files changed, 43 insertions(+), 6 deletions(-)
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index 1b8c8f9..8a6aa1a 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -477,6 +477,10 @@ Curl_cookie_add(struct Curl_easy *data,
+ (void)data;
+ #endif
+
++ DEBUGASSERT(MAX_SET_COOKIE_AMOUNT <= 255); /* counter is an unsigned char */
++ if(data->req.setcookies >= MAX_SET_COOKIE_AMOUNT)
++ return NULL;
++
+ /* First, alloc and init a new struct for it */
+ co = calloc(1, sizeof(struct Cookie));
+ if(!co)
+@@ -816,7 +820,7 @@ Curl_cookie_add(struct Curl_easy *data,
+ freecookie(co);
+ return NULL;
+ }
+-
++ data->req.setcookies++;
+ }
+ else {
+ /*
+@@ -1354,7 +1358,8 @@ static struct Cookie *dup_cookie(struct Cookie *src)
+ *
+ * It shall only return cookies that haven't expired.
+ */
+-struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
++struct Cookie *Curl_cookie_getlist(struct Curl_easy *data,
++ struct CookieInfo *c,
+ const char *host, const char *path,
+ bool secure)
+ {
+@@ -1409,6 +1414,11 @@ struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
+ mainco = newco;
+
+ matches++;
++ if(matches >= MAX_COOKIE_SEND_AMOUNT) {
++ infof(data, "Included max number of cookies (%u) in request!",
++ matches);
++ break;
++ }
+ }
+ else
+ goto fail;
+diff --git a/lib/cookie.h b/lib/cookie.h
+index 0ffe08e..7411980 100644
+--- a/lib/cookie.h
++++ b/lib/cookie.h
+@@ -81,10 +81,26 @@ struct CookieInfo {
+ */
+ #define MAX_COOKIE_LINE 5000
+
+-/* This is the maximum length of a cookie name or content we deal with: */
++/* Maximum length of an incoming cookie name or content we deal with. Longer
++ cookies are ignored. */
+ #define MAX_NAME 4096
+ #define MAX_NAME_TXT "4095"
+
++/* Maximum size for an outgoing cookie line libcurl will use in an http
++ request. This is the default maximum length used in some versions of Apache
++ httpd. */
++#define MAX_COOKIE_HEADER_LEN 8190
++
++/* Maximum number of cookies libcurl will send in a single request, even if
++ there might be more cookies that match. One reason to cap the number is to
++ keep the maximum HTTP request within the maximum allowed size. */
++#define MAX_COOKIE_SEND_AMOUNT 150
++
++/* Maximum number of Set-Cookie: lines accepted in a single response. If more
++ such header lines are received, they are ignored. This value must be less
++ than 256 since an unsigned char is used to count. */
++#define MAX_SET_COOKIE_AMOUNT 50
++
+ struct Curl_easy;
+ /*
+ * Add a cookie to the internal list of cookies. The domain and path arguments
+@@ -97,7 +113,8 @@ struct Cookie *Curl_cookie_add(struct Curl_easy *data,
+ const char *domain, const char *path,
+ bool secure);
+
+-struct Cookie *Curl_cookie_getlist(struct CookieInfo *c, const char *host,
++struct Cookie *Curl_cookie_getlist(struct Curl_easy *data,
++ struct CookieInfo *c, const char *host,
+ const char *path, bool secure);
+ void Curl_cookie_freelist(struct Cookie *cookies);
+ void Curl_cookie_clearall(struct CookieInfo *cookies);
+diff --git a/lib/http.c b/lib/http.c
+index 4433824..2c8b0c4 100644
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -2709,12 +2709,14 @@ CURLcode Curl_http_bodysend(struct Curl_easy *data, struct connectdata *conn,
+ }
+
+ #if !defined(CURL_DISABLE_COOKIES)
++
+ CURLcode Curl_http_cookies(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct dynbuf *r)
+ {
+ CURLcode result = CURLE_OK;
+ char *addcookies = NULL;
++ bool linecap = FALSE;
+ if(data->set.str[STRING_COOKIE] &&
+ !Curl_checkheaders(data, STRCONST("Cookie")))
+ addcookies = data->set.str[STRING_COOKIE];
+@@ -2732,7 +2734,7 @@ CURLcode Curl_http_cookies(struct Curl_easy *data,
+ !strcmp(host, "127.0.0.1") ||
+ !strcmp(host, "[::1]") ? TRUE : FALSE;
+ Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
+- co = Curl_cookie_getlist(data->cookies, host, data->state.up.path,
++ co = Curl_cookie_getlist(data, data->cookies, host, data->state.up.path,
+ secure_context);
+ Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
+ }
+@@ -2746,6 +2748,13 @@ CURLcode Curl_http_cookies(struct Curl_easy *data,
+ if(result)
+ break;
+ }
++ if((Curl_dyn_len(r) + strlen(co->name) + strlen(co->value) + 1) >=
++ MAX_COOKIE_HEADER_LEN) {
++ infof(data, "Restricted outgoing cookies due to header size, "
++ "'%s' not sent", co->name);
++ linecap = TRUE;
++ break;
++ }
+ result = Curl_dyn_addf(r, "%s%s=%s", count?"; ":"",
+ co->name, co->value);
+ if(result)
+@@ -2756,7 +2765,7 @@ CURLcode Curl_http_cookies(struct Curl_easy *data,
+ }
+ Curl_cookie_freelist(store);
+ }
+- if(addcookies && !result) {
++ if(addcookies && !result && !linecap) {
+ if(!count)
+ result = Curl_dyn_addn(r, STRCONST("Cookie: "));
+ if(!result) {
+diff --git a/lib/urldata.h b/lib/urldata.h
+index e006495..54faf7d 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -707,6 +707,7 @@ struct SingleRequest {
+ #ifndef CURL_DISABLE_DOH
+ struct dohdata *doh; /* DoH specific data for this request */
+ #endif
++ unsigned char setcookies;
+ BIT(header); /* incoming data has HTTP header */
+ BIT(content_range); /* set TRUE if Content-Range: was found */
+ BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
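
To illustrate the limits added above (150 cookies per request, an 8190-byte Cookie: header, 50 accepted Set-Cookie: lines), here is a rough standalone sketch of the outgoing-header capping logic; it uses plain C strings instead of curl's dynbuf and is not the actual Curl_http_cookies() code:

    #include <stdio.h>
    #include <string.h>

    #define MAX_COOKIE_HEADER_LEN  8190   /* limits from lib/cookie.h above */
    #define MAX_COOKIE_SEND_AMOUNT 150

    struct cookie { const char *name; const char *value; };

    /* Stop appending once either the byte budget or the per-request cookie
     * count would be exceeded. */
    static size_t build_cookie_header(const struct cookie *list, size_t n,
                                      char *out, size_t outsz)
    {
        size_t used = 0, sent = 0;

        out[0] = '\0';
        for (size_t i = 0; i < n && sent < MAX_COOKIE_SEND_AMOUNT; i++) {
            size_t need = strlen(list[i].name) + strlen(list[i].value) + 1;
            if (used + need >= MAX_COOKIE_HEADER_LEN || used + need + 3 >= outsz)
                break;                      /* header would grow past the cap */
            used += (size_t)snprintf(out + used, outsz - used, "%s%s=%s",
                                     sent ? "; " : "", list[i].name, list[i].value);
            sent++;
        }
        return sent;
    }

    int main(void)
    {
        const struct cookie jar[] = { { "session", "abc123" }, { "theme", "dark" } };
        char header[256];
        size_t sent = build_cookie_header(jar, 2, header, sizeof(header));

        printf("Cookie: %s   (%zu cookies sent)\n", header, sent);
        return 0;
    }
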
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32206.patch b/meta/recipes-support/curl/curl/CVE-2022-32206.patch
new file mode 100644
index 0000000000..25f5b27cc7
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32206.patch
@@ -0,0 +1,51 @@
+From e12531340b03d242d3f892aa8797faf12b56dddf Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 16 May 2022 16:28:13 +0200
+Subject: [PATCH] content_encoding: return error on too many compression steps
+
+The max allowed steps is arbitrarily set to 5.
+
+Bug: https://curl.se/docs/CVE-2022-32206.html
+CVE-2022-32206
+Reported-by: Harry Sintonen
+Closes #9049
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/3a09fbb7f264c67c43]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/content_encoding.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/lib/content_encoding.c b/lib/content_encoding.c
+index c03637a..6f994b3 100644
+--- a/lib/content_encoding.c
++++ b/lib/content_encoding.c
+@@ -1026,12 +1026,16 @@ static const struct content_encoding *find_encoding(const char *name,
+ return NULL;
+ }
+
++/* allow no more than 5 "chained" compression steps */
++#define MAX_ENCODE_STACK 5
++
+ /* Set-up the unencoding stack from the Content-Encoding header value.
+ * See RFC 7231 section 3.1.2.2. */
+ CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
+ const char *enclist, int maybechunked)
+ {
+ struct SingleRequest *k = &data->req;
++ int counter = 0;
+
+ do {
+ const char *name;
+@@ -1066,6 +1070,11 @@ CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
+ if(!encoding)
+ encoding = &error_encoding; /* Defer error at stack use. */
+
++ if(++counter >= MAX_ENCODE_STACK) {
++ failf(data, "Reject response due to %u content encodings",
++ counter);
++ return CURLE_BAD_CONTENT_ENCODING;
++ }
+ /* Stack the unencoding stage. */
+ writer = new_unencoding_writer(data, encoding, k->writer_stack);
+ if(!writer)
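
A rough sketch of the guard introduced above: count decoders while walking the Content-Encoding list and refuse to stack more than MAX_ENCODE_STACK of them. strtok() stands in for curl's own tokenizer, and the -1 return is only a placeholder for CURLE_BAD_CONTENT_ENCODING:

    #include <stdio.h>
    #include <string.h>

    #define MAX_ENCODE_STACK 5   /* the limit the patch introduces */

    static int build_unencoding_stack(const char *enclist)
    {
        char buf[256];
        int counter = 0;

        snprintf(buf, sizeof(buf), "%s", enclist);
        for (char *name = strtok(buf, ", "); name; name = strtok(NULL, ", ")) {
            /* Mirrors the ++counter >= MAX_ENCODE_STACK check added above. */
            if (++counter >= MAX_ENCODE_STACK) {
                fprintf(stderr, "Reject response due to %d content encodings\n",
                        counter);
                return -1;
            }
            printf("stacking decoder: %s\n", name);
        }
        return 0;
    }

    int main(void)
    {
        return build_unencoding_stack("gzip, br, gzip, br, gzip, br") ? 1 : 0;
    }
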
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32207.patch b/meta/recipes-support/curl/curl/CVE-2022-32207.patch
new file mode 100644
index 0000000000..bc16b62f39
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32207.patch
@@ -0,0 +1,283 @@
+From 759088694e2ba68ddc5ffe042b071dadad6ff675 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 25 May 2022 10:09:53 +0200
+Subject: [PATCH] fopen: add Curl_fopen() for better overwriting of files
+
+Bug: https://curl.se/docs/CVE-2022-32207.html
+CVE-2022-32207
+Reported-by: Harry Sintonen
+Closes #9050
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/20f9dd6bae50b]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ CMakeLists.txt | 1 +
+ configure.ac | 1 +
+ lib/Makefile.inc | 2 +
+ lib/cookie.c | 19 ++-----
+ lib/curl_config.h.cmake | 3 ++
+ lib/fopen.c | 113 ++++++++++++++++++++++++++++++++++++++++
+ lib/fopen.h | 30 +++++++++++
+ 7 files changed, 154 insertions(+), 15 deletions(-)
+ create mode 100644 lib/fopen.c
+ create mode 100644 lib/fopen.h
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index b77de6d..a0bfaad 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -1027,6 +1027,7 @@ elseif(HAVE_LIBSOCKET)
+ set(CMAKE_REQUIRED_LIBRARIES socket)
+ endif()
+
++check_symbol_exists(fchmod "${CURL_INCLUDES}" HAVE_FCHMOD)
+ check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME)
+ check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET)
+ check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT)
+diff --git a/configure.ac b/configure.ac
+index d431870..7433bb9 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -3351,6 +3351,7 @@ AC_CHECK_DECLS([getpwuid_r], [], [AC_DEFINE(HAVE_DECL_GETPWUID_R_MISSING, 1, "Se
+
+
+ AC_CHECK_FUNCS([fnmatch \
++ fchmod \
+ geteuid \
+ getpass_r \
+ getppid \
+diff --git a/lib/Makefile.inc b/lib/Makefile.inc
+index e8f110f..5139b03 100644
+--- a/lib/Makefile.inc
++++ b/lib/Makefile.inc
+@@ -133,6 +133,7 @@ LIB_CFILES = \
+ escape.c \
+ file.c \
+ fileinfo.c \
++ fopen.c \
+ formdata.c \
+ ftp.c \
+ ftplistparser.c \
+@@ -263,6 +264,7 @@ LIB_HFILES = \
+ escape.h \
+ file.h \
+ fileinfo.h \
++ fopen.h \
+ formdata.h \
+ ftp.h \
+ ftplistparser.h \
+diff --git a/lib/cookie.c b/lib/cookie.c
+index 8a6aa1a..cb0c03b 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -96,8 +96,8 @@ Example set of cookies:
+ #include "curl_get_line.h"
+ #include "curl_memrchr.h"
+ #include "parsedate.h"
+-#include "rand.h"
+ #include "rename.h"
++#include "fopen.h"
+
+ /* The last 3 #include files should be in this order */
+ #include "curl_printf.h"
+@@ -1620,20 +1620,9 @@ static CURLcode cookie_output(struct Curl_easy *data,
+ use_stdout = TRUE;
+ }
+ else {
+- unsigned char randsuffix[9];
+-
+- if(Curl_rand_hex(data, randsuffix, sizeof(randsuffix)))
+- return 2;
+-
+- tempstore = aprintf("%s.%s.tmp", filename, randsuffix);
+- if(!tempstore)
+- return CURLE_OUT_OF_MEMORY;
+-
+- out = fopen(tempstore, FOPEN_WRITETEXT);
+- if(!out) {
+- error = CURLE_WRITE_ERROR;
++ error = Curl_fopen(data, filename, &out, &tempstore);
++ if(error)
+ goto error;
+- }
+ }
+
+ fputs("# Netscape HTTP Cookie File\n"
+@@ -1680,7 +1669,7 @@ static CURLcode cookie_output(struct Curl_easy *data,
+ if(!use_stdout) {
+ fclose(out);
+ out = NULL;
+- if(Curl_rename(tempstore, filename)) {
++ if(tempstore && Curl_rename(tempstore, filename)) {
+ unlink(tempstore);
+ error = CURLE_WRITE_ERROR;
+ goto error;
+diff --git a/lib/curl_config.h.cmake b/lib/curl_config.h.cmake
+index d2a0f43..c254359 100644
+--- a/lib/curl_config.h.cmake
++++ b/lib/curl_config.h.cmake
+@@ -157,6 +157,9 @@
+ /* Define to 1 if you have the <assert.h> header file. */
+ #cmakedefine HAVE_ASSERT_H 1
+
++/* Define to 1 if you have the `fchmod' function. */
++#cmakedefine HAVE_FCHMOD 1
++
+ /* Define to 1 if you have the `basename' function. */
+ #cmakedefine HAVE_BASENAME 1
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+new file mode 100644
+index 0000000..ad3691b
+--- /dev/null
++++ b/lib/fopen.c
+@@ -0,0 +1,113 @@
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++#include "curl_setup.h"
++
++#if !defined(CURL_DISABLE_COOKIES) || !defined(CURL_DISABLE_ALTSVC) || \
++ !defined(CURL_DISABLE_HSTS)
++
++#ifdef HAVE_FCNTL_H
++#include <fcntl.h>
++#endif
++
++#include "urldata.h"
++#include "rand.h"
++#include "fopen.h"
++/* The last 3 #include files should be in this order */
++#include "curl_printf.h"
++#include "curl_memory.h"
++#include "memdebug.h"
++
++/*
++ * Curl_fopen() opens a file for writing with a temp name, to be renamed
++ * to the final name when completed. If there is an existing file using this
++ * name at the time of the open, this function will clone the mode from that
++ * file. if 'tempname' is non-NULL, it needs a rename after the file is
++ * written.
++ */
++CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
++ FILE **fh, char **tempname)
++{
++ CURLcode result = CURLE_WRITE_ERROR;
++ unsigned char randsuffix[9];
++ char *tempstore = NULL;
++ struct_stat sb;
++ int fd = -1;
++ *tempname = NULL;
++
++ if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) {
++ /* a non-regular file, fallback to direct fopen() */
++ *fh = fopen(filename, FOPEN_WRITETEXT);
++ if(*fh)
++ return CURLE_OK;
++ goto fail;
++ }
++
++ result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
++ if(result)
++ goto fail;
++
++ tempstore = aprintf("%s.%s.tmp", filename, randsuffix);
++ if(!tempstore) {
++ result = CURLE_OUT_OF_MEMORY;
++ goto fail;
++ }
++
++ result = CURLE_WRITE_ERROR;
++ fd = open(tempstore, O_WRONLY | O_CREAT | O_EXCL, 0600);
++ if(fd == -1)
++ goto fail;
++
++#ifdef HAVE_FCHMOD
++ {
++ struct_stat nsb;
++ if((fstat(fd, &nsb) != -1) &&
++ (nsb.st_uid == sb.st_uid) && (nsb.st_gid == sb.st_gid)) {
++ /* if the user and group are the same, clone the original mode */
++ if(fchmod(fd, sb.st_mode) == -1)
++ goto fail;
++ }
++ }
++#endif
++
++ *fh = fdopen(fd, FOPEN_WRITETEXT);
++ if(!*fh)
++ goto fail;
++
++ *tempname = tempstore;
++ return CURLE_OK;
++
++fail:
++ if(fd != -1) {
++ close(fd);
++ unlink(tempstore);
++ }
++
++ free(tempstore);
++
++ *tempname = NULL;
++ return result;
++}
++
++#endif /* ! disabled */
+diff --git a/lib/fopen.h b/lib/fopen.h
+new file mode 100644
+index 0000000..289e55f
+--- /dev/null
++++ b/lib/fopen.h
+@@ -0,0 +1,30 @@
++#ifndef HEADER_CURL_FOPEN_H
++#define HEADER_CURL_FOPEN_H
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
++ FILE **fh, char **tempname);
++
++#endif
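
The pattern added above boils down to: never truncate the existing file in place, write into a freshly created 0600 temp file, clone the original mode only when owner and group are unchanged, and rename the temp file over the target at the end. A simplified standalone sketch, using mkstemp() in place of curl's Curl_rand_hex()-based temp naming and with most error handling trimmed:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static FILE *open_for_rewrite(const char *filename, char *tempname, size_t tlen)
    {
        struct stat sb, nsb;
        int fd;

        tempname[0] = '\0';
        if (stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode))
            return fopen(filename, "w");    /* non-regular target: plain fopen */

        snprintf(tempname, tlen, "%s.XXXXXX", filename);
        fd = mkstemp(tempname);             /* O_CREAT|O_EXCL, mode 0600 */
        if (fd == -1)
            return NULL;

        /* Clone the original mode only when owner and group are unchanged,
         * mirroring the fchmod() logic in the patch. */
        if (fstat(fd, &nsb) == 0 && nsb.st_uid == sb.st_uid && nsb.st_gid == sb.st_gid)
            (void)fchmod(fd, sb.st_mode);

        return fdopen(fd, "w");
    }

    int main(void)
    {
        char tmp[4096];
        FILE *fh = open_for_rewrite("cookies.txt", tmp, sizeof(tmp));
        if (!fh)
            return 1;
        fputs("# rewritten without truncating the original in place\n", fh);
        fclose(fh);
        if (tmp[0] && rename(tmp, "cookies.txt") != 0)
            unlink(tmp);
        return 0;
    }
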
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32208.patch b/meta/recipes-support/curl/curl/CVE-2022-32208.patch
new file mode 100644
index 0000000000..9a4e398370
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32208.patch
@@ -0,0 +1,67 @@
+From fd2ffddec315c029e923e6e6f2c049809d01a5fc Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Jun 2022 09:27:24 +0200
+Subject: [PATCH] krb5: return error properly on decode errors
+
+Bug: https://curl.se/docs/CVE-2022-32208.html
+CVE-2022-32208
+Reported-by: Harry Sintonen
+Closes #9051
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/6ecdf5136b52af7]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/krb5.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/lib/krb5.c b/lib/krb5.c
+index 787137c..6f9e1f7 100644
+--- a/lib/krb5.c
++++ b/lib/krb5.c
+@@ -140,11 +140,8 @@ krb5_decode(void *app_data, void *buf, int len,
+ enc.value = buf;
+ enc.length = len;
+ maj = gss_unwrap(&min, *context, &enc, &dec, NULL, NULL);
+- if(maj != GSS_S_COMPLETE) {
+- if(len >= 4)
+- strcpy(buf, "599 ");
++ if(maj != GSS_S_COMPLETE)
+ return -1;
+- }
+
+ memcpy(buf, dec.value, dec.length);
+ len = curlx_uztosi(dec.length);
+@@ -506,6 +503,7 @@ static CURLcode read_data(struct connectdata *conn,
+ {
+ int len;
+ CURLcode result;
++ int nread;
+
+ result = socket_read(fd, &len, sizeof(len));
+ if(result)
+@@ -514,7 +512,10 @@ static CURLcode read_data(struct connectdata *conn,
+ if(len) {
+ /* only realloc if there was a length */
+ len = ntohl(len);
+- buf->data = Curl_saferealloc(buf->data, len);
++ if(len > CURL_MAX_INPUT_LENGTH)
++ len = 0;
++ else
++ buf->data = Curl_saferealloc(buf->data, len);
+ }
+ if(!len || !buf->data)
+ return CURLE_OUT_OF_MEMORY;
+@@ -522,8 +523,11 @@ static CURLcode read_data(struct connectdata *conn,
+ result = socket_read(fd, buf->data, len);
+ if(result)
+ return result;
+- buf->size = conn->mech->decode(conn->app_data, buf->data, len,
+- conn->data_prot, conn);
++ nread = conn->mech->decode(conn->app_data, buf->data, len,
++ conn->data_prot, conn);
++ if(nread < 0)
++ return CURLE_RECV_ERROR;
++ buf->size = (size_t)nread;
+ buf->index = 0;
+ return CURLE_OK;
+ }
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32221.patch b/meta/recipes-support/curl/curl/CVE-2022-32221.patch
new file mode 100644
index 0000000000..b78b2ce1a8
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32221.patch
@@ -0,0 +1,28 @@
+From a64e3e59938abd7d667e4470a18072a24d7e9de9 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 15 Sep 2022 09:22:45 +0200
+Subject: [PATCH] setopt: when POST is set, reset the 'upload' field
+
+Reported-by: RobBotic1 on github
+Fixes #9507
+Closes #9511
+
+CVE: CVE-2022-32221
+Upstream-Status: Backport [https://github.com/curl/curl/commit/a64e3e59938abd7d667e4470a18072a24d7e9de9]
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ lib/setopt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/setopt.c b/lib/setopt.c
+index 03c4efdbf1e58..7289a4e78bdd0 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -700,6 +700,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ }
+ else
+ data->set.method = HTTPREQ_GET;
++ data->set.upload = FALSE;
+ break;
+
+ case CURLOPT_HTTPPOST:
diff --git a/meta/recipes-support/curl/curl/CVE-2022-35252.patch b/meta/recipes-support/curl/curl/CVE-2022-35252.patch
new file mode 100644
index 0000000000..7b6f81bd02
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-35252.patch
@@ -0,0 +1,72 @@
+From 62c09239ac4e08239c8e363b06901fc80637d8c7 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 29 Aug 2022 00:09:17 +0200
+Subject: [PATCH] cookie: reject cookies with "control bytes"
+
+Rejects 0x01 - 0x1f (except 0x09) plus 0x7f
+
+Reported-by: Axel Chong
+
+Bug: https://curl.se/docs/CVE-2022-35252.html
+
+CVE-2022-35252
+
+Closes #9381
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/8dfc93e573ca740544a2d79ebb]
+
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/cookie.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index cb0c03b..e0470a1 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -438,6 +438,30 @@ static bool bad_domain(const char *domain)
+ return TRUE;
+ }
+
++/*
++ RFC 6265 section 4.1.1 says a server should accept this range:
++
++ cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
++
++ But Firefox and Chrome as of June 2022 accept space, comma and double-quotes
++ fine. The prime reason for filtering out control bytes is that some HTTP
++ servers return 400 for requests that contain such.
++*/
++static int invalid_octets(const char *p)
++{
++ /* Reject all bytes \x01 - \x1f (*except* \x09, TAB) + \x7f */
++ static const char badoctets[] = {
++ "\x01\x02\x03\x04\x05\x06\x07\x08\x0a"
++ "\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14"
++ "\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f"
++ };
++ size_t vlen, len;
++ /* scan for all the octets that are *not* in cookie-octet */
++ len = strcspn(p, badoctets);
++ vlen = strlen(p);
++ return (len != vlen);
++}
++
+ /*
+ * Curl_cookie_add
+ *
+@@ -590,6 +614,11 @@ Curl_cookie_add(struct Curl_easy *data,
+ badcookie = TRUE;
+ break;
+ }
++ if(invalid_octets(whatptr) || invalid_octets(name)) {
++ infof(data, "invalid octets in name/value, cookie dropped");
++ badcookie = TRUE;
++ break;
++ }
+ }
+ else if(!len) {
+ /*
+--
+2.35.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2022-42915.patch b/meta/recipes-support/curl/curl/CVE-2022-42915.patch
new file mode 100644
index 0000000000..0f37a80e09
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-42915.patch
@@ -0,0 +1,53 @@
+From 55e1875729f9d9fc7315cec611bffbd2c817ad89 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 6 Oct 2022 14:13:36 +0200
+Subject: [PATCH] http_proxy: restore the protocol pointer on error
+
+Reported-by: Trail of Bits
+
+Closes #9790
+
+CVE: CVE-2022-42915
+Upstream-Status: Backport [https://github.com/curl/curl/commit/55e1875729f9d9fc7315cec611bffbd2c817ad89]
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ lib/http_proxy.c | 6 ++----
+ lib/url.c | 9 ---------
+ 2 files changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/lib/http_proxy.c b/lib/http_proxy.c
+index 1f87f6c62aa40..cc20b3a801941 100644
+--- a/lib/http_proxy.c
++++ b/lib/http_proxy.c
+@@ -212,10 +212,8 @@ void Curl_connect_done(struct Curl_easy *data)
+ Curl_dyn_free(&s->rcvbuf);
+ Curl_dyn_free(&s->req);
+
+- /* restore the protocol pointer, if not already done */
+- if(s->prot_save)
+- data->req.p.http = s->prot_save;
+- s->prot_save = NULL;
++ /* restore the protocol pointer */
++ data->req.p.http = s->prot_save;
+ data->info.httpcode = 0; /* clear it as it might've been used for the
+ proxy */
+ data->req.ignorebody = FALSE;
+diff --git a/lib/url.c b/lib/url.c
+index 690c53c81a3c1..be5ffca2d8b20 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -751,15 +751,6 @@ static void conn_shutdown(struct Curl_easy *data, struct connectdata *conn)
+ DEBUGASSERT(data);
+ infof(data, "Closing connection %ld", conn->connection_id);
+
+-#ifndef USE_HYPER
+- if(conn->connect_state && conn->connect_state->prot_save) {
+- /* If this was closed with a CONNECT in progress, cleanup this temporary
+- struct arrangement */
+- data->req.p.http = NULL;
+- Curl_safefree(conn->connect_state->prot_save);
+- }
+-#endif
+-
+ /* possible left-overs from the async name resolvers */
+ Curl_resolver_cancel(data);
diff --git a/meta/recipes-support/curl/curl/CVE-2022-42916.patch b/meta/recipes-support/curl/curl/CVE-2022-42916.patch
new file mode 100644
index 0000000000..fbc592280a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-42916.patch
@@ -0,0 +1,136 @@
+From 53bcf55b4538067e6dc36242168866becb987bb7 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 12 Oct 2022 10:47:59 +0200
+Subject: [PATCH] url: use IDN decoded names for HSTS checks
+
+Reported-by: Hiroki Kurosawa
+
+Closes #9791
+
+CVE: CVE-2022-42916
+Upstream-Status: Backport [https://github.com/curl/curl/commit/53bcf55b4538067e6dc36242168866becb987bb7]
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+Comments: Refreshed hunk
+---
+ lib/url.c | 91 ++++++++++++++++++++++++++++---------------------------
+ 1 file changed, 47 insertions(+), 44 deletions(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index a3be56bced9de..690c53c81a3c1 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -2012,10 +2012,56 @@
+ if(!strcasecompare("file", data->state.up.scheme))
+ return CURLE_OUT_OF_MEMORY;
+ }
++ hostname = data->state.up.hostname;
++
++ if(hostname && hostname[0] == '[') {
++ /* This looks like an IPv6 address literal. See if there is an address
++ scope. */
++ size_t hlen;
++ conn->bits.ipv6_ip = TRUE;
++ /* cut off the brackets! */
++ hostname++;
++ hlen = strlen(hostname);
++ hostname[hlen - 1] = 0;
++
++ zonefrom_url(uh, data, conn);
++ }
++
++ /* make sure the connect struct gets its own copy of the host name */
++ conn->host.rawalloc = strdup(hostname ? hostname : "");
++ if(!conn->host.rawalloc)
++ return CURLE_OUT_OF_MEMORY;
++ conn->host.name = conn->host.rawalloc;
++
++ /*************************************************************
++ * IDN-convert the hostnames
++ *************************************************************/
++ result = Curl_idnconvert_hostname(data, &conn->host);
++ if(result)
++ return result;
++ if(conn->bits.conn_to_host) {
++ result = Curl_idnconvert_hostname(data, &conn->conn_to_host);
++ if(result)
++ return result;
++ }
++#ifndef CURL_DISABLE_PROXY
++ if(conn->bits.httpproxy) {
++ result = Curl_idnconvert_hostname(data, &conn->http_proxy.host);
++ if(result)
++ return result;
++ }
++ if(conn->bits.socksproxy) {
++ result = Curl_idnconvert_hostname(data, &conn->socks_proxy.host);
++ if(result)
++ return result;
++ }
++#endif
+
+ #ifndef CURL_DISABLE_HSTS
++ /* HSTS upgrade */
+ if(data->hsts && strcasecompare("http", data->state.up.scheme)) {
+- if(Curl_hsts(data->hsts, data->state.up.hostname, TRUE)) {
++ /* This MUST use the IDN decoded name */
++ if(Curl_hsts(data->hsts, conn->host.name, TRUE)) {
+ char *url;
+ Curl_safefree(data->state.up.scheme);
+ uc = curl_url_set(uh, CURLUPART_SCHEME, "https", 0);
+@@ -2145,26 +2191,6 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
+
+ (void)curl_url_get(uh, CURLUPART_QUERY, &data->state.up.query, 0);
+
+- hostname = data->state.up.hostname;
+- if(hostname && hostname[0] == '[') {
+- /* This looks like an IPv6 address literal. See if there is an address
+- scope. */
+- size_t hlen;
+- conn->bits.ipv6_ip = TRUE;
+- /* cut off the brackets! */
+- hostname++;
+- hlen = strlen(hostname);
+- hostname[hlen - 1] = 0;
+-
+- zonefrom_url(uh, data, conn);
+- }
+-
+- /* make sure the connect struct gets its own copy of the host name */
+- conn->host.rawalloc = strdup(hostname ? hostname : "");
+- if(!conn->host.rawalloc)
+- return CURLE_OUT_OF_MEMORY;
+- conn->host.name = conn->host.rawalloc;
+-
+ #ifdef ENABLE_IPV6
+ if(data->set.scope_id)
+ /* Override any scope that was set above. */
+@@ -3713,29 +3739,6 @@ static CURLcode create_conn(struct Curl_easy *data,
+ if(result)
+ goto out;
+
+- /*************************************************************
+- * IDN-convert the hostnames
+- *************************************************************/
+- result = Curl_idnconvert_hostname(data, &conn->host);
+- if(result)
+- goto out;
+- if(conn->bits.conn_to_host) {
+- result = Curl_idnconvert_hostname(data, &conn->conn_to_host);
+- if(result)
+- goto out;
+- }
+-#ifndef CURL_DISABLE_PROXY
+- if(conn->bits.httpproxy) {
+- result = Curl_idnconvert_hostname(data, &conn->http_proxy.host);
+- if(result)
+- goto out;
+- }
+- if(conn->bits.socksproxy) {
+- result = Curl_idnconvert_hostname(data, &conn->socks_proxy.host);
+- if(result)
+- goto out;
+- }
+-#endif
+
+ /*************************************************************
+ * Check whether the host and the "connect to host" are equal.
diff --git a/meta/recipes-support/curl/curl/CVE-2022-43551.patch b/meta/recipes-support/curl/curl/CVE-2022-43551.patch
new file mode 100644
index 0000000000..e1ec7bf72e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-43551.patch
@@ -0,0 +1,35 @@
+From 9e71901634e276dd050481c4320f046bebb1bc28 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 19 Dec 2022 08:36:55 +0100
+Subject: [PATCH] http: use the IDN decoded name in HSTS checks
+
+Otherwise it stores the info HSTS into the persistent cache for the IDN
+name which will not match when the HSTS status is later checked for
+using the decoded name.
+
+Reported-by: Hiroki Kurosawa
+
+Closes #10111
+
+CVE: CVE-2022-43551
+Upstream-Status: Backport [https://github.com/curl/curl/commit/9e71901634e276dd050481c4320f046bebb1bc28]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comments: Hunk refresh to remove patch-fuzz warning
+
+---
+ lib/http.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/http.c b/lib/http.c
+index 85528a2218eee..a784745a8d505 100644
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -3652,7 +3652,7 @@ CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn,
+ else if(data->hsts && checkprefix("Strict-Transport-Security:", headp) &&
+ (conn->handler->flags & PROTOPT_SSL)) {
+ CURLcode check =
+- Curl_hsts_parse(data->hsts, data->state.up.hostname,
++ Curl_hsts_parse(data->hsts, conn->host.name,
+ headp + strlen("Strict-Transport-Security:"));
+ if(check)
+ infof(data, "Illegal STS header skipped");
diff --git a/meta/recipes-support/curl/curl/CVE-2022-43552.patch b/meta/recipes-support/curl/curl/CVE-2022-43552.patch
new file mode 100644
index 0000000000..dfe6d8c6d5
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-43552.patch
@@ -0,0 +1,80 @@
+From 4f20188ac644afe174be6005ef4f6ffba232b8b2 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 19 Dec 2022 08:38:37 +0100
+Subject: [PATCH] smb/telnet: do not free the protocol struct in *_done()
+
+It is managed by the generic layer.
+
+Reported-by: Trail of Bits
+
+Closes #10112
+
+CVE: CVE-2022-43552
+Upstream-Status: Backport [https://github.com/curl/curl/commit/4f20188ac644afe174be6005ef4f6ffba232b8b2]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ lib/smb.c | 14 ++------------
+ lib/telnet.c | 3 ---
+ 2 files changed, 2 insertions(+), 15 deletions(-)
+
+diff --git a/lib/smb.c b/lib/smb.c
+index 2cfe041dff072..48d5a2fe006d5 100644
+--- a/lib/smb.c
++++ b/lib/smb.c
+@@ -58,8 +58,6 @@ static CURLcode smb_connect(struct Curl_easy *data, bool *done);
+ static CURLcode smb_connection_state(struct Curl_easy *data, bool *done);
+ static CURLcode smb_do(struct Curl_easy *data, bool *done);
+ static CURLcode smb_request_state(struct Curl_easy *data, bool *done);
+-static CURLcode smb_done(struct Curl_easy *data, CURLcode status,
+- bool premature);
+ static CURLcode smb_disconnect(struct Curl_easy *data,
+ struct connectdata *conn, bool dead);
+ static int smb_getsock(struct Curl_easy *data, struct connectdata *conn,
+@@ -74,7 +72,7 @@ const struct Curl_handler Curl_handler_smb = {
+ "SMB", /* scheme */
+ smb_setup_connection, /* setup_connection */
+ smb_do, /* do_it */
+- smb_done, /* done */
++ ZERO_NULL, /* done */
+ ZERO_NULL, /* do_more */
+ smb_connect, /* connect_it */
+ smb_connection_state, /* connecting */
+@@ -101,7 +99,7 @@ const struct Curl_handler Curl_handler_smbs = {
+ "SMBS", /* scheme */
+ smb_setup_connection, /* setup_connection */
+ smb_do, /* do_it */
+- smb_done, /* done */
++ ZERO_NULL, /* done */
+ ZERO_NULL, /* do_more */
+ smb_connect, /* connect_it */
+ smb_connection_state, /* connecting */
+@@ -936,14 +934,6 @@ static CURLcode smb_request_state(struct Curl_easy *data, bool *done)
+ return CURLE_OK;
+ }
+
+-static CURLcode smb_done(struct Curl_easy *data, CURLcode status,
+- bool premature)
+-{
+- (void) premature;
+- Curl_safefree(data->req.p.smb);
+- return status;
+-}
+-
+ static CURLcode smb_disconnect(struct Curl_easy *data,
+ struct connectdata *conn, bool dead)
+ {
+diff --git a/lib/telnet.c b/lib/telnet.c
+index 24d3f1efb14c8..22bc81e755222 100644
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -1248,9 +1248,6 @@ static CURLcode telnet_done(struct Curl_easy *data,
+
+ curl_slist_free_all(tn->telnet_vars);
+ tn->telnet_vars = NULL;
+-
+- Curl_safefree(data->req.p.telnet);
+-
+ return CURLE_OK;
+ }
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23914_5-1.patch b/meta/recipes-support/curl/curl/CVE-2023-23914_5-1.patch
new file mode 100644
index 0000000000..d357cee76c
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23914_5-1.patch
@@ -0,0 +1,280 @@
+From 076a2f629119222aeeb50f5a03bf9f9052fabb9a Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 27 Dec 2022 11:50:20 +0100
+Subject: [PATCH] share: add sharing of HSTS cache among handles
+
+Closes #10138
+
+CVE: CVE-2023-23914 CVE-2023-23915
+Upstream-Status: Backport [https://github.com/curl/curl/commit/076a2f629119222aeeb50f5a03bf9f9052fabb9a]
+Comment: Refreshed hunk from hsts.c and urldata.h
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ include/curl/curl.h | 1 +
+ lib/hsts.c | 15 +++++++++
+ lib/hsts.h | 2 ++
+ lib/setopt.c | 48 ++++++++++++++++++++++++-----
+ lib/share.c | 32 +++++++++++++++++--
+ lib/share.h | 6 +++-
+ lib/transfer.c | 3 ++
+ lib/url.c | 6 +++-
+ lib/urldata.h | 2 ++
+ 9 files changed, 109 insertions(+), 11 deletions(-)
+
+--- a/include/curl/curl.h
++++ b/include/curl/curl.h
+@@ -2953,6 +2953,7 @@ typedef enum {
+ CURL_LOCK_DATA_SSL_SESSION,
+ CURL_LOCK_DATA_CONNECT,
+ CURL_LOCK_DATA_PSL,
++ CURL_LOCK_DATA_HSTS,
+ CURL_LOCK_DATA_LAST
+ } curl_lock_data;
+
+--- a/lib/hsts.c
++++ b/lib/hsts.c
+@@ -37,6 +37,7 @@
+ #include "parsedate.h"
+ #include "rand.h"
+ #include "rename.h"
++#include "share.h"
+ #include "strtoofft.h"
+
+ /* The last 3 #include files should be in this order */
+@@ -561,4 +562,18 @@
+ return CURLE_OK;
+ }
+
++void Curl_hsts_loadfiles(struct Curl_easy *data)
++{
++ struct curl_slist *l = data->set.hstslist;
++ if(l) {
++ Curl_share_lock(data, CURL_LOCK_DATA_HSTS, CURL_LOCK_ACCESS_SINGLE);
++
++ while(l) {
++ (void)Curl_hsts_loadfile(data, data->hsts, l->data);
++ l = l->next;
++ }
++ Curl_share_unlock(data, CURL_LOCK_DATA_HSTS);
++ }
++}
++
+ #endif /* CURL_DISABLE_HTTP || CURL_DISABLE_HSTS */
+--- a/lib/hsts.h
++++ b/lib/hsts.h
+@@ -59,9 +59,11 @@ CURLcode Curl_hsts_loadfile(struct Curl_
+ struct hsts *h, const char *file);
+ CURLcode Curl_hsts_loadcb(struct Curl_easy *data,
+ struct hsts *h);
++void Curl_hsts_loadfiles(struct Curl_easy *data);
+ #else
+ #define Curl_hsts_cleanup(x)
+ #define Curl_hsts_loadcb(x,y) CURLE_OK
+ #define Curl_hsts_save(x,y,z)
++#define Curl_hsts_loadfiles(x)
+ #endif /* CURL_DISABLE_HTTP || CURL_DISABLE_HSTS */
+ #endif /* HEADER_CURL_HSTS_H */
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -2260,9 +2260,14 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ data->cookies = NULL;
+ #endif
+
++#ifndef CURL_DISABLE_HSTS
++ if(data->share->hsts == data->hsts)
++ data->hsts = NULL;
++#endif
++#ifdef USE_SSL
+ if(data->share->sslsession == data->state.session)
+ data->state.session = NULL;
+-
++#endif
+ #ifdef USE_LIBPSL
+ if(data->psl == &data->share->psl)
+ data->psl = data->multi? &data->multi->psl: NULL;
+@@ -2296,10 +2301,19 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ data->cookies = data->share->cookies;
+ }
+ #endif /* CURL_DISABLE_HTTP */
++#ifndef CURL_DISABLE_HSTS
++ if(data->share->hsts) {
++ /* first free the private one if any */
++ Curl_hsts_cleanup(&data->hsts);
++ data->hsts = data->share->hsts;
++ }
++#endif /* CURL_DISABLE_HTTP */
++#ifdef USE_SSL
+ if(data->share->sslsession) {
+ data->set.general_ssl.max_ssl_sessions = data->share->max_ssl_sessions;
+ data->state.session = data->share->sslsession;
+ }
++#endif
+ #ifdef USE_LIBPSL
+ if(data->share->specifier & (1 << CURL_LOCK_DATA_PSL))
+ data->psl = &data->share->psl;
+@@ -3049,19 +3063,39 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ case CURLOPT_HSTSWRITEDATA:
+ data->set.hsts_write_userp = va_arg(param, void *);
+ break;
+- case CURLOPT_HSTS:
++ case CURLOPT_HSTS: {
++ struct curl_slist *h;
+ if(!data->hsts) {
+ data->hsts = Curl_hsts_init();
+ if(!data->hsts)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ argptr = va_arg(param, char *);
+- result = Curl_setstropt(&data->set.str[STRING_HSTS], argptr);
+- if(result)
+- return result;
+- if(argptr)
+- (void)Curl_hsts_loadfile(data, data->hsts, argptr);
++ if(argptr) {
++ result = Curl_setstropt(&data->set.str[STRING_HSTS], argptr);
++ if(result)
++ return result;
++ /* this needs to build a list of file names to read from, so that it can
++ read them later, as we might get a shared HSTS handle to load them
++ into */
++ h = curl_slist_append(data->set.hstslist, argptr);
++ if(!h) {
++ curl_slist_free_all(data->set.hstslist);
++ data->set.hstslist = NULL;
++ return CURLE_OUT_OF_MEMORY;
++ }
++ data->set.hstslist = h; /* store the list for later use */
++ }
++ else {
++ /* clear the list of HSTS files */
++ curl_slist_free_all(data->set.hstslist);
++ data->set.hstslist = NULL;
++ if(!data->share || !data->share->hsts)
++ /* throw away the HSTS cache unless shared */
++ Curl_hsts_cleanup(&data->hsts);
++ }
+ break;
++ }
+ case CURLOPT_HSTS_CTRL:
+ arg = va_arg(param, long);
+ if(arg & CURLHSTS_ENABLE) {
+--- a/lib/share.c
++++ b/lib/share.c
+@@ -29,9 +29,11 @@
+ #include "share.h"
+ #include "psl.h"
+ #include "vtls/vtls.h"
+-#include "curl_memory.h"
++#include "hsts.h"
+
+-/* The last #include file should be: */
++/* The last 3 #include files should be in this order */
++#include "curl_printf.h"
++#include "curl_memory.h"
+ #include "memdebug.h"
+
+ struct Curl_share *
+@@ -89,6 +91,18 @@ curl_share_setopt(struct Curl_share *sha
+ #endif
+ break;
+
++ case CURL_LOCK_DATA_HSTS:
++#ifndef CURL_DISABLE_HSTS
++ if(!share->hsts) {
++ share->hsts = Curl_hsts_init();
++ if(!share->hsts)
++ res = CURLSHE_NOMEM;
++ }
++#else /* CURL_DISABLE_HSTS */
++ res = CURLSHE_NOT_BUILT_IN;
++#endif
++ break;
++
+ case CURL_LOCK_DATA_SSL_SESSION:
+ #ifdef USE_SSL
+ if(!share->sslsession) {
+@@ -141,6 +155,16 @@ curl_share_setopt(struct Curl_share *sha
+ #endif
+ break;
+
++ case CURL_LOCK_DATA_HSTS:
++#ifndef CURL_DISABLE_HSTS
++ if(share->hsts) {
++ Curl_hsts_cleanup(&share->hsts);
++ }
++#else /* CURL_DISABLE_HSTS */
++ res = CURLSHE_NOT_BUILT_IN;
++#endif
++ break;
++
+ case CURL_LOCK_DATA_SSL_SESSION:
+ #ifdef USE_SSL
+ Curl_safefree(share->sslsession);
+@@ -207,6 +231,10 @@ curl_share_cleanup(struct Curl_share *sh
+ Curl_cookie_cleanup(share->cookies);
+ #endif
+
++#ifndef CURL_DISABLE_HSTS
++ Curl_hsts_cleanup(&share->hsts);
++#endif
++
+ #ifdef USE_SSL
+ if(share->sslsession) {
+ size_t i;
+--- a/lib/share.h
++++ b/lib/share.h
+@@ -59,10 +59,14 @@ struct Curl_share {
+ #ifdef USE_LIBPSL
+ struct PslCache psl;
+ #endif
+-
++#ifndef CURL_DISABLE_HSTS
++ struct hsts *hsts;
++#endif
++#ifdef USE_SSL
+ struct Curl_ssl_session *sslsession;
+ size_t max_ssl_sessions;
+ long sessionage;
++#endif
+ };
+
+ CURLSHcode Curl_share_lock(struct Curl_easy *, curl_lock_data,
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -1398,6 +1398,9 @@ CURLcode Curl_pretransfer(struct Curl_ea
+ if(data->state.resolve)
+ result = Curl_loadhostpairs(data);
+
++ /* If there is a list of hsts files to read */
++ Curl_hsts_loadfiles(data);
++
+ if(!result) {
+ /* Allow data->set.use_port to set which port to use. This needs to be
+ * disabled for example when we follow Location: headers to URLs using
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -434,7 +434,11 @@ CURLcode Curl_close(struct Curl_easy **d
+ Curl_altsvc_save(data, data->asi, data->set.str[STRING_ALTSVC]);
+ Curl_altsvc_cleanup(&data->asi);
+ Curl_hsts_save(data, data->hsts, data->set.str[STRING_HSTS]);
+- Curl_hsts_cleanup(&data->hsts);
++#ifndef CURL_DISABLE_HSTS
++ if(!data->share || !data->share->hsts)
++ Curl_hsts_cleanup(&data->hsts);
++ curl_slist_free_all(data->set.hstslist); /* clean up list */
++#endif
+ #if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_CRYPTO_AUTH)
+ Curl_http_auth_cleanup_digest(data);
+ #endif
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1670,6 +1670,8 @@
+
+ void *seek_client; /* pointer to pass to the seek callback */
+ #ifndef CURL_DISABLE_HSTS
++ struct curl_slist *hstslist; /* list of HSTS files set by
++ curl_easy_setopt(HSTS) calls */
+ curl_hstsread_callback hsts_read;
+ void *hsts_read_userp;
+ curl_hstswrite_callback hsts_write;
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23914_5-2.patch b/meta/recipes-support/curl/curl/CVE-2023-23914_5-2.patch
new file mode 100644
index 0000000000..668972cb3f
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23914_5-2.patch
@@ -0,0 +1,23 @@
+From 0bf8b796a0ea98395b390c7807187982215f5c11 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 27 Dec 2022 11:50:23 +0100
+Subject: [PATCH] tool_operate: share HSTS between handles
+
+CVE: CVE-2023-23914 CVE-2023-23915
+Upstream-Status: Backport [https://github.com/curl/curl/pull/10138/commits/ca17cfed2df001356cfe2841f166569bac0f5e8c]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ src/tool_operate.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/src/tool_operate.c
++++ b/src/tool_operate.c
+@@ -2722,6 +2722,7 @@ CURLcode operate(struct GlobalConfig *gl
+ curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);
+ curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_CONNECT);
+ curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_PSL);
++ curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_HSTS);
+
+ /* Get the required arguments for each operation */
+ do {
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23914_5-3.patch b/meta/recipes-support/curl/curl/CVE-2023-23914_5-3.patch
new file mode 100644
index 0000000000..4422b26834
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23914_5-3.patch
@@ -0,0 +1,45 @@
+From ca02a77f05bd5cef20618c8f741aa48b7be0a648 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 27 Dec 2022 11:50:23 +0100
+Subject: [PATCH] hsts: handle adding the same host name again
+
+It will then use the largest expire time of the two entries.
+
+CVE: CVE-2023-23914 CVE-2023-23915
+Upstream-Status: Backport [https://github.com/curl/curl/pull/10138/commits/e077b30a42272d964d76e5b815a0af7dc65d8360]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ lib/hsts.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/lib/hsts.c b/lib/hsts.c
+index 339237be1c621..8d6723ee587d2 100644
+--- a/lib/hsts.c
++++ b/lib/hsts.c
+@@ -426,14 +426,23 @@ static CURLcode hsts_add(struct hsts *h, char *line)
+ if(2 == rc) {
+ time_t expires = strcmp(date, UNLIMITED) ? Curl_getdate_capped(date) :
+ TIME_T_MAX;
+- CURLcode result;
++ CURLcode result = CURLE_OK;
+ char *p = host;
+ bool subdomain = FALSE;
++ struct stsentry *e;
+ if(p[0] == '.') {
+ p++;
+ subdomain = TRUE;
+ }
+- result = hsts_create(h, p, subdomain, expires);
++ /* only add it if not already present */
++ e = Curl_hsts(h, p, subdomain);
++ if(!e)
++ result = hsts_create(h, p, subdomain, expires);
++ else {
++ /* the same host name, use the largest expire time */
++ if(expires > e->expires)
++ e->expires = expires;
++ }
+ if(result)
+ return result;
+ }
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23914_5-4.patch b/meta/recipes-support/curl/curl/CVE-2023-23914_5-4.patch
new file mode 100644
index 0000000000..865b3f93a5
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23914_5-4.patch
@@ -0,0 +1,48 @@
+From dc0725244a3163f1e2d5f51165db3a1a430f3ba0 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 27 Dec 2022 11:50:23 +0100
+Subject: [PATCH] runtests: support crlf="yes" for verify/proxy
+
+CVE: CVE-2023-23914 CVE-2023-23915
+Upstream-Status: Backport [https://github.com/curl/curl/pull/10138/commits/fd7e1a557e414dd803c9225e37a2ca84e1df2269]
+Comment: Refreshed hunk from FILEFORMAT.md
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ tests/FILEFORMAT.md | 4 ++--
+ tests/runtests.pl | 5 +++++
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+--- a/tests/FILEFORMAT.md
++++ b/tests/FILEFORMAT.md
+@@ -540,14 +540,14 @@
+ One perl op per line that operates on the protocol dump. This is pretty
+ advanced. Example: `s/^EPRT .*/EPRT stripped/`.
+
+-### `<protocol [nonewline="yes"]>`
++### `<protocol [nonewline="yes"][crlf="yes"]>`
+
+ the protocol dump curl should transmit, if 'nonewline' is set, we will cut off
+ the trailing newline of this given data before comparing with the one actually
+ sent by the client The `<strip>` and `<strippart>` rules are applied before
+ comparisons are made.
+
+-### `<proxy [nonewline="yes"]>`
++### `<proxy [nonewline="yes"][crlf="yes"]>`
+
+ The protocol dump curl should transmit to a HTTP proxy (when the http-proxy
+ server is used), if 'nonewline' is set, we will cut off the trailing newline
+--- a/tests/runtests.pl
++++ b/tests/runtests.pl
+@@ -4744,6 +4744,11 @@ sub singletest {
+ }
+ }
+
++ if($hash{'crlf'} ||
++ ($has_hyper && ($keywords{"HTTP"} || $keywords{"HTTPS"}))) {
++ map subNewlines(0, \$_), @protstrip;
++ }
++
+ $res = compare($testnum, $testname, "proxy", \@out, \@protstrip);
+ if($res) {
+ return $errorreturncode;
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23914_5-5.patch b/meta/recipes-support/curl/curl/CVE-2023-23914_5-5.patch
new file mode 100644
index 0000000000..1a363f0b4b
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23914_5-5.patch
@@ -0,0 +1,118 @@
+From ea5aaaa5ede53819f8bc7ae767fc2d13d3704d37 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 27 Dec 2022 11:50:23 +0100
+Subject: [PATCH] test446: verify hsts with two URLs
+
+CVE: CVE-2023-23914 CVE-2023-23915
+Upstream-Status: Backport [https://github.com/curl/curl/pull/10138/commits/7e89dfd463597701dd1defcad7be54f7d3c9d55d]
+Comment: Refreshed hunk from Makefile.inc
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ tests/data/Makefile.inc | 2 +-
+ tests/data/test446 | 84 +++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 85 insertions(+), 1 deletion(-)
+ create mode 100644 tests/data/test446
+
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index 3a6356bd122bc..fe1bb1c74c2ab 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -72,6 +72,7 @@
+ \
+ test430 test431 test432 test433 test434 test435 test436 \
+ \
++test446 \
+ test490 test491 test492 test493 test494 \
+ \
+ test500 test501 test502 test503 test504 test505 test506 test507 test508 \
+diff --git a/tests/data/test446 b/tests/data/test446
+new file mode 100644
+index 0000000000000..0e2dfdcfe33b6
+--- /dev/null
++++ b/tests/data/test446
+@@ -0,0 +1,84 @@
++<?xml version="1.0" encoding="ISO-8859-1"?>
++<testcase>
++<info>
++<keywords>
++HTTP
++HTTP proxy
++HSTS
++trailing-dot
++</keywords>
++</info>
++
++<reply>
++
++# we use this as response to a CONNECT
++<connect nocheck="yes">
++HTTP/1.1 200 OK
++
++</connect>
++<data crlf="yes">
++HTTP/1.1 200 OK
++Content-Length: 6
++Strict-Transport-Security: max-age=604800
++
++-foo-
++</data>
++<data2 crlf="yes">
++HTTP/1.1 200 OK
++Content-Length: 6
++Strict-Transport-Security: max-age=6048000
++
++-baa-
++</data2>
++</reply>
++
++<client>
++<server>
++https
++http-proxy
++</server>
++<features>
++HSTS
++proxy
++https
++debug
++</features>
++<setenv>
++CURL_HSTS_HTTP=yes
++CURL_TIME=2000000000
++</setenv>
++
++<name>
++HSTS with two URLs
++</name>
++<command>
++-x http://%HOSTIP:%PROXYPORT --hsts log/hsts%TESTNUMBER http://this.hsts.example./%TESTNUMBER http://another.example.com/%TESTNUMBER0002
++</command>
++</client>
++
++<verify>
++# we let it CONNECT to the server to confirm HSTS but deny from there
++<proxy crlf="yes">
++GET http://this.hsts.example./%TESTNUMBER HTTP/1.1
++Host: this.hsts.example.
++User-Agent: curl/%VERSION
++Accept: */*
++Proxy-Connection: Keep-Alive
++
++GET http://another.example.com/%TESTNUMBER0002 HTTP/1.1
++Host: another.example.com
++User-Agent: curl/%VERSION
++Accept: */*
++Proxy-Connection: Keep-Alive
++
++</proxy>
++
++<file name="log/hsts%TESTNUMBER" mode="text">
++# Your HSTS cache. https://curl.se/docs/hsts.html
++# This file was generated by libcurl! Edit at your own risk.
++this.hsts.example "20330525 03:33:20"
++another.example.com "20330727 03:33:20"
++</file>
++
++</verify>
++</testcase>
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23916.patch b/meta/recipes-support/curl/curl/CVE-2023-23916.patch
new file mode 100644
index 0000000000..a57d275902
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23916.patch
@@ -0,0 +1,219 @@
+From 119fb187192a9ea13dc90d9d20c215fc82799ab9 Mon Sep 17 00:00:00 2001
+From: Patrick Monnerat <patrick@monnerat.net>
+Date: Mon, 13 Feb 2023 08:33:09 +0100
+Subject: [PATCH] content_encoding: do not reset stage counter for each header
+
+Test 418 verifies
+
+Closes #10492
+
+CVE: CVE-2023-23916
+Upstream-Status: Backport [https://github.com/curl/curl/commit/119fb187192a9ea13dc.patch]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+---
+ lib/content_encoding.c | 7 +-
+ lib/urldata.h | 1 +
+ tests/data/Makefile.inc | 2 +-
+ tests/data/test387 | 2 +-
+ tests/data/test418 | 152 ++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 158 insertions(+), 6 deletions(-)
+ create mode 100644 tests/data/test418
+
+--- a/lib/content_encoding.c
++++ b/lib/content_encoding.c
+@@ -1037,7 +1037,6 @@ CURLcode Curl_build_unencoding_stack(str
+ const char *enclist, int maybechunked)
+ {
+ struct SingleRequest *k = &data->req;
+- int counter = 0;
+
+ do {
+ const char *name;
+@@ -1072,9 +1071,9 @@ CURLcode Curl_build_unencoding_stack(str
+ if(!encoding)
+ encoding = &error_encoding; /* Defer error at stack use. */
+
+- if(++counter >= MAX_ENCODE_STACK) {
+- failf(data, "Reject response due to %u content encodings",
+- counter);
++ if(k->writer_stack_depth++ >= MAX_ENCODE_STACK) {
++ failf(data, "Reject response due to more than %u content encodings",
++ MAX_ENCODE_STACK);
+ return CURLE_BAD_CONTENT_ENCODING;
+ }
+ /* Stack the unencoding stage. */
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -682,6 +682,7 @@ struct SingleRequest {
+ struct dohdata *doh; /* DoH specific data for this request */
+ #endif
+ unsigned char setcookies;
++ unsigned char writer_stack_depth; /* Unencoding stack depth. */
+ BIT(header); /* incoming data has HTTP header */
+ BIT(content_range); /* set TRUE if Content-Range: was found */
+ BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -69,6 +69,7 @@
+ \
+ test400 test401 test402 test403 test404 test405 test406 test407 test408 \
+ test409 test410 \
++test418 \
+ \
+ test430 test431 test432 test433 test434 test435 test436 \
+ \
+--- /dev/null
++++ b/tests/data/test418
+@@ -0,0 +1,152 @@
++<testcase>
++<info>
++<keywords>
++HTTP
++gzip
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<data nocheck="yes">
++HTTP/1.1 200 OK
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++
++-foo-
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<server>
++http
++</server>
++ <name>
++Response with multiple Transfer-Encoding headers
++ </name>
++ <command>
++http://%HOSTIP:%HTTPPORT/%TESTNUMBER -sS
++</command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++<protocol crlf="yes">
++GET /%TESTNUMBER HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++User-Agent: curl/%VERSION
++Accept: */*
++
++</protocol>
++
++# CURLE_BAD_CONTENT_ENCODING is 61
++<errorcode>
++61
++</errorcode>
++<stderr mode="text">
++curl: (61) Reject response due to more than 5 content encodings
++</stderr>
++</verify>
++</testcase>
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27533.patch b/meta/recipes-support/curl/curl/CVE-2023-27533.patch
new file mode 100644
index 0000000000..b69b20c85a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27533.patch
@@ -0,0 +1,208 @@
+From 538b1e79a6e7b0bb829ab4cecc828d32105d0684 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 6 Mar 2023 12:07:33 +0100
+Subject: [PATCH] telnet: parse telnet options without sscanf & only accept option arguments in ascii
+
+To avoid embedded telnet negotiation commands etc.
+
+Reported-by: Harry Sintonen
+Closes #10728
+
+CVE: CVE-2023-27533
+Upstream-Status: Backport [https://github.com/curl/curl/commit/0c28ba2faae2d7da780a66d2446045a560192cdc && https://github.com/curl/curl/commit/538b1e79a6e7b0bb829ab4cecc828d32105d0684]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/telnet.c | 149 +++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 91 insertions(+), 58 deletions(-)
+
+diff --git a/lib/telnet.c b/lib/telnet.c
+index e709973..3ecd680 100644
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -768,22 +768,32 @@ static void printsub(struct Curl_easy *data,
+ }
+ }
+
++static bool str_is_nonascii(const char *str)
++{
++ size_t len = strlen(str);
++ while(len--) {
++ if(*str & 0x80)
++ return TRUE;
++ str++;
++ }
++ return FALSE;
++}
++
+ static CURLcode check_telnet_options(struct Curl_easy *data)
+ {
+ struct curl_slist *head;
+ struct curl_slist *beg;
+- char option_keyword[128] = "";
+- char option_arg[256] = "";
+ struct TELNET *tn = data->req.p.telnet;
+- struct connectdata *conn = data->conn;
+ CURLcode result = CURLE_OK;
+- int binary_option;
+
+ /* Add the user name as an environment variable if it
+ was given on the command line */
+ if(data->state.aptr.user) {
+- msnprintf(option_arg, sizeof(option_arg), "USER,%s", conn->user);
+- beg = curl_slist_append(tn->telnet_vars, option_arg);
++ char buffer[256];
++ if(str_is_nonascii(data->conn->user))
++ return CURLE_BAD_FUNCTION_ARGUMENT;
++ msnprintf(buffer, sizeof(buffer), "USER,%s", data->conn->user);
++ beg = curl_slist_append(tn->telnet_vars, buffer);
+ if(!beg) {
+ curl_slist_free_all(tn->telnet_vars);
+ tn->telnet_vars = NULL;
+@@ -793,68 +803,91 @@ static CURLcode check_telnet_options(struct Curl_easy *data)
+ tn->us_preferred[CURL_TELOPT_NEW_ENVIRON] = CURL_YES;
+ }
+
+- for(head = data->set.telnet_options; head; head = head->next) {
+- if(sscanf(head->data, "%127[^= ]%*[ =]%255s",
+- option_keyword, option_arg) == 2) {
+-
+- /* Terminal type */
+- if(strcasecompare(option_keyword, "TTYPE")) {
+- strncpy(tn->subopt_ttype, option_arg, 31);
+- tn->subopt_ttype[31] = 0; /* String termination */
+- tn->us_preferred[CURL_TELOPT_TTYPE] = CURL_YES;
++ for(head = data->set.telnet_options; head && !result; head = head->next) {
++ size_t olen;
++ char *option = head->data;
++ char *arg;
++ char *sep = strchr(option, '=');
++ if(sep) {
++ olen = sep - option;
++ arg = ++sep;
++ if(str_is_nonascii(arg))
+ continue;
+- }
++ switch(olen) {
++ case 5:
++ /* Terminal type */
++ if(strncasecompare(option, "TTYPE", 5)) {
++ strncpy(tn->subopt_ttype, arg, 31);
++ tn->subopt_ttype[31] = 0; /* String termination */
++ tn->us_preferred[CURL_TELOPT_TTYPE] = CURL_YES;
++ }
++ else
++ result = CURLE_UNKNOWN_OPTION;
++ break;
+
+- /* Display variable */
+- if(strcasecompare(option_keyword, "XDISPLOC")) {
+- strncpy(tn->subopt_xdisploc, option_arg, 127);
+- tn->subopt_xdisploc[127] = 0; /* String termination */
+- tn->us_preferred[CURL_TELOPT_XDISPLOC] = CURL_YES;
+- continue;
+- }
++ case 8:
++ /* Display variable */
++ if(strncasecompare(option, "XDISPLOC", 8)) {
++ strncpy(tn->subopt_xdisploc, arg, 127);
++ tn->subopt_xdisploc[127] = 0; /* String termination */
++ tn->us_preferred[CURL_TELOPT_XDISPLOC] = CURL_YES;
++ }
++ else
++ result = CURLE_UNKNOWN_OPTION;
++ break;
+
+- /* Environment variable */
+- if(strcasecompare(option_keyword, "NEW_ENV")) {
+- beg = curl_slist_append(tn->telnet_vars, option_arg);
+- if(!beg) {
+- result = CURLE_OUT_OF_MEMORY;
+- break;
++ case 7:
++ /* Environment variable */
++ if(strncasecompare(option, "NEW_ENV", 7)) {
++ beg = curl_slist_append(tn->telnet_vars, arg);
++ if(!beg) {
++ result = CURLE_OUT_OF_MEMORY;
++ break;
++ }
++ tn->telnet_vars = beg;
++ tn->us_preferred[CURL_TELOPT_NEW_ENVIRON] = CURL_YES;
+ }
+- tn->telnet_vars = beg;
+- tn->us_preferred[CURL_TELOPT_NEW_ENVIRON] = CURL_YES;
+- continue;
+- }
++ else
++ result = CURLE_UNKNOWN_OPTION;
++ break;
+
+- /* Window Size */
+- if(strcasecompare(option_keyword, "WS")) {
+- if(sscanf(option_arg, "%hu%*[xX]%hu",
+- &tn->subopt_wsx, &tn->subopt_wsy) == 2)
+- tn->us_preferred[CURL_TELOPT_NAWS] = CURL_YES;
+- else {
+- failf(data, "Syntax error in telnet option: %s", head->data);
+- result = CURLE_SETOPT_OPTION_SYNTAX;
+- break;
++ case 2:
++ /* Window Size */
++ if(strncasecompare(option, "WS", 2)) {
++ if(sscanf(arg, "%hu%*[xX]%hu",
++ &tn->subopt_wsx, &tn->subopt_wsy) == 2)
++ tn->us_preferred[CURL_TELOPT_NAWS] = CURL_YES;
++ else {
++ failf(data, "Syntax error in telnet option: %s", head->data);
++ result = CURLE_SETOPT_OPTION_SYNTAX;
++ }
+ }
+- continue;
+- }
++ else
++ result = CURLE_UNKNOWN_OPTION;
++ break;
+
+- /* To take care or not of the 8th bit in data exchange */
+- if(strcasecompare(option_keyword, "BINARY")) {
+- binary_option = atoi(option_arg);
+- if(binary_option != 1) {
+- tn->us_preferred[CURL_TELOPT_BINARY] = CURL_NO;
+- tn->him_preferred[CURL_TELOPT_BINARY] = CURL_NO;
++ case 6:
++ /* To take care or not of the 8th bit in data exchange */
++ if(strncasecompare(option, "BINARY", 6)) {
++ int binary_option = atoi(arg);
++ if(binary_option != 1) {
++ tn->us_preferred[CURL_TELOPT_BINARY] = CURL_NO;
++ tn->him_preferred[CURL_TELOPT_BINARY] = CURL_NO;
++ }
+ }
+- continue;
++ else
++ result = CURLE_UNKNOWN_OPTION;
++ break;
++ default:
++ failf(data, "Unknown telnet option %s", head->data);
++ result = CURLE_UNKNOWN_OPTION;
++ break;
+ }
+-
+- failf(data, "Unknown telnet option %s", head->data);
+- result = CURLE_UNKNOWN_OPTION;
+- break;
+ }
+- failf(data, "Syntax error in telnet option: %s", head->data);
+- result = CURLE_SETOPT_OPTION_SYNTAX;
+- break;
++ else {
++ failf(data, "Syntax error in telnet option: %s", head->data);
++ result = CURLE_SETOPT_OPTION_SYNTAX;
++ }
+ }
+
+ if(result) {
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27534.patch b/meta/recipes-support/curl/curl/CVE-2023-27534.patch
new file mode 100644
index 0000000000..9109faaf88
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27534.patch
@@ -0,0 +1,122 @@
+From 4e2b52b5f7a3bf50a0f1494155717b02cc1df6d6 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Mar 2023 16:22:11 +0100
+Subject: [PATCH] curl_path: create the new path with dynbuf
+
+CVE: CVE-2023-27534
+Upstream-Status: Backport [https://github.com/curl/curl/commit/4e2b52b5f7a3bf50a0f1494155717b02cc1df6d6]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/curl_path.c | 71 ++++++++++++++++++++++++-------------------------
+ 1 file changed, 35 insertions(+), 36 deletions(-)
+
+diff --git a/lib/curl_path.c b/lib/curl_path.c
+index a1669d1..b9c470f 100644
+--- a/lib/curl_path.c
++++ b/lib/curl_path.c
+@@ -30,66 +30,65 @@
+ #include "escape.h"
+ #include "memdebug.h"
+
++#define MAX_SSHPATH_LEN 100000 /* arbitrary */
++
+ /* figure out the path to work with in this particular request */
+ CURLcode Curl_getworkingpath(struct Curl_easy *data,
+ char *homedir, /* when SFTP is used */
+ char **path) /* returns the allocated
+ real path to work with */
+ {
+- char *real_path = NULL;
+ char *working_path;
+ size_t working_path_len;
++ struct dynbuf npath;
+ CURLcode result =
+ Curl_urldecode(data->state.up.path, 0, &working_path,
+ &working_path_len, REJECT_ZERO);
+ if(result)
+ return result;
+
++ /* new path to switch to in case we need to */
++ Curl_dyn_init(&npath, MAX_SSHPATH_LEN);
++
+ /* Check for /~/, indicating relative to the user's home directory */
+- if(data->conn->handler->protocol & CURLPROTO_SCP) {
+- real_path = malloc(working_path_len + 1);
+- if(!real_path) {
++ if((data->conn->handler->protocol & CURLPROTO_SCP) &&
++ (working_path_len > 3) && (!memcmp(working_path, "/~/", 3))) {
++ /* It is referenced to the home directory, so strip the leading '/~/' */
++ if(Curl_dyn_addn(&npath, &working_path[3], working_path_len - 3)) {
+ free(working_path);
+ return CURLE_OUT_OF_MEMORY;
+ }
+- if((working_path_len > 3) && (!memcmp(working_path, "/~/", 3)))
+- /* It is referenced to the home directory, so strip the leading '/~/' */
+- memcpy(real_path, working_path + 3, working_path_len - 2);
+- else
+- memcpy(real_path, working_path, 1 + working_path_len);
+ }
+- else if(data->conn->handler->protocol & CURLPROTO_SFTP) {
+- if((working_path_len > 1) && (working_path[1] == '~')) {
+- size_t homelen = strlen(homedir);
+- real_path = malloc(homelen + working_path_len + 1);
+- if(!real_path) {
+- free(working_path);
+- return CURLE_OUT_OF_MEMORY;
+- }
+- /* It is referenced to the home directory, so strip the
+- leading '/' */
+- memcpy(real_path, homedir, homelen);
+- real_path[homelen] = '/';
+- real_path[homelen + 1] = '\0';
+- if(working_path_len > 3) {
+- memcpy(real_path + homelen + 1, working_path + 3,
+- 1 + working_path_len -3);
+- }
++ else if((data->conn->handler->protocol & CURLPROTO_SFTP) &&
++ (working_path_len > 2) && !memcmp(working_path, "/~/", 3)) {
++ size_t len;
++ const char *p;
++ int copyfrom = 3;
++ if(Curl_dyn_add(&npath, homedir)) {
++ free(working_path);
++ return CURLE_OUT_OF_MEMORY;
+ }
+- else {
+- real_path = malloc(working_path_len + 1);
+- if(!real_path) {
+- free(working_path);
+- return CURLE_OUT_OF_MEMORY;
+- }
+- memcpy(real_path, working_path, 1 + working_path_len);
++ /* Copy a separating '/' if homedir does not end with one */
++ len = Curl_dyn_len(&npath);
++ p = Curl_dyn_ptr(&npath);
++ if(len && (p[len-1] != '/'))
++ copyfrom = 2;
++
++ if(Curl_dyn_addn(&npath,
++ &working_path[copyfrom], working_path_len - copyfrom)) {
++ free(working_path);
++ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+
+- free(working_path);
++ if(Curl_dyn_len(&npath)) {
++ free(working_path);
+
+- /* store the pointer for the caller to receive */
+- *path = real_path;
++ /* store the pointer for the caller to receive */
++ *path = Curl_dyn_ptr(&npath);
++ }
++ else
++ *path = working_path;
+
+ return CURLE_OK;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch b/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch
new file mode 100644
index 0000000000..57e1cb9e13
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch
@@ -0,0 +1,196 @@
+From ed5095ed94281989e103c72e032200b83be37878 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 6 Oct 2022 00:49:10 +0200
+Subject: [PATCH] strcase: add and use Curl_timestrcmp
+
+This is a strcmp() alternative function for comparing "secrets",
+designed to take the same time no matter the content to not leak
+match/non-match info to observers based on how fast it is.
+
+The time this function takes is only a function of the shortest input
+string.
+
+Reported-by: Trail of Bits
+
+Closes #9658
+
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/ed5095ed94281989e103c72e032200b83be37878]
+Comment: to backport fix for CVE-2023-27535, add function Curl_timestrcmp.
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ lib/netrc.c | 6 +++---
+ lib/strcase.c | 22 ++++++++++++++++++++++
+ lib/strcase.h | 1 +
+ lib/url.c | 33 +++++++++++++--------------------
+ lib/vauth/digest_sspi.c | 4 ++--
+ lib/vtls/vtls.c | 4 ++--
+ 6 files changed, 43 insertions(+), 27 deletions(-)
+
+diff --git a/lib/netrc.c b/lib/netrc.c
+index 0a4ae2c..b771b60 100644
+--- a/lib/netrc.c
++++ b/lib/netrc.c
+@@ -140,9 +140,9 @@ static int parsenetrc(const char *host,
+ /* we are now parsing sub-keywords concerning "our" host */
+ if(state_login) {
+ if(specific_login) {
+- state_our_login = strcasecompare(login, tok);
++ state_our_login = !Curl_timestrcmp(login, tok);
+ }
+- else if(!login || strcmp(login, tok)) {
++ else if(!login || Curl_timestrcmp(login, tok)) {
+ if(login_alloc) {
+ free(login);
+ login_alloc = FALSE;
+@@ -158,7 +158,7 @@ static int parsenetrc(const char *host,
+ }
+ else if(state_password) {
+ if((state_our_login || !specific_login)
+- && (!password || strcmp(password, tok))) {
++ && (!password || Curl_timestrcmp(password, tok))) {
+ if(password_alloc) {
+ free(password);
+ password_alloc = FALSE;
+diff --git a/lib/strcase.c b/lib/strcase.c
+index 692a3f1..be085b3 100644
+--- a/lib/strcase.c
++++ b/lib/strcase.c
+@@ -141,6 +141,28 @@ bool Curl_safecmp(char *a, char *b)
+ return !a && !b;
+ }
+
++/*
++ * Curl_timestrcmp() returns 0 if the two strings are identical. The time this
++ * function spends is a function of the shortest string, not of the contents.
++ */
++int Curl_timestrcmp(const char *a, const char *b)
++{
++ int match = 0;
++ int i = 0;
++
++ if(a && b) {
++ while(1) {
++ match |= a[i]^b[i];
++ if(!a[i] || !b[i])
++ break;
++ i++;
++ }
++ }
++ else
++ return a || b;
++ return match;
++}
++
+ /* --- public functions --- */
+
+ int curl_strequal(const char *first, const char *second)
+diff --git a/lib/strcase.h b/lib/strcase.h
+index 382b80a..c6979da 100644
+--- a/lib/strcase.h
++++ b/lib/strcase.h
+@@ -48,5 +48,6 @@ void Curl_strntoupper(char *dest, const char *src, size_t n);
+ void Curl_strntolower(char *dest, const char *src, size_t n);
+
+ bool Curl_safecmp(char *a, char *b);
++int Curl_timestrcmp(const char *first, const char *second);
+
+ #endif /* HEADER_CURL_STRCASE_H */
+diff --git a/lib/url.c b/lib/url.c
+index df4377d..c397b57 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -930,19 +930,10 @@ socks_proxy_info_matches(const struct proxy_info *data,
+ /* the user information is case-sensitive
+ or at least it is not defined as case-insensitive
+ see https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.1 */
+- if(!data->user != !needle->user)
+- return FALSE;
+- /* curl_strequal does a case insentive comparison, so do not use it here! */
+- if(data->user &&
+- needle->user &&
+- strcmp(data->user, needle->user) != 0)
+- return FALSE;
+- if(!data->passwd != !needle->passwd)
+- return FALSE;
++
+ /* curl_strequal does a case insentive comparison, so do not use it here! */
+- if(data->passwd &&
+- needle->passwd &&
+- strcmp(data->passwd, needle->passwd) != 0)
++ if(Curl_timestrcmp(data->user, needle->user) ||
++ Curl_timestrcmp(data->passwd, needle->passwd))
+ return FALSE;
+ return TRUE;
+ }
+@@ -1341,10 +1332,10 @@ ConnectionExists(struct Curl_easy *data,
+ if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) {
+ /* This protocol requires credentials per connection,
+ so verify that we're using the same name and password as well */
+- if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd) ||
+- !Curl_safecmp(needle->sasl_authzid, check->sasl_authzid) ||
+- !Curl_safecmp(needle->oauth_bearer, check->oauth_bearer)) {
++ if(Curl_timestrcmp(needle->user, check->user) ||
++ Curl_timestrcmp(needle->passwd, check->passwd) ||
++ Curl_timestrcmp(needle->sasl_authzid, check->sasl_authzid) ||
++ Curl_timestrcmp(needle->oauth_bearer, check->oauth_bearer)) {
+ /* one of them was different */
+ continue;
+ }
+@@ -1420,8 +1411,8 @@ ConnectionExists(struct Curl_easy *data,
+ possible. (Especially we must not reuse the same connection if
+ partway through a handshake!) */
+ if(wantNTLMhttp) {
+- if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd)) {
++ if(Curl_timestrcmp(needle->user, check->user) ||
++ Curl_timestrcmp(needle->passwd, check->passwd)) {
+
+ /* we prefer a credential match, but this is at least a connection
+ that can be reused and "upgraded" to NTLM */
+@@ -1443,8 +1434,10 @@ ConnectionExists(struct Curl_easy *data,
+ if(!check->http_proxy.user || !check->http_proxy.passwd)
+ continue;
+
+- if(strcmp(needle->http_proxy.user, check->http_proxy.user) ||
+- strcmp(needle->http_proxy.passwd, check->http_proxy.passwd))
++ if(Curl_timestrcmp(needle->http_proxy.user,
++ check->http_proxy.user) ||
++ Curl_timestrcmp(needle->http_proxy.passwd,
++ check->http_proxy.passwd))
+ continue;
+ }
+ else if(check->proxy_ntlm_state != NTLMSTATE_NONE) {
+diff --git a/lib/vauth/digest_sspi.c b/lib/vauth/digest_sspi.c
+index 94f8f8c..a413419 100644
+--- a/lib/vauth/digest_sspi.c
++++ b/lib/vauth/digest_sspi.c
+@@ -429,8 +429,8 @@ CURLcode Curl_auth_create_digest_http_message(struct Curl_easy *data,
+ has changed then delete that context. */
+ if((userp && !digest->user) || (!userp && digest->user) ||
+ (passwdp && !digest->passwd) || (!passwdp && digest->passwd) ||
+- (userp && digest->user && strcmp(userp, digest->user)) ||
+- (passwdp && digest->passwd && strcmp(passwdp, digest->passwd))) {
++ (userp && digest->user && Curl_timestrcmp(userp, digest->user)) ||
++ (passwdp && digest->passwd && Curl_timestrcmp(passwdp, digest->passwd))) {
+ if(digest->http_context) {
+ s_pSecFn->DeleteSecurityContext(digest->http_context);
+ Curl_safefree(digest->http_context);
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index e2d3438..881c8d2 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -146,8 +146,8 @@ Curl_ssl_config_matches(struct ssl_primary_config *data,
+ Curl_safecmp(data->random_file, needle->random_file) &&
+ Curl_safecmp(data->egdsocket, needle->egdsocket) &&
+ #ifdef USE_TLS_SRP
+- Curl_safecmp(data->username, needle->username) &&
+- Curl_safecmp(data->password, needle->password) &&
++ !Curl_timestrcmp(data->username, needle->username) &&
++ !Curl_timestrcmp(data->password, needle->password) &&
+ (data->authtype == needle->authtype) &&
+ #endif
+ Curl_safe_strcasecompare(data->cipher_list, needle->cipher_list) &&
+--
+2.35.7
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27535_and_CVE-2023-27538.patch b/meta/recipes-support/curl/curl/CVE-2023-27535_and_CVE-2023-27538.patch
new file mode 100644
index 0000000000..4e701edfff
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27535_and_CVE-2023-27538.patch
@@ -0,0 +1,170 @@
+From 8f4608468b890dce2dad9f91d5607ee7e9c1aba1 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Mar 2023 17:47:06 +0100
+Subject: [PATCH] ftp: add more conditions for connection reuse
+
+Reported-by: Harry Sintonen
+Closes #10730
+
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/8f4608468b890dce2dad9f91d5607ee7e9c1aba1, https://github.com/curl/curl/commit/af369db4d3833272b8ed443f7fcc2e757a0872eb]
+Comment: Backport for CVE-2023-27535 also fixes CVE-2023-27538 in the file "lib/url.c".
+CVE: CVE-2023-27535, CVE-2023-27538
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ lib/ftp.c | 28 ++++++++++++++++++++++++++--
+ lib/ftp.h | 5 +++++
+ lib/setopt.c | 2 +-
+ lib/url.c | 19 ++++++++++++++++---
+ lib/urldata.h | 4 ++--
+ 5 files changed, 50 insertions(+), 8 deletions(-)
+
+diff --git a/lib/ftp.c b/lib/ftp.c
+index c6efaed..93bbaeb 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -4097,6 +4097,8 @@ static CURLcode ftp_disconnect(struct Curl_easy *data,
+ }
+
+ freedirs(ftpc);
++ Curl_safefree(ftpc->account);
++ Curl_safefree(ftpc->alternative_to_user);
+ Curl_safefree(ftpc->prevpath);
+ Curl_safefree(ftpc->server_os);
+ Curl_pp_disconnect(pp);
+@@ -4364,11 +4366,31 @@ static CURLcode ftp_setup_connection(struct Curl_easy *data,
+ {
+ char *type;
+ struct FTP *ftp;
++ struct ftp_conn *ftpc = &conn->proto.ftpc;
+
+- data->req.p.ftp = ftp = calloc(sizeof(struct FTP), 1);
++ ftp = calloc(sizeof(struct FTP), 1);
+ if(!ftp)
+ return CURLE_OUT_OF_MEMORY;
+
++ /* clone connection related data that is FTP specific */
++ if(data->set.str[STRING_FTP_ACCOUNT]) {
++ ftpc->account = strdup(data->set.str[STRING_FTP_ACCOUNT]);
++ if(!ftpc->account) {
++ free(ftp);
++ return CURLE_OUT_OF_MEMORY;
++ }
++ }
++ if(data->set.str[STRING_FTP_ALTERNATIVE_TO_USER]) {
++ ftpc->alternative_to_user =
++ strdup(data->set.str[STRING_FTP_ALTERNATIVE_TO_USER]);
++ if(!ftpc->alternative_to_user) {
++ Curl_safefree(ftpc->account);
++ free(ftp);
++ return CURLE_OUT_OF_MEMORY;
++ }
++ }
++ data->req.p.ftp = ftp;
++
+ ftp->path = &data->state.up.path[1]; /* don't include the initial slash */
+
+ /* FTP URLs support an extension like ";type=<typecode>" that
+@@ -4403,7 +4425,9 @@ static CURLcode ftp_setup_connection(struct Curl_easy *data,
+ /* get some initial data into the ftp struct */
+ ftp->transfer = PPTRANSFER_BODY;
+ ftp->downloadsize = 0;
+- conn->proto.ftpc.known_filesize = -1; /* unknown size for now */
++ ftpc->known_filesize = -1; /* unknown size for now */
++ ftpc->use_ssl = data->set.use_ssl;
++ ftpc->ccc = data->set.ftp_ccc;
+
+ return CURLE_OK;
+ }
+diff --git a/lib/ftp.h b/lib/ftp.h
+index 1cfdac0..afca25b 100644
+--- a/lib/ftp.h
++++ b/lib/ftp.h
+@@ -115,6 +115,8 @@ struct FTP {
+ struct */
+ struct ftp_conn {
+ struct pingpong pp;
++ char *account;
++ char *alternative_to_user;
+ char *entrypath; /* the PWD reply when we logged on */
+ char *file; /* url-decoded file name (or path) */
+ char **dirs; /* realloc()ed array for path components */
+@@ -144,6 +146,9 @@ struct ftp_conn {
+ ftpstate state; /* always use ftp.c:state() to change state! */
+ ftpstate state_saved; /* transfer type saved to be reloaded after
+ data connection is established */
++ unsigned char use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
++ IMAP or POP3 or others! (type: curl_usessl)*/
++ unsigned char ccc; /* ccc level for this connection */
+ curl_off_t retr_size_saved; /* Size of retrieved file saved */
+ char *server_os; /* The target server operating system. */
+ curl_off_t known_filesize; /* file size is different from -1, if wildcard
+diff --git a/lib/setopt.c b/lib/setopt.c
+index 29a78a4..89d0150 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -2304,7 +2304,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ arg = va_arg(param, long);
+ if((arg < CURLUSESSL_NONE) || (arg >= CURLUSESSL_LAST))
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+- data->set.use_ssl = (curl_usessl)arg;
++ data->set.use_ssl = (unsigned char)arg;
+ break;
+
+ case CURLOPT_SSL_OPTIONS:
+diff --git a/lib/url.c b/lib/url.c
+index c397b57..280171c 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1347,11 +1347,24 @@ ConnectionExists(struct Curl_easy *data,
+ (check->httpversion >= 20) &&
+ (data->state.httpwant < CURL_HTTP_VERSION_2_0))
+ continue;
+-
+- if(get_protocol_family(needle->handler) == PROTO_FAMILY_SSH) {
+- if(!ssh_config_matches(needle, check))
++#ifdef USE_SSH
++ else if(get_protocol_family(needle->handler) & PROTO_FAMILY_SSH) {
++ if(!ssh_config_matches(needle, check))
+ continue;
+ }
++#endif
++#ifndef CURL_DISABLE_FTP
++ else if(get_protocol_family(needle->handler) & PROTO_FAMILY_FTP) {
++ /* Also match ACCOUNT, ALTERNATIVE-TO-USER, USE_SSL and CCC options */
++ if(Curl_timestrcmp(needle->proto.ftpc.account,
++ check->proto.ftpc.account) ||
++ Curl_timestrcmp(needle->proto.ftpc.alternative_to_user,
++ check->proto.ftpc.alternative_to_user) ||
++ (needle->proto.ftpc.use_ssl != check->proto.ftpc.use_ssl) ||
++ (needle->proto.ftpc.ccc != check->proto.ftpc.ccc))
++ continue;
++ }
++#endif
+
+ if((needle->handler->flags&PROTOPT_SSL)
+ #ifndef CURL_DISABLE_PROXY
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 69eb2ee..6e6122a 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1748,8 +1748,6 @@ struct UserDefined {
+ enum CURL_NETRC_OPTION
+ use_netrc; /* defined in include/curl.h */
+ #endif
+- curl_usessl use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
+- IMAP or POP3 or others! */
+ long new_file_perms; /* Permissions to use when creating remote files */
+ long new_directory_perms; /* Permissions to use when creating remote dirs */
+ long ssh_auth_types; /* allowed SSH auth types */
+@@ -1877,6 +1875,8 @@ struct UserDefined {
+ BIT(http09_allowed); /* allow HTTP/0.9 responses */
+ BIT(mail_rcpt_allowfails); /* allow RCPT TO command to fail for some
+ recipients */
++ unsigned char use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
++ IMAP or POP3 or others! (type: curl_usessl)*/
+ };
+
+ struct Names {
+--
+2.35.7
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27536.patch b/meta/recipes-support/curl/curl/CVE-2023-27536.patch
new file mode 100644
index 0000000000..d3d1d2dc2e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27536.patch
@@ -0,0 +1,53 @@
+From cb49e67303dbafbab1cebf4086e3ec15b7d56ee5 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 10 Mar 2023 09:22:43 +0100
+Subject: [PATCH] url: only reuse connections with same GSS delegation
+
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/cb49e67303dbafbab1cebf4086e3ec15b7d56ee5]
+CVE: CVE-2023-27536
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+Signed-off-by: Sourav Kumar Pramanik <pramanik.souravkumar@gmail.com>
+---
+ lib/url.c | 6 ++++++
+ lib/urldata.h | 1 +
+ 2 files changed, 7 insertions(+)
+
+diff --git a/lib/url.c b/lib/url.c
+index 280171c..c6413a1 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1341,6 +1341,11 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
++ /* GSS delegation differences do not actually affect every connection
++ and auth method, but this check takes precaution before efficiency */
++ if(needle->gssapi_delegation != check->gssapi_delegation)
++ continue;
++
+ /* If multiplexing isn't enabled on the h2 connection and h1 is
+ explicitly requested, handle it: */
+ if((needle->handler->protocol & PROTO_FAMILY_HTTP) &&
+@@ -1813,6 +1818,7 @@ static struct connectdata *allocate_conn(struct Curl_easy *data)
+ conn->fclosesocket = data->set.fclosesocket;
+ conn->closesocket_client = data->set.closesocket_client;
+ conn->lastused = Curl_now(); /* used now */
++ conn->gssapi_delegation = data->set.gssapi_delegation;
+
+ return conn;
+ error:
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 6e6122a..602c735 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1131,6 +1131,7 @@ struct connectdata {
+ int socks5_gssapi_enctype;
+ #endif
+ unsigned short localport;
++ long gssapi_delegation; /* inherited from set.gssapi_delegation */
+ };
+
+ /* The end of connectdata. */
+--
+2.35.7
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28319.patch b/meta/recipes-support/curl/curl/CVE-2023-28319.patch
new file mode 100644
index 0000000000..c0bca9a56e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28319.patch
@@ -0,0 +1,33 @@
+From 8e21b1a05f3c0ee098dbcb6c3d84cb61f102a122 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 8 May 2023 14:33:54 +0200
+Subject: [PATCH] libssh2: free fingerprint better
+
+Reported-by: Wei Chong Tan
+Closes #11088
+
+CVE: CVE-2023-28319
+Upstream-Status: Backport [https://github.com/curl/curl/commit/8e21b1a05f3c0ee098dbcb6c]
+Comments: Hunks Refreshed
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ lib/vssh/libssh2.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/lib/vssh/libssh2.c b/lib/vssh/libssh2.c
+index bfcc94e160178..dd39a844c646b 100644
+--- a/lib/vssh/libssh2.c
++++ b/lib/vssh/libssh2.c
+@@ -695,11 +695,10 @@
+ */
+ if((pub_pos != b64_pos) ||
+ Curl_strncasecompare(fingerprint_b64, pubkey_sha256, pub_pos) != 1) {
+- free(fingerprint_b64);
+-
+ failf(data,
+ "Denied establishing ssh session: mismatch sha256 fingerprint. "
+ "Remote %s is not equal to %s", fingerprint_b64, pubkey_sha256);
++ free(fingerprint_b64);
+ state(data, SSH_SESSION_FREE);
+ sshc->actualcode = CURLE_PEER_FAILED_VERIFICATION;
+ return sshc->actualcode;
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch b/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch
new file mode 100644
index 0000000000..2ba74aaaa9
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch
@@ -0,0 +1,197 @@
+From f446258f0269a62289cca0210157cb8558d0edc3 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 16 May 2023 23:40:42 +0200
+Subject: [PATCH] hostip: include easy_lock.h before using
+ GLOBAL_INIT_IS_THREADSAFE
+
+Since that header file is the only place that define can be defined.
+
+Reported-by: Marc Deslauriers
+
+Follow-up to 13718030ad4b3209
+
+Closes #11121
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/f446258f0269a62289cca0210157cb8558d0edc3]
+CVE: CVE-2023-28320
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/easy_lock.h | 109 ++++++++++++++++++++++++++++++++++++++++++++++++
+ lib/hostip.c | 10 ++---
+ lib/hostip.h | 9 ----
+ 3 files changed, 113 insertions(+), 15 deletions(-)
+ create mode 100644 lib/easy_lock.h
+
+diff --git a/lib/easy_lock.h b/lib/easy_lock.h
+new file mode 100644
+index 0000000..6399a39
+--- /dev/null
++++ b/lib/easy_lock.h
+@@ -0,0 +1,109 @@
++#ifndef HEADER_CURL_EASY_LOCK_H
++#define HEADER_CURL_EASY_LOCK_H
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++#include "curl_setup.h"
++
++#define GLOBAL_INIT_IS_THREADSAFE
++
++#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
++
++#ifdef __MINGW32__
++#ifndef __MINGW64_VERSION_MAJOR
++#if (__MINGW32_MAJOR_VERSION < 5) || \
++ (__MINGW32_MAJOR_VERSION == 5 && __MINGW32_MINOR_VERSION == 0)
++/* mingw >= 5.0.1 defines SRWLOCK, and slightly different from MS define */
++typedef PVOID SRWLOCK, *PSRWLOCK;
++#endif
++#endif
++#ifndef SRWLOCK_INIT
++#define SRWLOCK_INIT NULL
++#endif
++#endif /* __MINGW32__ */
++
++#define curl_simple_lock SRWLOCK
++#define CURL_SIMPLE_LOCK_INIT SRWLOCK_INIT
++
++#define curl_simple_lock_lock(m) AcquireSRWLockExclusive(m)
++#define curl_simple_lock_unlock(m) ReleaseSRWLockExclusive(m)
++
++#elif defined(HAVE_ATOMIC) && defined(HAVE_STDATOMIC_H)
++#include <stdatomic.h>
++#if defined(HAVE_SCHED_YIELD)
++#include <sched.h>
++#endif
++
++#define curl_simple_lock atomic_int
++#define CURL_SIMPLE_LOCK_INIT 0
++
++/* a clang-thing */
++#ifndef __has_builtin
++#define __has_builtin(x) 0
++#endif
++
++#ifndef __INTEL_COMPILER
++/* The Intel compiler tries to look like GCC *and* clang *and* lies in its
++ __has_builtin() function, so override it. */
++
++/* if GCC on i386/x86_64 or if the built-in is present */
++#if ( (defined(__GNUC__) && !defined(__clang__)) && \
++ (defined(__i386__) || defined(__x86_64__))) || \
++ __has_builtin(__builtin_ia32_pause)
++#define HAVE_BUILTIN_IA32_PAUSE
++#endif
++
++#endif
++
++static inline void curl_simple_lock_lock(curl_simple_lock *lock)
++{
++ for(;;) {
++ if(!atomic_exchange_explicit(lock, true, memory_order_acquire))
++ break;
++ /* Reduce cache coherency traffic */
++ while(atomic_load_explicit(lock, memory_order_relaxed)) {
++ /* Reduce load (not mandatory) */
++#ifdef HAVE_BUILTIN_IA32_PAUSE
++ __builtin_ia32_pause();
++#elif defined(__aarch64__)
++ __asm__ volatile("yield" ::: "memory");
++#elif defined(HAVE_SCHED_YIELD)
++ sched_yield();
++#endif
++ }
++ }
++}
++
++static inline void curl_simple_lock_unlock(curl_simple_lock *lock)
++{
++ atomic_store_explicit(lock, false, memory_order_release);
++}
++
++#else
++
++#undef GLOBAL_INIT_IS_THREADSAFE
++
++#endif
++
++#endif /* HEADER_CURL_EASY_LOCK_H */
+diff --git a/lib/hostip.c b/lib/hostip.c
+index e15c17a..c2e0962 100644
+--- a/lib/hostip.c
++++ b/lib/hostip.c
+@@ -72,6 +72,8 @@
+ #include <SystemConfiguration/SCDynamicStoreCopySpecific.h>
+ #endif
+
++#include "easy_lock.h"
++
+ #if defined(CURLRES_SYNCH) && \
+ defined(HAVE_ALARM) && \
+ defined(SIGALRM) && \
+@@ -81,10 +83,6 @@
+ #define USE_ALARM_TIMEOUT
+ #endif
+
+-#ifdef USE_ALARM_TIMEOUT
+-#include "easy_lock.h"
+-#endif
+-
+ #define MAX_HOSTCACHE_LEN (255 + 7) /* max FQDN + colon + port number + zero */
+
+ /*
+@@ -260,8 +258,8 @@ void Curl_hostcache_prune(struct Curl_easy *data)
+ /* Beware this is a global and unique instance. This is used to store the
+ return address that we can jump back to from inside a signal handler. This
+ is not thread-safe stuff. */
+-sigjmp_buf curl_jmpenv;
+-curl_simple_lock curl_jmpenv_lock;
++static sigjmp_buf curl_jmpenv;
++static curl_simple_lock curl_jmpenv_lock;
+ #endif
+
+ /* lookup address, returns entry if found and not stale */
+diff --git a/lib/hostip.h b/lib/hostip.h
+index 1db5981..a46bdc6 100644
+--- a/lib/hostip.h
++++ b/lib/hostip.h
+@@ -189,15 +189,6 @@ Curl_cache_addr(struct Curl_easy *data, struct Curl_addrinfo *addr,
+ #define CURL_INADDR_NONE INADDR_NONE
+ #endif
+
+-#ifdef HAVE_SIGSETJMP
+-/* Forward-declaration of variable defined in hostip.c. Beware this
+- * is a global and unique instance. This is used to store the return
+- * address that we can jump back to from inside a signal handler.
+- * This is not thread-safe stuff.
+- */
+-extern sigjmp_buf curl_jmpenv;
+-#endif
+-
+ /*
+ * Function provided by the resolver backend to set DNS servers to use.
+ */
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28320.patch b/meta/recipes-support/curl/curl/CVE-2023-28320.patch
new file mode 100644
index 0000000000..1e0fc7534a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28320.patch
@@ -0,0 +1,83 @@
+From 13718030ad4b3209a7583b4f27f683cd3a6fa5f2 Mon Sep 17 00:00:00 2001
+From: Harry Sintonen <sintonen@iki.fi>
+Date: Tue, 25 Apr 2023 09:22:26 +0200
+Subject: [PATCH] hostip: add locks around use of global buffer for alarm()
+
+When building with the sync name resolver and timeout ability we now
+require thread-safety to be present to enable it.
+
+Closes #11030
+
+CVE: CVE-2023-28320
+Upstream-Status: Backport [https://github.com/curl/curl/commit/13718030ad4b3209a7583b]
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ lib/hostip.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/lib/hostip.c b/lib/hostip.c
+index 2381290fdd43e..e410cda69ae6e 100644
+--- a/lib/hostip.c
++++ b/lib/hostip.c
+@@ -70,12 +70,19 @@
+ #include <SystemConfiguration/SCDynamicStoreCopySpecific.h>
+ #endif
+
+-#if defined(CURLRES_SYNCH) && \
+- defined(HAVE_ALARM) && defined(SIGALRM) && defined(HAVE_SIGSETJMP)
++#if defined(CURLRES_SYNCH) && \
++ defined(HAVE_ALARM) && \
++ defined(SIGALRM) && \
++ defined(HAVE_SIGSETJMP) && \
++ defined(GLOBAL_INIT_IS_THREADSAFE)
+ /* alarm-based timeouts can only be used with all the dependencies satisfied */
+ #define USE_ALARM_TIMEOUT
+ #endif
+
++#ifdef USE_ALARM_TIMEOUT
++#include "easy_lock.h"
++#endif
++
+ #define MAX_HOSTCACHE_LEN (255 + 7) /* max FQDN + colon + port number + zero */
+
+ /*
+@@ -254,11 +261,12 @@ void Curl_hostcache_prune(struct Curl_easy *data)
+ Curl_share_unlock(data, CURL_LOCK_DATA_DNS);
+ }
+
+-#ifdef HAVE_SIGSETJMP
++#ifdef USE_ALARM_TIMEOUT
+ /* Beware this is a global and unique instance. This is used to store the
+ return address that we can jump back to from inside a signal handler. This
+ is not thread-safe stuff. */
+ sigjmp_buf curl_jmpenv;
++curl_simple_lock curl_jmpenv_lock;
+ #endif
+
+ /* lookup address, returns entry if found and not stale */
+@@ -832,7 +840,6 @@ enum resolve_t Curl_resolv(struct Curl_easy *data,
+ static
+ void alarmfunc(int sig)
+ {
+- /* this is for "-ansi -Wall -pedantic" to stop complaining! (rabe) */
+ (void)sig;
+ siglongjmp(curl_jmpenv, 1);
+ }
+@@ -912,6 +919,8 @@ enum resolve_t Curl_resolv_timeout(struct Curl_easy *data,
+ This should be the last thing we do before calling Curl_resolv(),
+ as otherwise we'd have to worry about variables that get modified
+ before we invoke Curl_resolv() (and thus use "volatile"). */
++ curl_simple_lock_lock(&curl_jmpenv_lock);
++
+ if(sigsetjmp(curl_jmpenv, 1)) {
+ /* this is coming from a siglongjmp() after an alarm signal */
+ failf(data, "name lookup timed out");
+@@ -980,6 +989,8 @@ enum resolve_t Curl_resolv_timeout(struct Curl_easy *data,
+ #endif
+ #endif /* HAVE_SIGACTION */
+
++ curl_simple_lock_unlock(&curl_jmpenv_lock);
++
+ /* switch back the alarm() to either zero or to what it was before minus
+ the time we spent until now! */
+ if(prev_alarm) {
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28321.patch b/meta/recipes-support/curl/curl/CVE-2023-28321.patch
new file mode 100644
index 0000000000..bcd8b112db
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28321.patch
@@ -0,0 +1,302 @@
+From 199f2d440d8659b42670c1b796220792b01a97bf Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 24 Apr 2023 21:07:02 +0200
+Subject: [PATCH] hostcheck: fix host name wildcard checking
+
+The leftmost "label" of the host name can now only match against single
+'*'. Like the browsers have worked for a long time.
+
+- extended unit test 1397 for this
+- move some SOURCE variables from unit/Makefile.am to unit/Makefile.inc
+
+Reported-by: Hiroki Kurosawa
+Closes #11018
+
+CVE: CVE-2023-28321
+Upstream-Status: Backport [https://github.com/curl/curl/commit/199f2d440d8659b42]
+Comments: Hunks removed as changes already exist
+Removed hunks from files:
+tests/unit/Makefile.am
+tests/unit/Makefile.inc
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ lib/vtls/hostcheck.c | 50 +++++++--------
+ tests/data/test1397 | 10 ++-
+ tests/unit/Makefile.am | 94 ----------------------------
+ tests/unit/Makefile.inc | 94 ++++++++++++++++++++++++++++
+ tests/unit/unit1397.c | 134 ++++++++++++++++++++++++----------------
+ 5 files changed, 202 insertions(+), 180 deletions(-)
+
+diff --git a/lib/vtls/hostcheck.c b/lib/vtls/hostcheck.c
+index e827dc58f378c..d061c6356f97f 100644
+--- a/lib/vtls/hostcheck.c
++++ b/lib/vtls/hostcheck.c
+@@ -71,7 +71,12 @@ static bool pmatch(const char *hostname, size_t hostlen,
+ * apparent distinction between a name and an IP. We need to detect the use of
+ * an IP address and not wildcard match on such names.
+ *
++ * Only match on "*" being used for the leftmost label, not "a*", "a*b" nor
++ * "*b".
++ *
+ * Return TRUE on a match. FALSE if not.
++ *
++ * @unittest: 1397
+ */
+
+ static bool hostmatch(const char *hostname,
+@@ -79,53 +84,42 @@ static bool hostmatch(const char *hostname,
+ const char *pattern,
+ size_t patternlen)
+ {
+- const char *pattern_label_end, *wildcard, *hostname_label_end;
+- size_t prefixlen, suffixlen;
++ const char *pattern_label_end;
+
+- /* normalize pattern and hostname by stripping off trailing dots */
++ DEBUGASSERT(pattern);
+ DEBUGASSERT(patternlen);
++ DEBUGASSERT(hostname);
++ DEBUGASSERT(hostlen);
++
++ /* normalize pattern and hostname by stripping off trailing dots */
+ if(hostname[hostlen-1]=='.')
+ hostlen--;
+ if(pattern[patternlen-1]=='.')
+ patternlen--;
+
+- wildcard = memchr(pattern, '*', patternlen);
+- if(!wildcard)
++ if(strncmp(pattern, "*.", 2))
+ return pmatch(hostname, hostlen, pattern, patternlen);
+
+ /* detect IP address as hostname and fail the match if so */
+- if(Curl_host_is_ipnum(hostname))
++ else if(Curl_host_is_ipnum(hostname))
+ return FALSE;
+
+ /* We require at least 2 dots in the pattern to avoid too wide wildcard
+ match. */
+ pattern_label_end = memchr(pattern, '.', patternlen);
+ if(!pattern_label_end ||
+- (memrchr(pattern, '.', patternlen) == pattern_label_end) ||
+- strncasecompare(pattern, "xn--", 4))
++ (memrchr(pattern, '.', patternlen) == pattern_label_end))
+ return pmatch(hostname, hostlen, pattern, patternlen);
+-
+- hostname_label_end = memchr(hostname, '.', hostlen);
+- if(!hostname_label_end)
+- return FALSE;
+ else {
+- size_t skiphost = hostname_label_end - hostname;
+- size_t skiplen = pattern_label_end - pattern;
+- if(!pmatch(hostname_label_end, hostlen - skiphost,
+- pattern_label_end, patternlen - skiplen))
+- return FALSE;
++ const char *hostname_label_end = memchr(hostname, '.', hostlen);
++ if(hostname_label_end) {
++ size_t skiphost = hostname_label_end - hostname;
++ size_t skiplen = pattern_label_end - pattern;
++ return pmatch(hostname_label_end, hostlen - skiphost,
++ pattern_label_end, patternlen - skiplen);
++ }
+ }
+- /* The wildcard must match at least one character, so the left-most
+- label of the hostname is at least as large as the left-most label
+- of the pattern. */
+- if(hostname_label_end - hostname < pattern_label_end - pattern)
+- return FALSE;
+-
+- prefixlen = wildcard - pattern;
+- suffixlen = pattern_label_end - (wildcard + 1);
+- return strncasecompare(pattern, hostname, prefixlen) &&
+- strncasecompare(wildcard + 1, hostname_label_end - suffixlen,
+- suffixlen) ? TRUE : FALSE;
++ return FALSE;
+ }
+
+ /*
+diff --git a/tests/data/test1397 b/tests/data/test1397
+index 84f962abebee3..f31b2c2a3f330 100644
+--- a/tests/data/test1397
++++ b/tests/data/test1397
+@@ -2,8 +2,7 @@
+ <info>
+ <keywords>
+ unittest
+-ssl
+-wildcard
++Curl_cert_hostcheck
+ </keywords>
+ </info>
+
+@@ -16,9 +15,8 @@ none
+ <features>
+ unittest
+ </features>
+- <name>
+-Check wildcard certificate matching function Curl_cert_hostcheck
+- </name>
++<name>
++Curl_cert_hostcheck unit tests
++</name>
+ </client>
+-
+ </testcase>
+diff --git a/tests/unit/unit1397.c b/tests/unit/unit1397.c
+index 2f3d3aa4d09e1..3ae75618d5d10 100644
+--- a/tests/unit/unit1397.c
++++ b/tests/unit/unit1397.c
+@@ -23,7 +23,6 @@
+ ***************************************************************************/
+ #include "curlcheck.h"
+
+-#include "vtls/hostcheck.h" /* from the lib dir */
+
+ static CURLcode unit_setup(void)
+ {
+@@ -32,63 +31,94 @@ static CURLcode unit_setup(void)
+
+ static void unit_stop(void)
+ {
+- /* done before shutting down and exiting */
+ }
+
+-UNITTEST_START
+-
+ /* only these backends define the tested functions */
+-#if defined(USE_OPENSSL) || defined(USE_GSKIT)
+-
+- /* here you start doing things and checking that the results are good */
++#if defined(USE_OPENSSL) || defined(USE_GSKIT) || defined(USE_SCHANNEL)
++#include "vtls/hostcheck.h"
++struct testcase {
++ const char *host;
++ const char *pattern;
++ bool match;
++};
+
+-fail_unless(Curl_cert_hostcheck(STRCONST("www.example.com"),
+- STRCONST("www.example.com")), "good 1");
+-fail_unless(Curl_cert_hostcheck(STRCONST("*.example.com"),
+- STRCONST("www.example.com")),
+- "good 2");
+-fail_unless(Curl_cert_hostcheck(STRCONST("xxx*.example.com"),
+- STRCONST("xxxwww.example.com")), "good 3");
+-fail_unless(Curl_cert_hostcheck(STRCONST("f*.example.com"),
+- STRCONST("foo.example.com")), "good 4");
+-fail_unless(Curl_cert_hostcheck(STRCONST("192.168.0.0"),
+- STRCONST("192.168.0.0")), "good 5");
++static struct testcase tests[] = {
++ {"", "", FALSE},
++ {"a", "", FALSE},
++ {"", "b", FALSE},
++ {"a", "b", FALSE},
++ {"aa", "bb", FALSE},
++ {"\xff", "\xff", TRUE},
++ {"aa.aa.aa", "aa.aa.bb", FALSE},
++ {"aa.aa.aa", "aa.aa.aa", TRUE},
++ {"aa.aa.aa", "*.aa.bb", FALSE},
++ {"aa.aa.aa", "*.aa.aa", TRUE},
++ {"192.168.0.1", "192.168.0.1", TRUE},
++ {"192.168.0.1", "*.168.0.1", FALSE},
++ {"192.168.0.1", "*.0.1", FALSE},
++ {"h.ello", "*.ello", FALSE},
++ {"h.ello.", "*.ello", FALSE},
++ {"h.ello", "*.ello.", FALSE},
++ {"h.e.llo", "*.e.llo", TRUE},
++ {"h.e.llo", " *.e.llo", FALSE},
++ {" h.e.llo", "*.e.llo", TRUE},
++ {"h.e.llo.", "*.e.llo", TRUE},
++ {"*.e.llo.", "*.e.llo", TRUE},
++ {"************.e.llo.", "*.e.llo", TRUE},
++ {"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
++ "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
++ "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
++ "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
++ "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
++ ".e.llo.", "*.e.llo", TRUE},
++ {"\xfe\xfe.e.llo.", "*.e.llo", TRUE},
++ {"h.e.llo.", "*.e.llo.", TRUE},
++ {"h.e.llo", "*.e.llo.", TRUE},
++ {".h.e.llo", "*.e.llo.", FALSE},
++ {"h.e.llo", "*.*.llo.", FALSE},
++ {"h.e.llo", "h.*.llo", FALSE},
++ {"h.e.llo", "h.e.*", FALSE},
++ {"hello", "*.ello", FALSE},
++ {"hello", "**llo", FALSE},
++ {"bar.foo.example.com", "*.example.com", FALSE},
++ {"foo.example.com", "*.example.com", TRUE},
++ {"baz.example.net", "b*z.example.net", FALSE},
++ {"foobaz.example.net", "*baz.example.net", FALSE},
++ {"xn--l8j.example.local", "x*.example.local", FALSE},
++ {"xn--l8j.example.net", "*.example.net", TRUE},
++ {"xn--l8j.example.net", "*j.example.net", FALSE},
++ {"xn--l8j.example.net", "xn--l8j.example.net", TRUE},
++ {"xn--l8j.example.net", "xn--l8j.*.net", FALSE},
++ {"xl8j.example.net", "*.example.net", TRUE},
++ {"fe80::3285:a9ff:fe46:b619", "*::3285:a9ff:fe46:b619", FALSE},
++ {"fe80::3285:a9ff:fe46:b619", "fe80::3285:a9ff:fe46:b619", TRUE},
++ {NULL, NULL, FALSE}
++};
+
+-fail_if(Curl_cert_hostcheck(STRCONST("xxx.example.com"),
+- STRCONST("www.example.com")), "bad 1");
+-fail_if(Curl_cert_hostcheck(STRCONST("*"),
+- STRCONST("www.example.com")),"bad 2");
+-fail_if(Curl_cert_hostcheck(STRCONST("*.*.com"),
+- STRCONST("www.example.com")), "bad 3");
+-fail_if(Curl_cert_hostcheck(STRCONST("*.example.com"),
+- STRCONST("baa.foo.example.com")), "bad 4");
+-fail_if(Curl_cert_hostcheck(STRCONST("f*.example.com"),
+- STRCONST("baa.example.com")), "bad 5");
+-fail_if(Curl_cert_hostcheck(STRCONST("*.com"),
+- STRCONST("example.com")), "bad 6");
+-fail_if(Curl_cert_hostcheck(STRCONST("*fail.com"),
+- STRCONST("example.com")), "bad 7");
+-fail_if(Curl_cert_hostcheck(STRCONST("*.example."),
+- STRCONST("www.example.")), "bad 8");
+-fail_if(Curl_cert_hostcheck(STRCONST("*.example."),
+- STRCONST("www.example")), "bad 9");
+-fail_if(Curl_cert_hostcheck(STRCONST(""), STRCONST("www")), "bad 10");
+-fail_if(Curl_cert_hostcheck(STRCONST("*"), STRCONST("www")), "bad 11");
+-fail_if(Curl_cert_hostcheck(STRCONST("*.168.0.0"),
+- STRCONST("192.168.0.0")), "bad 12");
+-fail_if(Curl_cert_hostcheck(STRCONST("www.example.com"),
+- STRCONST("192.168.0.0")), "bad 13");
+-
+-#ifdef ENABLE_IPV6
+-fail_if(Curl_cert_hostcheck(STRCONST("*::3285:a9ff:fe46:b619"),
+- STRCONST("fe80::3285:a9ff:fe46:b619")), "bad 14");
+-fail_unless(Curl_cert_hostcheck(STRCONST("fe80::3285:a9ff:fe46:b619"),
+- STRCONST("fe80::3285:a9ff:fe46:b619")),
+- "good 6");
+-#endif
++UNITTEST_START
++{
++ int i;
++ for(i = 0; tests[i].host; i++) {
++ if(tests[i].match != Curl_cert_hostcheck(tests[i].pattern,
++ strlen(tests[i].pattern),
++ tests[i].host,
++ strlen(tests[i].host))) {
++ fprintf(stderr,
++ "HOST: %s\n"
++ "PTRN: %s\n"
++ "did %sMATCH\n",
++ tests[i].host,
++ tests[i].pattern,
++ tests[i].match ? "NOT ": "");
++ unitfail++;
++ }
++ }
++}
+
+-#endif
++UNITTEST_STOP
++#else
+
+- /* you end the test code like this: */
++UNITTEST_START
+
+ UNITTEST_STOP
++#endif
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28322-1.patch b/meta/recipes-support/curl/curl/CVE-2023-28322-1.patch
new file mode 100644
index 0000000000..547127001d
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28322-1.patch
@@ -0,0 +1,84 @@
+From efbf02111aa66bda9288506b7d5cc0226bf5453e Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sun, 12 Feb 2023 13:24:08 +0100
+Subject: [PATCH] smb: return error on upload without size
+
+The protocol needs to know the size ahead of time, this is now a known
+restriction and not a bug.
+
+Also output a clearer error if the URL path does not contain proper
+share.
+
+Ref: #7896
+Closes #10484
+
+CVE: CVE-2023-28322
+Upstream-Status: Backport [https://github.com/curl/curl/commit/efbf02111aa66bda9288506b7d5cc0226bf5453e]
+Comments: Hunks refreshed
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ docs/KNOWN_BUGS | 5 -----
+ docs/URL-SYNTAX.md | 3 +++
+ lib/smb.c | 6 ++++++
+ 3 files changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/docs/KNOWN_BUGS b/docs/KNOWN_BUGS
+index cbf5be352a279..a515e7a59bdfd 100644
+--- a/docs/KNOWN_BUGS
++++ b/docs/KNOWN_BUGS
+@@ -58,7 +58,6 @@
+ 5.7 Visual Studio project gaps
+ 5.8 configure finding libs in wrong directory
+ 5.9 Utilize Requires.private directives in libcurl.pc
+- 5.10 curl hangs on SMB upload over stdin
+ 5.11 configure --with-gssapi with Heimdal is ignored on macOS
+ 5.12 flaky Windows CI builds
+
+@@ -332,10 +331,6 @@ problems may have been fixed or changed somewhat since this was written.
+
+ https://github.com/curl/curl/issues/864
+
+-5.10 curl hangs on SMB upload over stdin
+-
+- See https://github.com/curl/curl/issues/7896
+-
+ 5.11 configure --with-gssapi with Heimdal is ignored on macOS
+
+ ... unless you also pass --with-gssapi-libs
+diff --git a/docs/URL-SYNTAX.md b/docs/URL-SYNTAX.md
+index 691fcceacd66c..802bbdef96979 100644
+--- a/docs/URL-SYNTAX.md
++++ b/docs/URL-SYNTAX.md
+@@ -360,6 +360,9 @@ share and directory or the share to upload to and as such, may not be omitted.
+ If the user name is embedded in the URL then it must contain the domain name
+ and as such, the backslash must be URL encoded as %2f.
+
++When uploading to SMB, the size of the file needs to be known ahead of time,
++meaning that you cannot upload a file passed to curl over a pipe like stdin.
++
+ curl supports SMB version 1 (only)
+
+ ## SMTP
+diff --git a/lib/smb.c b/lib/smb.c
+index 8a76763c157ce..dc0abe784bcee 100644
+--- a/lib/smb.c
++++ b/lib/smb.c
+@@ -763,6 +763,11 @@ static CURLcode smb_request_state(struct Curl_easy *data, bool *done)
+ void *msg = NULL;
+ const struct smb_nt_create_response *smb_m;
+
++ if(data->set.upload && (data->state.infilesize < 0)) {
++ failf(data, "SMB upload needs to know the size up front");
++ return CURLE_SEND_ERROR;
++ }
++
+ /* Start the request */
+ if(req->state == SMB_REQUESTING) {
+ result = smb_send_tree_connect(data);
+@@ -993,6 +998,7 @@ static CURLcode smb_parse_url_path(struct Curl_easy *data,
+ /* The share must be present */
+ if(!slash) {
+ Curl_safefree(smbc->share);
++ failf(data, "missing share in URL path for SMB");
+ return CURLE_URL_MALFORMAT;
+ }
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28322-2.patch b/meta/recipes-support/curl/curl/CVE-2023-28322-2.patch
new file mode 100644
index 0000000000..f2134dd1c3
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28322-2.patch
@@ -0,0 +1,436 @@
+From 7815647d6582c0a4900be2e1de6c5e61272c496b Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 25 Apr 2023 08:28:01 +0200
+Subject: [PATCH] lib: unify the upload/method handling
+
+By making sure we set state.upload based on the set.method value and not
+independently as set.upload, we reduce confusion and mixup risks, both
+internally and externally.
+
+Closes #11017
+
+CVE: CVE-2023-28322
+Upstream-Status: Backport [https://github.com/curl/curl/commit/7815647d6582c0a4900be2e1de]
+Comments: Hunks refreshed
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ lib/curl_rtmp.c | 4 ++--
+ lib/file.c | 4 ++--
+ lib/ftp.c | 8 ++++----
+ lib/http.c | 4 ++--
+ lib/imap.c | 6 +++---
+ lib/rtsp.c | 4 ++--
+ lib/setopt.c | 6 ++----
+ lib/smb.c | 6 +++---
+ lib/smtp.c | 4 ++--
+ lib/tftp.c | 8 ++++----
+ lib/transfer.c | 4 ++--
+ lib/urldata.h | 2 +-
+ lib/vssh/libssh.c | 6 +++---
+ lib/vssh/libssh2.c | 6 +++---
+ lib/vssh/wolfssh.c | 2 +-
+ 15 files changed, 36 insertions(+), 38 deletions(-)
+
+diff --git a/lib/curl_rtmp.c b/lib/curl_rtmp.c
+index 2679a2cdc1afe..406fb42ac0f44 100644
+--- a/lib/curl_rtmp.c
++++ b/lib/curl_rtmp.c
+@@ -231,7 +231,7 @@ static CURLcode rtmp_connect(struct Curl_easy *data, bool *done)
+ /* We have to know if it's a write before we send the
+ * connect request packet
+ */
+- if(data->set.upload)
++ if(data->state.upload)
+ r->Link.protocol |= RTMP_FEATURE_WRITE;
+
+ /* For plain streams, use the buffer toggle trick to keep data flowing */
+@@ -263,7 +263,7 @@ static CURLcode rtmp_do(struct Curl_easy *data, bool *done)
+ if(!RTMP_ConnectStream(r, 0))
+ return CURLE_FAILED_INIT;
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ Curl_pgrsSetUploadSize(data, data->state.infilesize);
+ Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ }
+diff --git a/lib/file.c b/lib/file.c
+index 51c5d07ce40ab..c751e8861a99b 100644
+--- a/lib/file.c
++++ b/lib/file.c
+@@ -240,7 +240,7 @@ static CURLcode file_connect(struct Curl_easy *data, bool *done)
+ file->freepath = real_path; /* free this when done */
+
+ file->fd = fd;
+- if(!data->set.upload && (fd == -1)) {
++ if(!data->state.upload && (fd == -1)) {
+ failf(data, "Couldn't open file %s", data->state.up.path);
+ file_done(data, CURLE_FILE_COULDNT_READ_FILE, FALSE);
+ return CURLE_FILE_COULDNT_READ_FILE;
+@@ -422,7 +422,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
+
+ Curl_pgrsStartNow(data);
+
+- if(data->set.upload)
++ if(data->state.upload)
+ return file_upload(data);
+
+ file = data->req.p.file;
+diff --git a/lib/ftp.c b/lib/ftp.c
+index f50d7baf622f8..4ff68cc454cbc 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -1348,7 +1348,7 @@ static CURLcode ftp_state_prepare_transfer(struct Curl_easy *data)
+ data->set.str[STRING_CUSTOMREQUEST]?
+ data->set.str[STRING_CUSTOMREQUEST]:
+ (data->state.list_only?"NLST":"LIST"));
+- else if(data->set.upload)
++ else if(data->state.upload)
+ result = Curl_pp_sendf(data, &ftpc->pp, "PRET STOR %s",
+ conn->proto.ftpc.file);
+ else
+@@ -3384,7 +3384,7 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
+ /* the response code from the transfer showed an error already so no
+ use checking further */
+ ;
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ if((-1 != data->state.infilesize) &&
+ (data->state.infilesize != data->req.writebytecount) &&
+ !data->set.crlf &&
+@@ -3640,7 +3640,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
+ connected back to us */
+ }
+ }
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ result = ftp_nb_type(data, conn, data->state.prefer_ascii,
+ FTP_STOR_TYPE);
+ if(result)
+@@ -4233,7 +4233,7 @@
+ ftpc->file = NULL; /* instead of point to a zero byte,
+ we make it a NULL pointer */
+
+- if(data->set.upload && !ftpc->file && (ftp->transfer == PPTRANSFER_BODY)) {
++ if(data->state.upload && !ftpc->file && (ftp->transfer == PPTRANSFER_BODY)) {
+ /* We need a file name when uploading. Return error! */
+ failf(data, "Uploading to a URL without a file name!");
+ free(rawPath);
+diff --git a/lib/http.c b/lib/http.c
+index 80e43f6f361e8..bffdd3468536d 100644
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -2033,7 +2033,7 @@
+ Curl_HttpReq httpreq = data->state.httpreq;
+ const char *request;
+ if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
+- data->set.upload)
++ data->state.upload)
+ httpreq = HTTPREQ_PUT;
+
+ /* Now set the 'request' pointer to the proper request string */
+@@ -2423,7 +2423,7 @@ CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
+ if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
+ (((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
+ http->postsize < 0) ||
+- ((data->set.upload || httpreq == HTTPREQ_POST) &&
++ ((data->state.upload || httpreq == HTTPREQ_POST) &&
+ data->state.infilesize == -1))) {
+ if(conn->bits.authneg)
+ /* don't enable chunked during auth neg */
+diff --git a/lib/imap.c b/lib/imap.c
+index c2f675d4b2618..1952e66a1efcd 100644
+--- a/lib/imap.c
++++ b/lib/imap.c
+@@ -1511,11 +1511,11 @@ static CURLcode imap_done(struct Curl_easy *data, CURLcode status,
+ result = status; /* use the already set error code */
+ }
+ else if(!data->set.connect_only && !imap->custom &&
+- (imap->uid || imap->mindex || data->set.upload ||
++ (imap->uid || imap->mindex || data->state.upload ||
+ data->set.mimepost.kind != MIMEKIND_NONE)) {
+ /* Handle responses after FETCH or APPEND transfer has finished */
+
+- if(!data->set.upload && data->set.mimepost.kind == MIMEKIND_NONE)
++ if(!data->state.upload && data->set.mimepost.kind == MIMEKIND_NONE)
+ state(data, IMAP_FETCH_FINAL);
+ else {
+ /* End the APPEND command first by sending an empty line */
+@@ -1581,7 +1581,7 @@ static CURLcode imap_perform(struct Curl_easy *data, bool *connected,
+ selected = TRUE;
+
+ /* Start the first command in the DO phase */
+- if(data->set.upload || data->set.mimepost.kind != MIMEKIND_NONE)
++ if(data->state.upload || data->set.mimepost.kind != MIMEKIND_NONE)
+ /* APPEND can be executed directly */
+ result = imap_perform_append(data);
+ else if(imap->custom && (selected || !imap->mailbox))
+diff --git a/lib/rtsp.c b/lib/rtsp.c
+index ea99d720ec4eb..ccd7264b00e74 100644
+--- a/lib/rtsp.c
++++ b/lib/rtsp.c
+@@ -493,7 +493,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done)
+ rtspreq == RTSPREQ_SET_PARAMETER ||
+ rtspreq == RTSPREQ_GET_PARAMETER) {
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ putsize = data->state.infilesize;
+ data->state.httpreq = HTTPREQ_PUT;
+
+@@ -512,7 +512,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done)
+ result =
+ Curl_dyn_addf(&req_buffer,
+ "Content-Length: %" CURL_FORMAT_CURL_OFF_T"\r\n",
+- (data->set.upload ? putsize : postsize));
++ (data->state.upload ? putsize : postsize));
+ if(result)
+ return result;
+ }
+diff --git a/lib/setopt.c b/lib/setopt.c
+index 38f5711e44191..0c3b9634d1192 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -333,8 +333,8 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ * We want to sent data to the remote host. If this is HTTP, that equals
+ * using the PUT request.
+ */
+- data->set.upload = (0 != va_arg(param, long)) ? TRUE : FALSE;
+- if(data->set.upload) {
++ arg = va_arg(param, long);
++ if(arg) {
+ /* If this is HTTP, PUT is what's needed to "upload" */
+ data->set.method = HTTPREQ_PUT;
+ data->set.opt_no_body = FALSE; /* this is implied */
+@@ -625,7 +625,6 @@
+ }
+ else
+ data->set.method = HTTPREQ_GET;
+- data->set.upload = FALSE;
+ break;
+
+ case CURLOPT_HTTPPOST:
+@@ -888,7 +887,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ */
+ if(va_arg(param, long)) {
+ data->set.method = HTTPREQ_GET;
+- data->set.upload = FALSE; /* switch off upload */
+ data->set.opt_no_body = FALSE; /* this is implied */
+ }
+ break;
+diff --git a/lib/smb.c b/lib/smb.c
+index a1e444ee6b97e..d6822213529bc 100644
+--- a/lib/smb.c
++++ b/lib/smb.c
+@@ -530,7 +530,7 @@ static CURLcode smb_send_open(struct Curl_easy *data)
+ byte_count = strlen(req->path);
+ msg.name_length = smb_swap16((unsigned short)byte_count);
+ msg.share_access = smb_swap32(SMB_FILE_SHARE_ALL);
+- if(data->set.upload) {
++ if(data->state.upload) {
+ msg.access = smb_swap32(SMB_GENERIC_READ | SMB_GENERIC_WRITE);
+ msg.create_disposition = smb_swap32(SMB_FILE_OVERWRITE_IF);
+ }
+@@ -762,7 +762,7 @@ static CURLcode smb_request_state(struct Curl_easy *data, bool *done)
+ void *msg = NULL;
+ const struct smb_nt_create_response *smb_m;
+
+- if(data->set.upload && (data->state.infilesize < 0)) {
++ if(data->state.upload && (data->state.infilesize < 0)) {
+ failf(data, "SMB upload needs to know the size up front");
+ return CURLE_SEND_ERROR;
+ }
+@@ -813,7 +813,7 @@ static CURLcode smb_request_state(struct Curl_easy *data, bool *done)
+ smb_m = (const struct smb_nt_create_response*) msg;
+ req->fid = smb_swap16(smb_m->fid);
+ data->req.offset = 0;
+- if(data->set.upload) {
++ if(data->state.upload) {
+ data->req.size = data->state.infilesize;
+ Curl_pgrsSetUploadSize(data, data->req.size);
+ next_state = SMB_UPLOAD;
+diff --git a/lib/smtp.c b/lib/smtp.c
+index 7a030308d4689..c182cace742d7 100644
+--- a/lib/smtp.c
++++ b/lib/smtp.c
+@@ -1419,7 +1419,7 @@ static CURLcode smtp_done(struct Curl_easy *data, CURLcode status,
+ result = status; /* use the already set error code */
+ }
+ else if(!data->set.connect_only && data->set.mail_rcpt &&
+- (data->set.upload || data->set.mimepost.kind)) {
++ (data->state.upload || data->set.mimepost.kind)) {
+ /* Calculate the EOB taking into account any terminating CRLF from the
+ previous line of the email or the CRLF of the DATA command when there
+ is "no mail data". RFC-5321, sect. 4.1.1.4.
+@@ -1511,7 +1511,7 @@ static CURLcode smtp_perform(struct Curl_easy *data, bool *connected,
+ smtp->eob = 2;
+
+ /* Start the first command in the DO phase */
+- if((data->set.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
++ if((data->state.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
+ /* MAIL transfer */
+ result = smtp_perform_mail(data);
+ else
+diff --git a/lib/tftp.c b/lib/tftp.c
+index 164d3c723c5b9..8ed1b887b4d21 100644
+--- a/lib/tftp.c
++++ b/lib/tftp.c
+@@ -370,7 +370,7 @@ static CURLcode tftp_parse_option_ack(struct tftp_state_data *state,
+
+ /* tsize should be ignored on upload: Who cares about the size of the
+ remote file? */
+- if(!data->set.upload) {
++ if(!data->state.upload) {
+ if(!tsize) {
+ failf(data, "invalid tsize -:%s:- value in OACK packet", value);
+ return CURLE_TFTP_ILLEGAL;
+@@ -451,7 +451,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state,
+ return result;
+ }
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ /* If we are uploading, send an WRQ */
+ setpacketevent(&state->spacket, TFTP_EVENT_WRQ);
+ state->data->req.upload_fromhere =
+@@ -486,7 +486,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state,
+ if(!data->set.tftp_no_options) {
+ char buf[64];
+ /* add tsize option */
+- if(data->set.upload && (data->state.infilesize != -1))
++ if(data->state.upload && (data->state.infilesize != -1))
+ msnprintf(buf, sizeof(buf), "%" CURL_FORMAT_CURL_OFF_T,
+ data->state.infilesize);
+ else
+@@ -540,7 +540,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state,
+ break;
+
+ case TFTP_EVENT_OACK:
+- if(data->set.upload) {
++ if(data->state.upload) {
+ result = tftp_connect_for_tx(state, event);
+ }
+ else {
+diff --git a/lib/transfer.c b/lib/transfer.c
+index e9ab8fbf09510..cb69f3365855a 100644
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -1293,6 +1293,7 @@ void Curl_init_CONNECT(struct Curl_easy *data)
+ {
+ data->state.fread_func = data->set.fread_func_set;
+ data->state.in = data->set.in_set;
++ data->state.upload = (data->state.httpreq == HTTPREQ_PUT);
+ }
+
+ /*
+@@ -1767,7 +1767,6 @@
+ data->state.httpreq != HTTPREQ_POST_MIME) ||
+ !(data->set.keep_post & CURL_REDIR_POST_303))) {
+ data->state.httpreq = HTTPREQ_GET;
+- data->set.upload = false;
+ infof(data, "Switch to %s",
+ data->set.opt_no_body?"HEAD":"GET");
+ }
+@@ -1770,7 +1770,7 @@ CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
+
+ /* if we're talking upload, we can't do the checks below, unless the protocol
+ is HTTP as when uploading over HTTP we will still get a response */
+- if(data->set.upload &&
++ if(data->state.upload &&
+ !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
+ return CURLE_OK;
+
+diff --git a/lib/urldata.h b/lib/urldata.h
+index cca992a0295aa..a8580bdb66fe8 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1487,6 +1487,7 @@
+ BIT(url_alloc); /* URL string is malloc()'ed */
+ BIT(referer_alloc); /* referer string is malloc()ed */
+ BIT(wildcard_resolve); /* Set to true if any resolve change is a wildcard */
++ BIT(upload); /* upload request */
+ };
+
+ /*
+@@ -1838,7 +1839,6 @@ struct UserDefined {
+ BIT(http_auto_referer); /* set "correct" referer when following
+ location: */
+ BIT(opt_no_body); /* as set with CURLOPT_NOBODY */
+- BIT(upload); /* upload request */
+ BIT(verbose); /* output verbosity */
+ BIT(krb); /* Kerberos connection requested */
+ BIT(reuse_forbid); /* forbidden to be reused, close after use */
+diff --git a/lib/vssh/libssh.c b/lib/vssh/libssh.c
+index b31f741ba9492..d60edaa303642 100644
+--- a/lib/vssh/libssh.c
++++ b/lib/vssh/libssh.c
+@@ -1209,7 +1209,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
+ }
+
+ case SSH_SFTP_TRANS_INIT:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(data, SSH_SFTP_UPLOAD_INIT);
+ else {
+ if(protop->path[strlen(protop->path)-1] == '/')
+@@ -1802,7 +1802,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
+ /* Functions from the SCP subsystem cannot handle/return SSH_AGAIN */
+ ssh_set_blocking(sshc->ssh_session, 1);
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ if(data->state.infilesize < 0) {
+ failf(data, "SCP requires a known file size for upload");
+ sshc->actualcode = CURLE_UPLOAD_FAILED;
+@@ -1907,7 +1907,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
+ break;
+ }
+ case SSH_SCP_DONE:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(data, SSH_SCP_SEND_EOF);
+ else
+ state(data, SSH_SCP_CHANNEL_FREE);
+diff --git a/lib/vssh/libssh2.c b/lib/vssh/libssh2.c
+index f1154dc47a74e..f2e5352d1fd3a 100644
+--- a/lib/vssh/libssh2.c
++++ b/lib/vssh/libssh2.c
+@@ -2019,7 +2019,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
+ }
+
+ case SSH_SFTP_TRANS_INIT:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(data, SSH_SFTP_UPLOAD_INIT);
+ else {
+ if(sshp->path[strlen(sshp->path)-1] == '/')
+@@ -2691,7 +2691,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
+ break;
+ }
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ if(data->state.infilesize < 0) {
+ failf(data, "SCP requires a known file size for upload");
+ sshc->actualcode = CURLE_UPLOAD_FAILED;
+@@ -2831,7 +2831,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
+ break;
+
+ case SSH_SCP_DONE:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(data, SSH_SCP_SEND_EOF);
+ else
+ state(data, SSH_SCP_CHANNEL_FREE);
+diff --git a/lib/vssh/wolfssh.c b/lib/vssh/wolfssh.c
+index 17d59ecd23bc8..2ca91b7363b1d 100644
+--- a/lib/vssh/wolfssh.c
++++ b/lib/vssh/wolfssh.c
+@@ -557,7 +557,7 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
+ }
+ break;
+ case SSH_SFTP_TRANS_INIT:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(data, SSH_SFTP_UPLOAD_INIT);
+ else {
+ if(sftp_scp->path[strlen(sftp_scp->path)-1] == '/')
diff --git a/meta/recipes-support/curl/curl/CVE-2023-38545.patch b/meta/recipes-support/curl/curl/CVE-2023-38545.patch
new file mode 100644
index 0000000000..c198d29c04
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-38545.patch
@@ -0,0 +1,133 @@
+From fb4415d8aee6c1045be932a34fe6107c2f5ed147 Mon Sep 17 00:00:00 2001
+From: Jay Satiro <raysatiro@yahoo.com>
+Date: Wed, 11 Oct 2023 07:34:19 +0200
+Subject: [PATCH] socks: return error if hostname too long for remote resolve
+
+Prior to this change the state machine attempted to change the remote
+resolve to a local resolve if the hostname was longer than 255
+characters. Unfortunately that did not work as intended and caused a
+security issue.
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/fb4415d8aee6c1045be932a34fe6107c2f5ed147]
+
+CVE: CVE-2023-38545
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ lib/socks.c | 8 +++---
+ tests/data/Makefile.inc | 2 +-
+ tests/data/test722 | 64 +++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 69 insertions(+), 5 deletions(-)
+ create mode 100644 tests/data/test722
+
+diff --git a/lib/socks.c b/lib/socks.c
+index a014aa6..2215c02 100644
+--- a/lib/socks.c
++++ b/lib/socks.c
+@@ -536,9 +536,9 @@ CURLproxycode Curl_SOCKS5(const char *proxy_user,
+
+ /* RFC1928 chapter 5 specifies max 255 chars for domain name in packet */
+ if(!socks5_resolve_local && hostname_len > 255) {
+- infof(data, "SOCKS5: server resolving disabled for hostnames of "
+- "length > 255 [actual len=%zu]", hostname_len);
+- socks5_resolve_local = TRUE;
++ failf(data, "SOCKS5: the destination hostname is too long to be "
++ "resolved remotely by the proxy.");
++ return CURLPX_LONG_HOSTNAME;
+ }
+
+ if(auth & ~(CURLAUTH_BASIC | CURLAUTH_GSSAPI))
+@@ -879,7 +879,7 @@ CURLproxycode Curl_SOCKS5(const char *proxy_user,
+ }
+ else {
+ socksreq[len++] = 3;
+- socksreq[len++] = (char) hostname_len; /* one byte address length */
++ socksreq[len++] = (unsigned char) hostname_len; /* one byte length */
+ memcpy(&socksreq[len], hostname, hostname_len); /* address w/o NULL */
+ len += hostname_len;
+ }
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index 3064b39..47117b6 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -99,7 +99,7 @@ test670 test671 test672 test673 test674 test675 test676 test677 test678 \
+ \
+ test700 test701 test702 test703 test704 test705 test706 test707 test708 \
+ test709 test710 test711 test712 test713 test714 test715 test716 test717 \
+-test718 test719 test720 test721 \
++test718 test719 test720 test721 test722 \
+ \
+ test800 test801 test802 test803 test804 test805 test806 test807 test808 \
+ test809 test810 test811 test812 test813 test814 test815 test816 test817 \
+diff --git a/tests/data/test722 b/tests/data/test722
+new file mode 100644
+index 0000000..05bcf28
+--- /dev/null
++++ b/tests/data/test722
+@@ -0,0 +1,64 @@
++<testcase>
++<info>
++<keywords>
++HTTP
++HTTP GET
++SOCKS5
++SOCKS5h
++followlocation
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++# The hostname in this redirect is 256 characters and too long (> 255) for
++# SOCKS5 remote resolve. curl must return error CURLE_PROXY in this case.
++<data>
++HTTP/1.1 301 Moved Permanently
++Location: http://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/
++Content-Length: 0
++Connection: close
++
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++proxy
++</features>
++<server>
++http
++socks5
++</server>
++ <name>
++SOCKS5h with HTTP redirect to hostname too long
++ </name>
++ <command>
++--no-progress-meter --location --proxy socks5h://%HOSTIP:%SOCKSPORT http://%HOSTIP:%HTTPPORT/%TESTNUMBER
++</command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++<protocol crlf="yes">
++GET /%TESTNUMBER HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++User-Agent: curl/%VERSION
++Accept: */*
++
++</protocol>
++<errorcode>
++97
++</errorcode>
++# the error message is verified because error code CURLE_PROXY (97) may be
++# returned for any number of reasons and we need to make sure it is
++# specifically for the reason below so that we know the check is working.
++<stderr mode="text">
++curl: (97) SOCKS5: the destination hostname is too long to be resolved remotely by the proxy.
++</stderr>
++</verify>
++</testcase>
+--
+2.40.0
diff --git a/meta/recipes-support/curl/curl/CVE-2023-38546.patch b/meta/recipes-support/curl/curl/CVE-2023-38546.patch
new file mode 100644
index 0000000000..1b2f1e7a7d
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-38546.patch
@@ -0,0 +1,137 @@
+From 61275672b46d9abb3285740467b882e22ed75da8 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 14 Sep 2023 23:28:32 +0200
+Subject: [PATCH] cookie: remove unnecessary struct fields
+
+Plus: reduce the hash table size from 256 to 63. It seems unlikely to
+make much of a speed difference for most use cases but saves 1.5KB of
+data per instance.
+
+Closes #11862
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/61275672b46d9abb32857404]
+
+CVE: CVE-2023-38546
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ lib/cookie.c | 13 +------------
+ lib/cookie.h | 13 ++++---------
+ lib/easy.c | 4 +---
+ 3 files changed, 6 insertions(+), 24 deletions(-)
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index e0470a1..38d8d6c 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -115,7 +115,6 @@ static void freecookie(struct Cookie *co)
+ free(co->name);
+ free(co->value);
+ free(co->maxage);
+- free(co->version);
+ free(co);
+ }
+
+@@ -707,11 +706,7 @@ Curl_cookie_add(struct Curl_easy *data,
+ }
+ }
+ else if(strcasecompare("version", name)) {
+- strstore(&co->version, whatptr);
+- if(!co->version) {
+- badcookie = TRUE;
+- break;
+- }
++ /* just ignore */
+ }
+ else if(strcasecompare("max-age", name)) {
+ /*
+@@ -1132,7 +1127,6 @@ Curl_cookie_add(struct Curl_easy *data,
+ free(clist->path);
+ free(clist->spath);
+ free(clist->expirestr);
+- free(clist->version);
+ free(clist->maxage);
+
+ *clist = *co; /* then store all the new data */
+@@ -1210,9 +1204,6 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
+ c = calloc(1, sizeof(struct CookieInfo));
+ if(!c)
+ return NULL; /* failed to get memory */
+- c->filename = strdup(file?file:"none"); /* copy the name just in case */
+- if(!c->filename)
+- goto fail; /* failed to get memory */
+ /*
+ * Initialize the next_expiration time to signal that we don't have enough
+ * information yet.
+@@ -1363,7 +1354,6 @@ static struct Cookie *dup_cookie(struct Cookie *src)
+ CLONE(name);
+ CLONE(value);
+ CLONE(maxage);
+- CLONE(version);
+ d->expires = src->expires;
+ d->tailmatch = src->tailmatch;
+ d->secure = src->secure;
+@@ -1579,7 +1569,6 @@ void Curl_cookie_cleanup(struct CookieInfo *c)
+ {
+ if(c) {
+ unsigned int i;
+- free(c->filename);
+ for(i = 0; i < COOKIE_HASH_SIZE; i++)
+ Curl_cookie_freelist(c->cookies[i]);
+ free(c); /* free the base struct as well */
+diff --git a/lib/cookie.h b/lib/cookie.h
+index 7411980..645600a 100644
+--- a/lib/cookie.h
++++ b/lib/cookie.h
+@@ -34,11 +34,7 @@ struct Cookie {
+ char *domain; /* domain = <this> */
+ curl_off_t expires; /* expires = <this> */
+ char *expirestr; /* the plain text version */
+-
+- /* RFC 2109 keywords. Version=1 means 2109-compliant cookie sending */
+- char *version; /* Version = <value> */
+ char *maxage; /* Max-Age = <value> */
+-
+ bool tailmatch; /* whether we do tail-matching of the domain name */
+ bool secure; /* whether the 'secure' keyword was used */
+ bool livecookie; /* updated from a server, not a stored file */
+@@ -54,18 +50,17 @@ struct Cookie {
+ #define COOKIE_PREFIX__SECURE (1<<0)
+ #define COOKIE_PREFIX__HOST (1<<1)
+
+-#define COOKIE_HASH_SIZE 256
++#define COOKIE_HASH_SIZE 63
+
+ struct CookieInfo {
+ /* linked list of cookies we know of */
+ struct Cookie *cookies[COOKIE_HASH_SIZE];
+
+- char *filename; /* file we read from/write to */
+- long numcookies; /* number of cookies in the "jar" */
++ curl_off_t next_expiration; /* the next time at which expiration happens */
++ int numcookies; /* number of cookies in the "jar" */
++ int lastct; /* last creation-time used in the jar */
+ bool running; /* state info, for cookie adding information */
+ bool newsession; /* new session, discard session cookies on load */
+- int lastct; /* last creation-time used in the jar */
+- curl_off_t next_expiration; /* the next time at which expiration happens */
+ };
+
+ /* This is the maximum line length we accept for a cookie line. RFC 2109
+diff --git a/lib/easy.c b/lib/easy.c
+index 0e23561..31abf9e 100644
+--- a/lib/easy.c
++++ b/lib/easy.c
+@@ -841,9 +841,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data)
+ if(data->cookies) {
+ /* If cookies are enabled in the parent handle, we enable them
+ in the clone as well! */
+- outcurl->cookies = Curl_cookie_init(data,
+- data->cookies->filename,
+- outcurl->cookies,
++ outcurl->cookies = Curl_cookie_init(data, NULL, outcurl->cookies,
+ data->set.cookiesession);
+ if(!outcurl->cookies)
+ goto fail;
+--
+2.40.0
diff --git a/meta/recipes-support/curl/curl/CVE-2023-46218.patch b/meta/recipes-support/curl/curl/CVE-2023-46218.patch
new file mode 100644
index 0000000000..d7d7908ea0
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-46218.patch
@@ -0,0 +1,52 @@
+Backport of:
+
+From 2b0994c29a721c91c572cff7808c572a24d251eb Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 23 Nov 2023 08:15:47 +0100
+Subject: [PATCH] cookie: lowercase the domain names before PSL checks
+
+Reported-by: Harry Sintonen
+
+Closes #12387
+
+CVE: CVE-2023-46218
+Upstream-Status: Backport [https://github.com/curl/curl/commit/2b0994c29a721c91c57]
+Signed-off-by: Poonam Jadhav <poonam.jadhav@kpit.com>
+---
+ lib/cookie.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -1044,15 +1044,23 @@ Curl_cookie_add(struct Curl_easy *data,
+ * dereference it.
+ */
+ if(data && (domain && co->domain && !Curl_host_is_ipnum(co->domain))) {
+- const psl_ctx_t *psl = Curl_psl_use(data);
+- int acceptable;
+-
+- if(psl) {
+- acceptable = psl_is_cookie_domain_acceptable(psl, domain, co->domain);
+- Curl_psl_release(data);
++ bool acceptable = FALSE;
++ char lcase[256];
++ char lcookie[256];
++ size_t dlen = strlen(domain);
++ size_t clen = strlen(co->domain);
++ if((dlen < sizeof(lcase)) && (clen < sizeof(lcookie))) {
++ const psl_ctx_t *psl = Curl_psl_use(data);
++ if(psl) {
++ /* the PSL check requires lowercase domain name and pattern */
++ Curl_strntolower(lcase, domain, dlen + 1);
++ Curl_strntolower(lcookie, co->domain, clen + 1);
++ acceptable = psl_is_cookie_domain_acceptable(psl, lcase, lcookie);
++ Curl_psl_release(data);
++ }
++ else
++ acceptable = !bad_domain(domain);
+ }
+- else
+- acceptable = !bad_domain(domain);
+
+ if(!acceptable) {
+ infof(data, "cookie '%s' dropped, domain '%s' must not "
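
The CVE-2023-46218 patch above lowercases both the request host and the cookie's Domain attribute into fixed-size stack buffers before handing them to libpsl, because the public-suffix check expects lowercase names and mixed-case Domain values previously slipped past it. A rough standalone sketch of just that normalization step, using a hypothetical helper in place of curl's Curl_strntolower() (illustrative only, not curl code):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy src into dst (cap bytes including the NUL), lowercasing ASCII
     * letters. Returns 0 on success, -1 if src does not fit -- the patch
     * likewise refuses over-long names instead of truncating them. */
    static int lower_copy(char *dst, size_t cap, const char *src)
    {
        size_t len = strlen(src);
        size_t i;
        if(len >= cap)
            return -1;
        for(i = 0; i < len; i++)
            dst[i] = (char)tolower((unsigned char)src[i]);
        dst[len] = '\0';
        return 0;
    }

    int main(void)
    {
        char host[256], cookie_domain[256];
        if(!lower_copy(host, sizeof(host), "Example.COM") &&
           !lower_copy(cookie_domain, sizeof(cookie_domain), ".example.com"))
            printf("PSL check would see: host=%s cookie=%s\n",
                   host, cookie_domain);
        return 0;
    }
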
diff --git a/meta/recipes-support/curl/curl/CVE-2023-46219-0001.patch b/meta/recipes-support/curl/curl/CVE-2023-46219-0001.patch
new file mode 100644
index 0000000000..55e8f6fac9
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-46219-0001.patch
@@ -0,0 +1,42 @@
+From 0c667188e0c6cda615a036b8a2b4125f2c404dde Mon Sep 17 00:00:00 2001
+From: SaltyMilk <soufiane.elmelcaoui@gmail.com>
+Date: Mon, 10 Jul 2023 21:43:28 +0200
+Subject: [PATCH] fopen: optimize
+
+Closes #11419
+
+CVE: CVE-2023-46219
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/0c667188e0c6]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ lib/fopen.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+index ad3691b..92f39cf 100644
+--- a/lib/fopen.c
++++ b/lib/fopen.c
+@@ -56,13 +56,13 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ int fd = -1;
+ *tempname = NULL;
+
+- if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) {
+- /* a non-regular file, fallback to direct fopen() */
+- *fh = fopen(filename, FOPEN_WRITETEXT);
+- if(*fh)
+- return CURLE_OK;
++ *fh = fopen(filename, FOPEN_WRITETEXT);
++ if(!*fh)
+ goto fail;
+- }
++ if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode))
++ return CURLE_OK;
++ fclose(*fh);
++ *fh = NULL;
+
+ result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
+ if(result)
+--
+2.40.0
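
The first CVE-2023-46219 change flips the original order of operations: instead of stat()'ing the path and then opening it, the patched Curl_fopen() opens the file first and uses fstat() on the resulting descriptor to decide whether it is a regular file, so the decision applies to the handle actually being written. A minimal POSIX sketch of that open-then-fstat pattern (hypothetical file name, not curl code):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Open a path for writing and check the open descriptor itself,
     * rather than the path, to see whether it is a regular file. */
    int main(void)
    {
        struct stat sb;
        FILE *fh = fopen("cookies.txt", "w");   /* hypothetical target */
        if(!fh) {
            perror("fopen");
            return 1;
        }
        if(fstat(fileno(fh), &sb) == -1 || !S_ISREG(sb.st_mode))
            printf("not a regular file, write in place\n");
        else
            printf("regular file, a temp-and-rename scheme could be used\n");
        fclose(fh);
        return 0;
    }
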
diff --git a/meta/recipes-support/curl/curl/CVE-2023-46219-0002.patch b/meta/recipes-support/curl/curl/CVE-2023-46219-0002.patch
new file mode 100644
index 0000000000..f432fabbb1
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-46219-0002.patch
@@ -0,0 +1,133 @@
+From 73b65e94f3531179de45c6f3c836a610e3d0a846 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 23 Nov 2023 08:23:17 +0100
+Subject: [PATCH] fopen: create short(er) temporary file name
+
+Only using random letters in the name plus a ".tmp" extension. Not by
+appending characters to the final file name.
+
+Reported-by: Maksymilian Arciemowicz
+
+Closes #12388
+
+CVE: CVE-2023-46219
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/73b65e94f3531179]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ lib/fopen.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 59 insertions(+), 4 deletions(-)
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+index 92f39cf..1670e32 100644
+--- a/lib/fopen.c
++++ b/lib/fopen.c
+@@ -39,6 +39,50 @@
+ #include "curl_memory.h"
+ #include "memdebug.h"
+
++
++/*
++ The dirslash() function breaks a null-terminated pathname string into
++ directory and filename components then returns the directory component up
++ to, *AND INCLUDING*, a final '/'. If there is no directory in the path,
++ this instead returns a "" string.
++ This function returns a pointer to malloc'ed memory.
++ The input path to this function is expected to have a file name part.
++*/
++
++#ifdef _WIN32
++#define PATHSEP "\\"
++#define IS_SEP(x) (((x) == '/') || ((x) == '\\'))
++#elif defined(MSDOS) || defined(__EMX__) || defined(OS2)
++#define PATHSEP "\\"
++#define IS_SEP(x) ((x) == '\\')
++#else
++#define PATHSEP "/"
++#define IS_SEP(x) ((x) == '/')
++#endif
++
++static char *dirslash(const char *path)
++{
++ size_t n;
++ struct dynbuf out;
++ DEBUGASSERT(path);
++ Curl_dyn_init(&out, CURL_MAX_INPUT_LENGTH);
++ n = strlen(path);
++ if(n) {
++ /* find the rightmost path separator, if any */
++ while(n && !IS_SEP(path[n-1]))
++ --n;
++ /* skip over all the path separators, if any */
++ while(n && IS_SEP(path[n-1]))
++ --n;
++ }
++ if(Curl_dyn_addn(&out, path, n))
++ return NULL;
++ /* if there was a directory, append a single trailing slash */
++ if(n && Curl_dyn_addn(&out, PATHSEP, 1))
++ return NULL;
++ return Curl_dyn_ptr(&out);
++}
++
+ /*
+ * Curl_fopen() opens a file for writing with a temp name, to be renamed
+ * to the final name when completed. If there is an existing file using this
+@@ -50,25 +94,34 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ FILE **fh, char **tempname)
+ {
+ CURLcode result = CURLE_WRITE_ERROR;
+- unsigned char randsuffix[9];
++ unsigned char randbuf[41];
+ char *tempstore = NULL;
+ struct_stat sb;
+ int fd = -1;
++ char *dir;
+ *tempname = NULL;
+
++ dir = dirslash(filename);
++ if(!dir)
++ goto fail;
++
+ *fh = fopen(filename, FOPEN_WRITETEXT);
+ if(!*fh)
+ goto fail;
+- if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode))
++ if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode)){
++ free(dir);
+ return CURLE_OK;
++ }
+ fclose(*fh);
+ *fh = NULL;
+
+- result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
++ result = Curl_rand_hex(data, randbuf, sizeof(randbuf));
+ if(result)
+ goto fail;
+
+- tempstore = aprintf("%s.%s.tmp", filename, randsuffix);
++ /* The temp file name should not end up too long for the target file
++ system */
++ tempstore = aprintf("%s%s.tmp", dir, randbuf);
+ if(!tempstore) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+@@ -95,6 +148,7 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ if(!*fh)
+ goto fail;
+
++ free(dir);
+ *tempname = tempstore;
+ return CURLE_OK;
+
+@@ -107,6 +161,7 @@ fail:
+ free(tempstore);
+
+ *tempname = NULL;
++ free(dir);
+ return result;
+ }
+
+--
+2.40.0
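
The dirslash() helper added by this patch returns the directory part of a path up to and including the final separator, or an empty string when there is none, so the random temporary name is created alongside the target file rather than being appended to the target's full name, keeping the temp name short. A simplified Unix-only sketch of that splitting, with plain malloc in place of curl's dynbuf and without the repeated-separator handling (assumed path, not curl code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return a malloc'ed copy of the directory component of 'path',
     * including the trailing '/', or "" if the path has no directory
     * part. Caller frees the result. */
    static char *dir_prefix(const char *path)
    {
        size_t n = strlen(path);
        char *out;
        while(n && path[n - 1] != '/')   /* drop the file name part */
            n--;
        out = malloc(n + 1);
        if(!out)
            return NULL;
        memcpy(out, path, n);
        out[n] = '\0';
        return out;
    }

    int main(void)
    {
        char *d = dir_prefix("/var/tmp/cookies.txt");   /* hypothetical path */
        if(d) {
            printf("temp file would go in: '%s'\n", d);  /* "/var/tmp/" */
            free(d);
        }
        return 0;
    }
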
diff --git a/meta/recipes-support/curl/curl/CVE-2023-46219-0003.patch b/meta/recipes-support/curl/curl/CVE-2023-46219-0003.patch
new file mode 100644
index 0000000000..3b6f756549
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-46219-0003.patch
@@ -0,0 +1,81 @@
+From f27b8dba73295cb5296a50f2c19c0739b502eb94 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 24 Nov 2023 09:46:32 +0100
+Subject: [PATCH] fopen: allocate the dir after fopen
+
+Move the allocation of the directory name down to after the fopen() call
+to allow that shortcut code path to avoid a superfluous malloc+free
+cycle.
+
+Follow-up to 73b65e94f35311
+
+Closes #12398
+
+CVE: CVE-2023-46219
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/f27b8dba73295cb529]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ lib/fopen.c | 19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+index 1670e32..b663f8b 100644
+--- a/lib/fopen.c
++++ b/lib/fopen.c
+@@ -98,18 +98,13 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ char *tempstore = NULL;
+ struct_stat sb;
+ int fd = -1;
+- char *dir;
++ char *dir = NULL;
+ *tempname = NULL;
+
+- dir = dirslash(filename);
+- if(!dir)
+- goto fail;
+-
+ *fh = fopen(filename, FOPEN_WRITETEXT);
+ if(!*fh)
+ goto fail;
+ if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode)){
+- free(dir);
+ return CURLE_OK;
+ }
+ fclose(*fh);
+@@ -119,9 +114,13 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ if(result)
+ goto fail;
+
+- /* The temp file name should not end up too long for the target file
+- system */
+- tempstore = aprintf("%s%s.tmp", dir, randbuf);
++ dir = dirslash(filename);
++ if(dir) {
++ /* The temp file name should not end up too long for the target file
++ system */
++ tempstore = aprintf("%s%s.tmp", dir, randbuf);
++ free(dir);
++ }
+ if(!tempstore) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+@@ -148,7 +147,6 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ if(!*fh)
+ goto fail;
+
+- free(dir);
+ *tempname = tempstore;
+ return CURLE_OK;
+
+@@ -161,7 +159,6 @@ fail:
+ free(tempstore);
+
+ *tempname = NULL;
+- free(dir);
+ return result;
+ }
+
+--
+2.40.0
diff --git a/meta/recipes-support/curl/curl/CVE-2024-2398.patch b/meta/recipes-support/curl/curl/CVE-2024-2398.patch
new file mode 100644
index 0000000000..ea55117f4d
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2024-2398.patch
@@ -0,0 +1,89 @@
+Backport of:
+
+From deca8039991886a559b67bcd6701db800a5cf764 Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <stefan@eissing.org>
+Date: Wed, 6 Mar 2024 09:36:08 +0100
+Subject: [PATCH] http2: push headers better cleanup
+
+- provide common cleanup method for push headers
+
+Closes #13054
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/curl/tree/debian/patches/CVE-2024-2398.patch?h=ubuntu/jammy-security
+Upstream commit https://github.com/curl/curl/commit/deca8039991886a559b67bcd6701db800a5cf764]
+CVE: CVE-2024-2398
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/http2.c | 34 +++++++++++++++-------------------
+ 1 file changed, 15 insertions(+), 19 deletions(-)
+
+--- a/lib/http2.c
++++ b/lib/http2.c
+@@ -555,6 +555,15 @@ static int set_transfer_url(struct Curl_
+ return 0;
+ }
+
++static void free_push_headers(struct HTTP *stream)
++{
++ size_t i;
++ for(i = 0; i<stream->push_headers_used; i++)
++ free(stream->push_headers[i]);
++ Curl_safefree(stream->push_headers);
++ stream->push_headers_used = 0;
++}
++
+ static int push_promise(struct Curl_easy *data,
+ struct connectdata *conn,
+ const nghttp2_push_promise *frame)
+@@ -568,7 +577,6 @@ static int push_promise(struct Curl_easy
+ struct curl_pushheaders heads;
+ CURLMcode rc;
+ struct http_conn *httpc;
+- size_t i;
+ /* clone the parent */
+ struct Curl_easy *newhandle = duphandle(data);
+ if(!newhandle) {
+@@ -604,11 +612,7 @@ static int push_promise(struct Curl_easy
+ Curl_set_in_callback(data, false);
+
+ /* free the headers again */
+- for(i = 0; i<stream->push_headers_used; i++)
+- free(stream->push_headers[i]);
+- free(stream->push_headers);
+- stream->push_headers = NULL;
+- stream->push_headers_used = 0;
++ free_push_headers(stream);
+
+ if(rv) {
+ DEBUGASSERT((rv > CURL_PUSH_OK) && (rv <= CURL_PUSH_ERROROUT));
+@@ -1045,10 +1049,10 @@ static int on_header(nghttp2_session *se
+ stream->push_headers_alloc) {
+ char **headp;
+ stream->push_headers_alloc *= 2;
+- headp = Curl_saferealloc(stream->push_headers,
+- stream->push_headers_alloc * sizeof(char *));
++ headp = realloc(stream->push_headers,
++ stream->push_headers_alloc * sizeof(char *));
+ if(!headp) {
+- stream->push_headers = NULL;
++ free_push_headers(stream);
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+ stream->push_headers = headp;
+@@ -1214,15 +1218,7 @@ void Curl_http2_done(struct Curl_easy *d
+ setup */
+ Curl_dyn_free(&http->header_recvbuf);
+ Curl_dyn_free(&http->trailer_recvbuf);
+- if(http->push_headers) {
+- /* if they weren't used and then freed before */
+- for(; http->push_headers_used > 0; --http->push_headers_used) {
+- free(http->push_headers[http->push_headers_used - 1]);
+- }
+- free(http->push_headers);
+- http->push_headers = NULL;
+- }
+-
++ free_push_headers(http);
+ if(!(data->conn->handler->protocol&PROTO_FAMILY_HTTP) ||
+ !httpc->h2) /* not HTTP/2 ? */
+ return;
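
The CVE-2024-2398 patch routes every release of the HTTP/2 push-header array through a single free_push_headers() helper that frees each entry, drops the array, and zeroes the count, so pushed header strings are freed on every path, including the abort paths that previously leaked them. A reduced sketch of that single-cleanup-path pattern, using a hypothetical stream struct rather than curl's struct HTTP:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for the per-stream state curl keeps. */
    struct stream {
        char **push_headers;
        size_t push_headers_used;
    };

    /* The single cleanup path: free every entry, drop the array, reset
     * the count. Calling it again is a no-op because it leaves the
     * struct in an empty, consistent state. */
    static void free_push_headers(struct stream *s)
    {
        size_t i;
        for(i = 0; i < s->push_headers_used; i++)
            free(s->push_headers[i]);
        free(s->push_headers);
        s->push_headers = NULL;
        s->push_headers_used = 0;
    }

    int main(void)
    {
        struct stream s = { NULL, 0 };
        s.push_headers = malloc(2 * sizeof(char *));
        if(s.push_headers) {
            s.push_headers[0] = strdup(":method: GET");
            s.push_headers[1] = strdup(":path: /index.html");
            s.push_headers_used = 2;
        }
        free_push_headers(&s);
        free_push_headers(&s);   /* harmless no-op */
        return 0;
    }
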
diff --git a/meta/recipes-support/curl/curl_7.82.0.bb b/meta/recipes-support/curl/curl_7.82.0.bb
index ba3fd11820..72d8544e08 100644
--- a/meta/recipes-support/curl/curl_7.82.0.bb
+++ b/meta/recipes-support/curl/curl_7.82.0.bb
@@ -6,7 +6,7 @@ HTTP post, SSL connections, proxy support, FTP uploads, and more!"
HOMEPAGE = "https://curl.se/"
BUGTRACKER = "https://github.com/curl/curl/issues"
SECTION = "console/network"
-LICENSE = "MIT-open-group"
+LICENSE = "curl"
LIC_FILES_CHKSUM = "file://COPYING;md5=190c514872597083303371684954f238"
SRC_URI = "https://curl.se/download/${BP}.tar.xz \
@@ -23,12 +23,50 @@ SRC_URI = "https://curl.se/download/${BP}.tar.xz \
file://CVE-2022-27779.patch \
file://CVE-2022-27782-1.patch \
file://CVE-2022-27782-2.patch \
+ file://0001-openssl-fix-CN-check-error-code.patch \
+ file://CVE-2022-32205.patch \
+ file://CVE-2022-32206.patch \
+ file://CVE-2022-32207.patch \
+ file://CVE-2022-32208.patch \
+ file://CVE-2022-35252.patch \
+ file://CVE-2022-32221.patch \
+ file://CVE-2022-42916.patch \
+ file://CVE-2022-42915.patch \
+ file://CVE-2022-43551.patch \
+ file://CVE-2022-43552.patch \
+ file://CVE-2023-23914_5-1.patch \
+ file://CVE-2023-23914_5-2.patch \
+ file://CVE-2023-23914_5-3.patch \
+ file://CVE-2023-23914_5-4.patch \
+ file://CVE-2023-23914_5-5.patch \
+ file://CVE-2023-23916.patch \
+ file://CVE-2023-27533.patch \
+ file://CVE-2023-27534.patch \
+ file://CVE-2023-27535-pre1.patch \
+ file://CVE-2023-27535_and_CVE-2023-27538.patch \
+ file://CVE-2023-27536.patch \
+ file://CVE-2023-28319.patch \
+ file://CVE-2023-28320.patch \
+ file://CVE-2023-28320-fol1.patch \
+ file://CVE-2023-28321.patch \
+ file://CVE-2023-28322-1.patch \
+ file://CVE-2023-28322-2.patch \
+ file://CVE-2023-38545.patch \
+ file://CVE-2023-38546.patch \
+ file://CVE-2023-46218.patch \
+ file://CVE-2023-46219-0001.patch \
+ file://CVE-2023-46219-0002.patch \
+ file://CVE-2023-46219-0003.patch \
+ file://CVE-2024-2398.patch \
"
SRC_URI[sha256sum] = "0aaa12d7bd04b0966254f2703ce80dd5c38dbbd76af0297d3d690cdce58a583c"
# Curl has used many names over the years...
CVE_PRODUCT = "haxx:curl haxx:libcurl curl:curl curl:libcurl libcurl:libcurl daniel_stenberg:curl"
+# This CVE reports that apple had to upgrade curl because of other already reported CVEs
+CVE_CHECK_IGNORE += "CVE-2023-42915"
+
inherit autotools pkgconfig binconfig multilib_header
# Entropy source for random PACKAGECONFIG option
@@ -42,14 +80,16 @@ PACKAGECONFIG:class-nativesdk = "ipv6 openssl proxy random threaded-resolver ver
PACKAGECONFIG[ares] = "--enable-ares,--disable-ares,c-ares,,,threaded-resolver"
PACKAGECONFIG[brotli] = "--with-brotli,--without-brotli,brotli"
PACKAGECONFIG[builtinmanual] = "--enable-manual,--disable-manual"
+# Don't use this in production
+PACKAGECONFIG[debug] = "--enable-debug,--disable-debug"
PACKAGECONFIG[dict] = "--enable-dict,--disable-dict,"
PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls"
PACKAGECONFIG[gopher] = "--enable-gopher,--disable-gopher,"
PACKAGECONFIG[imap] = "--enable-imap,--disable-imap,"
PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6,"
PACKAGECONFIG[krb5] = "--with-gssapi,--without-gssapi,krb5"
-PACKAGECONFIG[ldap] = "--enable-ldap,--disable-ldap,"
-PACKAGECONFIG[ldaps] = "--enable-ldaps,--disable-ldaps,"
+PACKAGECONFIG[ldap] = "--enable-ldap,--disable-ldap,openldap"
+PACKAGECONFIG[ldaps] = "--enable-ldaps,--disable-ldaps,openldap"
PACKAGECONFIG[libgsasl] = "--with-libgsasl,--without-libgsasl,libgsasl"
PACKAGECONFIG[libidn] = "--with-libidn2,--without-libidn2,libidn2"
PACKAGECONFIG[libssh2] = "--with-libssh2,--without-libssh2,libssh2"
@@ -78,9 +118,7 @@ EXTRA_OECONF = " \
--enable-crypto-auth \
--with-ca-bundle=${sysconfdir}/ssl/certs/ca-certificates.crt \
--without-libpsl \
- --enable-debug \
--enable-optimize \
- --disable-curldebug \
${@'--without-ssl' if (bb.utils.filter('PACKAGECONFIG', 'gnutls mbedtls nss openssl', d) == '') else ''} \
"
diff --git a/meta/recipes-support/fribidi/fribidi_1.0.12.bb b/meta/recipes-support/fribidi/fribidi_1.0.13.bb
index b29c47822f..cdcac9315b 100644
--- a/meta/recipes-support/fribidi/fribidi_1.0.12.bb
+++ b/meta/recipes-support/fribidi/fribidi_1.0.13.bb
@@ -11,7 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=a916467b91076e631dd8edb7424769c7"
SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/v${PV}/${BP}.tar.xz \
"
-SRC_URI[sha256sum] = "0cd233f97fc8c67bb3ac27ce8440def5d3ffacf516765b91c2cc654498293495"
+SRC_URI[sha256sum] = "7fa16c80c81bd622f7b198d31356da139cc318a63fc7761217af4130903f54a2"
UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases"
diff --git a/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch b/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch
index b58fbfe6f5..c4ede9ea5e 100644
--- a/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch
+++ b/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch
@@ -1,4 +1,4 @@
-From bdde1faa774753e29d582d79186e08a38597de9e Mon Sep 17 00:00:00 2001
+From 89b98553084fbefe1ef2c7cbff9e72cf43144c49 Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex.kanavin@gmail.com>
Date: Mon, 22 Jan 2018 18:00:21 +0200
Subject: [PATCH] configure.ac: use a custom value for the location of
@@ -14,10 +14,10 @@ Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configure.ac b/configure.ac
-index 5cdd316..e5f2d6a 100644
+index d86c60e..65c22b2 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -1962,7 +1962,7 @@ AC_DEFINE_UNQUOTED(GPGCONF_DISP_NAME, "GPGConf",
+@@ -1955,7 +1955,7 @@ AC_DEFINE_UNQUOTED(GPGCONF_DISP_NAME, "GPGConf",
AC_DEFINE_UNQUOTED(GPGTAR_NAME, "gpgtar", [The name of the gpgtar tool])
diff --git a/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch b/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch
deleted file mode 100644
index b4106d3620..0000000000
--- a/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 9ace8f1b68ab708c44dce4c0152b975fbceb0398 Mon Sep 17 00:00:00 2001
-From: Saul Wold <sgw@linux.intel.com>
-Date: Wed, 16 Aug 2017 11:18:01 +0800
-Subject: [PATCH] dirmngr uses libgpg error
-
-Upstream-Status: Pending
-Signed-off-by: Saul Wold <sgw@linux.intel.com>
-
-Rebase to 2.1.23
-
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
-
----
- dirmngr/Makefile.am | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/dirmngr/Makefile.am b/dirmngr/Makefile.am
-index 77ca3f5..1446775 100644
---- a/dirmngr/Makefile.am
-+++ b/dirmngr/Makefile.am
-@@ -86,7 +86,7 @@ endif
- dirmngr_LDADD = $(libcommonpth) \
- $(DNSLIBS) $(LIBASSUAN_LIBS) \
- $(LIBGCRYPT_LIBS) $(KSBA_LIBS) $(NPTH_LIBS) \
-- $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) $(NETLIBS)
-+ $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) $(NETLIBS) $(GPG_ERROR_LIBS)
- if USE_LDAP
- dirmngr_LDADD += $(ldaplibs)
- endif
diff --git a/meta/recipes-support/gnupg/gnupg/relocate.patch b/meta/recipes-support/gnupg/gnupg/relocate.patch
index 74f48e9582..43999b8a6d 100644
--- a/meta/recipes-support/gnupg/gnupg/relocate.patch
+++ b/meta/recipes-support/gnupg/gnupg/relocate.patch
@@ -1,4 +1,4 @@
-From 1e34e1d477f843c0ee2f1a3fddc20201f0233e81 Mon Sep 17 00:00:00 2001
+From 89ae4f03307104689e1857d9857d452af6b35ac4 Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Wed, 19 Sep 2018 14:44:40 +0100
Subject: [PATCH] Allow the environment to override where gnupg looks for its
@@ -14,10 +14,10 @@ Signed-off-by: Alexander Kanavin <alex@linutronix.de>
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/common/homedir.c b/common/homedir.c
-index 174d961..f4c25fb 100644
+index 260aeb2..1aeb08d 100644
--- a/common/homedir.c
+++ b/common/homedir.c
-@@ -1161,7 +1161,7 @@ gnupg_socketdir (void)
+@@ -1143,7 +1143,7 @@ gnupg_socketdir (void)
if (!name)
{
unsigned int dummy;
@@ -26,7 +26,7 @@ index 174d961..f4c25fb 100644
gpgrt_annotate_leaked_object (name);
}
-@@ -1193,7 +1193,7 @@ gnupg_sysconfdir (void)
+@@ -1175,7 +1175,7 @@ gnupg_sysconfdir (void)
if (dir)
return dir;
else
@@ -35,7 +35,7 @@ index 174d961..f4c25fb 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -1229,7 +1229,7 @@ gnupg_bindir (void)
+@@ -1211,7 +1211,7 @@ gnupg_bindir (void)
return name;
}
else
@@ -44,7 +44,7 @@ index 174d961..f4c25fb 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -1256,7 +1256,7 @@ gnupg_libexecdir (void)
+@@ -1238,7 +1238,7 @@ gnupg_libexecdir (void)
return name;
}
else
@@ -53,7 +53,7 @@ index 174d961..f4c25fb 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -1286,7 +1286,7 @@ gnupg_libdir (void)
+@@ -1268,7 +1268,7 @@ gnupg_libdir (void)
return name;
}
else
@@ -62,7 +62,7 @@ index 174d961..f4c25fb 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -1317,7 +1317,7 @@ gnupg_datadir (void)
+@@ -1299,7 +1299,7 @@ gnupg_datadir (void)
return name;
}
else
@@ -71,7 +71,7 @@ index 174d961..f4c25fb 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -1349,7 +1349,7 @@ gnupg_localedir (void)
+@@ -1331,7 +1331,7 @@ gnupg_localedir (void)
return name;
}
else
diff --git a/meta/recipes-support/gnupg/gnupg_2.3.4.bb b/meta/recipes-support/gnupg/gnupg_2.3.7.bb
index d27bddb8bd..da2b1c4deb 100644
--- a/meta/recipes-support/gnupg/gnupg_2.3.4.bb
+++ b/meta/recipes-support/gnupg/gnupg_2.3.7.bb
@@ -16,7 +16,6 @@ inherit autotools gettext texinfo pkgconfig
UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html"
SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://0002-use-pkgconfig-instead-of-npth-config.patch \
- file://0003-dirmngr-uses-libgpg-error.patch \
file://0004-autogen.sh-fix-find-version-for-beta-checking.patch \
file://0001-Woverride-init-is-not-needed-with-gcc-9.patch \
"
@@ -24,7 +23,7 @@ SRC_URI:append:class-native = " file://0001-configure.ac-use-a-custom-value-for-
file://relocate.patch"
SRC_URI:append:class-nativesdk = " file://relocate.patch"
-SRC_URI[sha256sum] = "f3468ecafb1d7f9ad7b51fd1db7aebf17ceb89d2efa8a05cf2f39b4d405402ae"
+SRC_URI[sha256sum] = "ee163a5fb9ec99ffc1b18e65faef8d086800c5713d15a672ab57d3799da83669"
EXTRA_OECONF = "--disable-ldap \
--disable-ccid-driver \
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch b/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch
new file mode 100644
index 0000000000..c1c1def194
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch
@@ -0,0 +1,282 @@
+From 8161fec931f416f5ca6aa31bb53751e140a93046 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 16 Aug 2022 16:56:15 +0530
+Subject: [PATCH] CVE-2022-2509
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/ce37f9eb265dbe9b6d597f5767449e8ee95848e2]
+CVE: CVE-2022-2509
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ NEWS | 4 +
+ lib/x509/pkcs7.c | 3 +-
+ tests/Makefile.am | 2 +-
+ tests/pkcs7-verify-double-free.c | 215 +++++++++++++++++++++++++++++++
+ 4 files changed, 222 insertions(+), 2 deletions(-)
+ create mode 100644 tests/pkcs7-verify-double-free.c
+
+diff --git a/NEWS b/NEWS
+index 36381f0..02c4040 100644
+--- a/NEWS
++++ b/NEWS
+@@ -7,6 +7,10 @@ See the end for copying conditions.
+
+ * Version 3.7.4 (released 2022-03-17)
+
++** libgnutls: Fixed double free during verification of pkcs7 signatures.
++ Reported by Jaak Ristioja (#1383). [GNUTLS-SA-2022-07-07, CVSS: medium]
++ [CVE-2022-2509]
++
+ ** libgnutls: Added support for certificate compression as defined in RFC8879.
+ ** certtool: Added option --compress-cert that allows user to specify compression
+ methods for certificate compression.
+diff --git a/lib/x509/pkcs7.c b/lib/x509/pkcs7.c
+index 1f35fab..d5be7f4 100644
+--- a/lib/x509/pkcs7.c
++++ b/lib/x509/pkcs7.c
+@@ -1318,7 +1318,8 @@ gnutls_x509_crt_t find_signer(gnutls_pkcs7_t pkcs7, gnutls_x509_trust_list_t tl,
+ issuer = find_verified_issuer_of(pkcs7, issuer, purpose, vflags);
+
+ if (issuer != NULL && gnutls_x509_crt_check_issuer(issuer, issuer)) {
+- if (prev) gnutls_x509_crt_deinit(prev);
++ if (prev && prev != signer)
++ gnutls_x509_crt_deinit(prev);
+ prev = issuer;
+ break;
+ }
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index cec0a4e..b3cb56c 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -230,7 +230,7 @@ ctests += mini-record-2 simple gnutls_hmac_fast set_pkcs12_cred cert certuniquei
+ sign-verify-newapi sign-verify-deterministic iov aead-cipher-vec \
+ tls13-without-timeout-func buffer status-request-revoked \
+ set_x509_ocsp_multi_cli kdf-api keylog-func handshake-write \
+- x509cert-dntypes id-on-xmppAddr tls13-compat-mode ciphersuite-name
++ x509cert-dntypes id-on-xmppAddr tls13-compat-mode ciphersuite-name pkcs7-verify-double-free
+
+ ctests += tls-channel-binding
+
+diff --git a/tests/pkcs7-verify-double-free.c b/tests/pkcs7-verify-double-free.c
+new file mode 100644
+index 0000000..fadf307
+--- /dev/null
++++ b/tests/pkcs7-verify-double-free.c
+@@ -0,0 +1,215 @@
++/*
++ * Copyright (C) 2022 Red Hat, Inc.
++ *
++ * Author: Zoltan Fridrich
++ *
++ * This file is part of GnuTLS.
++ *
++ * GnuTLS is free software: you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GnuTLS is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GnuTLS. If not, see <https://www.gnu.org/licenses/>.
++ */
++
++#ifdef HAVE_CONFIG_H
++#include <config.h>
++#endif
++
++#include <stdio.h>
++#include <gnutls/pkcs7.h>
++#include <gnutls/x509.h>
++
++#include "utils.h"
++
++static char rca_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDCjCCAfKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQKDApFeGFt\n"
++ "cGxlIENBMCAXDTE3MDcyMTE0NDMzNloYDzIyMjIwNzIxMTQ0MzM2WjAVMRMwEQYD\n"
++ "VQQKDApFeGFtcGxlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\n"
++ "v8hnKPJ/IA0SQB/A/a0Uh+npZ67vsgIMrtTQo0r0kJkmkBz5323xO3DVuJfB3QmX\n"
++ "v9zvoeCQLuDvWar5Aixfxgm6s5Q+yPvJj9t3NebDrU+Y4+qyewBIJUF8EF/5iBPC\n"
++ "ZHONmzbfIRWvQWGGgb2CRcOHp2J7AY/QLB6LsWPaLjs/DHva28Q13JaTTHIpdu8v\n"
++ "t6vHr0nXf66DN4MvtoF3N+o+v3snJCMsfXOqASi4tbWR7gtOfCfiz9uBjh0W2Dut\n"
++ "/jclBQkJkLe6esNSM+f4YiOpctVDjmfj8yoHCp394vt0wFqhG38wsTFAyVP6qIcf\n"
++ "5zoSu9ovEt2cTkhnZHjiiwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud\n"
++ "DwEB/wQEAwIBBjAdBgNVHQ4EFgQUhjeO6Uc5imbjOl2I2ltVA27Hu9YwHwYDVR0j\n"
++ "BBgwFoAUhjeO6Uc5imbjOl2I2ltVA27Hu9YwDQYJKoZIhvcNAQELBQADggEBAD+r\n"
++ "i/7FsbG0OFKGF2+JOnth6NjJQcMfM8LiglqAuBUijrv7vltoZ0Z3FJH1Vi4OeMXn\n"
++ "l7X/9tWUve0uFl75MfjDrf0+lCEdYRY1LCba2BrUgpbbkLywVUdnbsvndehegCgS\n"
++ "jss2/zys3Hlo3ZaHlTMQ/NQ4nrxcxkjOvkZSEOqgxJTLpzm6pr7YUts4k6c6lNiB\n"
++ "FSiJiDzsJCmWR9C3fBbUlfDfTJYGN3JwqX270KchXDElo8gNoDnF7jBMpLFFSEKm\n"
++ "MyfbNLX/srh+CEfZaN/OZV4A3MQ0L8vQEp6M4CJhvRLIuMVabZ2coJ0AzystrOMU\n"
++ "LirBWjg89RoAjFQ7bTE=\n"
++ "-----END CERTIFICATE-----\n";
++
++static char ca_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDFzCCAf+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQKDApFeGFt\n"
++ "cGxlIENBMCAXDTE3MDcyMTE0NDQzNFoYDzIyMjIwNzIxMTQ0NDM0WjAiMSAwHgYD\n"
++ "VQQKDBdFeGFtcGxlIGludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQAD\n"
++ "ggEPADCCAQoCggEBAKb9ACB8u//sP6MfNU1OsVw68xz3eTPLgKxS0vpqexm6iGVg\n"
++ "ug/o9uYRLzqiEukv/eyz9WzHmY7sqlOJjOFdv92+SaNg79Jc51WHPFXgea4/qyfr\n"
++ "4y14PGs0SNxm6T44sXurUs7cXydQVUgnq2VCaWFOTUdxXoAWkV8r8GaUoPD/klVz\n"
++ "RqxSZVETmX1XBKhsMnnov41kRwVph2C+VfUspsbaUZaz/o/S1/nokhXRACzKsMBr\n"
++ "obqiGxbY35uVzsmbAW5ErhQz98AWJL3Bub1fsEMXg6OEMmPH4AtX888dTIYZNw0E\n"
++ "bUIESspz1kjJQTtVQDHTprhwz16YiSVeUonlLgMCAwEAAaNjMGEwDwYDVR0TAQH/\n"
++ "BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPBjxDWjMhjXERirKF9O\n"
++ "o/5Cllc5MB8GA1UdIwQYMBaAFIY3julHOYpm4zpdiNpbVQNux7vWMA0GCSqGSIb3\n"
++ "DQEBCwUAA4IBAQCTm+vv3hBa6lL5IT+Fw8aTxQ2Ne7mZ5oyazhvXYwwfKNMX3SML\n"
++ "W2JdPaL64ZwbxxxYvW401o5Z0CEgru3YFrsqB/hEdl0Uf8UWWJmE1rRa+miTmbjt\n"
++ "lrLNCWdrs6CiwvsPITTHg7jevB4KyZYsTSxQFcyr3N3xF+6EmOTC4IkhPPnXYXcp\n"
++ "248ih+WOavSYoRvzgB/Dip1WnPYU2mfIV3O8JReRryngA0TzWCLPLUoWR3R4jwtC\n"
++ "+1uSLoqaenz3qv3F1WEbke37az9YJuXx/5D8CqFQiZ62TUUtI6fYd8mkMBM4Qfh6\n"
++ "NW9XrCkI9wlpL5K9HllhuW0BhKeJkuPpyQ2p\n"
++ "-----END CERTIFICATE-----\n";
++
++static char ee_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQKDBdFeGFt\n"
++ "cGxlIGludGVybWVkaWF0ZSBDQTAgFw0yMjA3MjExNDQ1MzdaGA8yMjIyMDcyMTE0\n"
++ "NDUzN1owFTETMBEGA1UEAwwKSm9obiBTbWl0aDCCASIwDQYJKoZIhvcNAQEBBQAD\n"
++ "ggEPADCCAQoCggEBAMb1uuxppBFY+WVD45iyHUq7DkIJNNOI/JRaybVJfPktWq2E\n"
++ "eNe7XhV05KKnqZTbDO2iYqNHqGhZ8pz/IstDRTZP3z/q1vXTG0P9Gx28rEy5TaUY\n"
++ "QjtD+ZoFUQm0ORMDBjd8jikqtJ87hKeuOPMH4rzdydotMaPQSm7KLzHBGBr6gg7z\n"
++ "g1IxPWkhMyHapoMqqrhjwjzoTY97UIXpZTEoIA+KpEC8f9CciBtL0i1MPBjWozB6\n"
++ "Jma9q5iEwZXuRr3cnPYeIPlK2drgDZCMuSFcYiT8ApLw5OhKqY1m2EvfZ2ox2s9R\n"
++ "68/HzYdPi3kZwiNEtlBvMlpt5yKBJAflp76d7DkCAwEAAaNuMGwwCwYDVR0PBAQD\n"
++ "AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDAdBgNVHQ4EFgQUc+Mi\n"
++ "kr8WMCk00SQo+P2iggp/oQkwHwYDVR0jBBgwFoAU8GPENaMyGNcRGKsoX06j/kKW\n"
++ "VzkwDQYJKoZIhvcNAQELBQADggEBAKU9+CUR0Jcfybd1+8Aqgh1RH96yQygnVuyt\n"
++ "Na9rFz4fM3ij9tGXDHXrkZw8bW1dWLU9quu8zeTxKxc3aiDIw739Alz0tukttDo7\n"
++ "dW7YqIb77zsIsWB9p7G9dlxT6ieUy+5IKk69BbeK8KR0vAciAG4KVQxPhuPy/LGX\n"
++ "PzqlJIJ4h61s3UOroReHPB1keLZgpORqrvtpClOmABH9TLFRJA/WFg8Q2XYB/p0x\n"
++ "l/pWiaoBC+8wK9cDoMUK5yOwXeuCLffCb+UlAD0+z/qxJ2pisE8E9X8rRKRrWI+i\n"
++ "G7LtJCEn86EQK8KuRlJxKgj8lClZhoULB0oL4jbblBuNow9WRmM=\n"
++ "-----END CERTIFICATE-----\n";
++
++static char msg_pem[] =
++ "-----BEGIN PKCS7-----\n"
++ "MIIK2QYJKoZIhvcNAQcCoIIKyjCCCsYCAQExDTALBglghkgBZQMEAgEwCwYJKoZI\n"
++ "hvcNAQcBoIIJTzCCAwowggHyoAMCAQICAQEwDQYJKoZIhvcNAQELBQAwFTETMBEG\n"
++ "A1UECgwKRXhhbXBsZSBDQTAgFw0xNzA3MjExNDQzMjFaGA8yMjIyMDcyMTE0NDMy\n"
++ "MVowFTETMBEGA1UECgwKRXhhbXBsZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP\n"
++ "ADCCAQoCggEBAL51eyE4j8wAKQKMGlO9HEY2iaGvsdPSJmidSdmCi1jnNK39Lx4Y\n"
++ "31h279hSHF5wtI6VM91HHfeLf1mjEZHlKrXXJQzBPLpbHWapD778drHBitOP8e56\n"
++ "fDMIfofLV4tkMk8690vPe4cJH1UHGspMyz6EQF9kPRaW80XtMV/6dalgL/9Esmaw\n"
++ "XBNPJAS1VutDuXQkJ/3/rWFLmkpYHHtGPjX782YRmT1s+VOVTsLqmKx0TEL8A381\n"
++ "bbElHPUAMjPcyWR5qqA8KWnS5Dwqk3LwI0AvuhQytCq0S7Xl4DXauvxwTRXv0UU7\n"
++ "W8r3MLAw9DnlnJiD/RFjw5rbGO3wMePk/qUCAwEAAaNjMGEwDwYDVR0TAQH/BAUw\n"
++ "AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFIh2KRoKJoe2VtpOwWMkRAkR\n"
++ "mLWKMB8GA1UdIwQYMBaAFIh2KRoKJoe2VtpOwWMkRAkRmLWKMA0GCSqGSIb3DQEB\n"
++ "CwUAA4IBAQBovvlOjoy0MCT5U0eWfcPQQjY4Ssrn3IiPNlVkqSNo+FHX+2baTLVQ\n"
++ "5QTHxwXwzdIJiwtjFWDdGEQXqmuIvnFG+u/whGbeg6oQygfnQ5Y+q6epOxCsPgLQ\n"
++ "mKKEaF7mvh8DauUx4QSbYCNGCctOZuB1vlN9bJ3/5QbH+2pFPOfCr5CAyPDwHo6S\n"
++ "qO3yPcutRwT9xS7gXEHM9HhLp+DmdCGh4eVBPiFilyZm1d92lWxU8oxoSfXgzDT/\n"
++ "GCzlMykNZNs4JD9QmiRClP/3U0dQbOhah/Fda+N+L90xaqEgGcvwKKZa3pzo59pl\n"
++ "BbkcIP4YPyHeinwkgAn5UVJg9DOxNCS0MIIDFzCCAf+gAwIBAgIBAjANBgkqhkiG\n"
++ "9w0BAQsFADAVMRMwEQYDVQQKDApFeGFtcGxlIENBMCAXDTE3MDcyMTE0NDQxM1oY\n"
++ "DzIyMjIwNzIxMTQ0NDEzWjAiMSAwHgYDVQQKDBdFeGFtcGxlIGludGVybWVkaWF0\n"
++ "ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMPFDEvDANwvhviu\n"
++ "pwXTvaKyxyX94jVu1wgAhIRyQBVRiMbrn8MEufLG8oA0vKd8s92gv/lWe1jFb2rn\n"
++ "91jMkZWsjWjiJFD6SzqFfBo+XxOGikEqO1MAf92UqavmSGlXVRG1Vy7T7dWibZP0\n"
++ "WODhHYWayR0Y6owSz5IqNfrHXzDME+lSJxHgRFI7pK+b0OgiVmvyXDKFPvyU6GrP\n"
++ "lxXDi/XbjyPvC5gpiwtTgm+s8KERwmdlfZUNjkh2PpHx1g1joijHT3wIvO/Pek1E\n"
++ "C+Xs6w3XxGgL6TTL7FDuv4AjZVX9KK66/yBhX3aN8bkqAg+hs9XNk3zzWC0XEFOS\n"
++ "Qoh2va0CAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw\n"
++ "HQYDVR0OBBYEFHwi/7dUWGjkMWJctOm7MCjjQj1cMB8GA1UdIwQYMBaAFIh2KRoK\n"
++ "Joe2VtpOwWMkRAkRmLWKMA0GCSqGSIb3DQEBCwUAA4IBAQCF6sHCBdYRwBwvfCve\n"
++ "og9cPnmPqZrG4AtmSvtoSsMvgvKb/4z3/gG8oPtTBkeRcAHoMoEp/oA+B2ylwIAc\n"
++ "S5U7jx+lYH/Pqih0X/OcOLbaMv8uzGSGQxk+L9LuuIT6E/THfRRIPEvkDkzC+/uk\n"
++ "7vUbG17bSEWeF0o/6sjzAY2aH1jnbCDyu0UC78GXkc6bZ5QlH98uLMDMrOmqcZjS\n"
++ "JFfvuRDQyKV5yBdBkYaobsIWSQDsgYxJzf/2y8c3r+HXqT+jhrXPWJ3btgMPxpu7\n"
++ "E8KmoFgp9EM+48oYlXJ66rk08/KjaVmgN7R+Hm3e2+MFT2kme4fBKalLjcazTe3x\n"
++ "0FisMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQKDBdF\n"
++ "eGFtcGxlIGludGVybWVkaWF0ZSBDQTAgFw0yMjA3MjExNDQ1MzBaGA8yMjIyMDcy\n"
++ "MTE0NDUzMVowFTETMBEGA1UEAwwKSm9obiBTbWl0aDCCASIwDQYJKoZIhvcNAQEB\n"
++ "BQADggEPADCCAQoCggEBAMjhSqhdD5RjmOm6W3hG7zkgKBP9whRN/SipcdEMlkgc\n"
++ "F/U3QMu66qIfKwheNdWalC1JLtruLDWP92ysa6Vw+CCG8aSax1AgB//RKQB7kgPA\n"
++ "9js9hi/oCdBmCv2HJxhWSLz+MVoxgzW4C7S9FenI+btxe/99Uw4nOw7kwjsYDLKr\n"
++ "tMw8myv7aCW/63CuBYGtohiZupM3RI3kKFcZots+KRPLlZpjv+I2h9xSln8VxKNb\n"
++ "XiMrYwGfHB7iX7ghe1TvFjKatEUhsqa7AvIq7nfe/cyq97f0ODQO814njgZtk5iQ\n"
++ "JVavXHdhTVaypt1HdAFMuHX5UATylHxx9tRCgSIijUsCAwEAAaNuMGwwCwYDVR0P\n"
++ "BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDAdBgNVHQ4EFgQU\n"
++ "31+vHl4E/2Jpnwinbzf+d7usshcwHwYDVR0jBBgwFoAUfCL/t1RYaOQxYly06bsw\n"
++ "KONCPVwwDQYJKoZIhvcNAQELBQADggEBAAWe63DcNwmleQ3INFGDJZ/m2I/R/cBa\n"
++ "nnrxgR5Ey1ljHdA/x1z1JLTGmGVwqGExs5DNG9Q//Pmc9pZ1yPa8J4Xf8AvFcmkY\n"
++ "mWoH1HvW0xu/RF1UN5SAoD2PRQ+Vq4OSPD58IlEu/u4o1wZV7Wl91Cv6VNpiAb63\n"
++ "j9PA1YacOpOtcRqG59Vuj9HFm9f30ejHVo2+KJcpo290cR3Zg4fOm8mtjeMdt/QS\n"
++ "Atq+RqPAQ7yxqvEEv8zPIZj2kAOQm3mh/yYqBrR68lQUD/dBTP7ApIZkhUK3XK6U\n"
++ "nf9JvoF6Fn2+Cnqb//FLBgHSnoeqeQNwDLUXTsD02iYxHzJrhokSY4YxggFQMIIB\n"
++ "TAIBATAnMCIxIDAeBgNVBAoMF0V4YW1wbGUgaW50ZXJtZWRpYXRlIENBAgEBMAsG\n"
++ "CWCGSAFlAwQCATANBgkqhkiG9w0BAQEFAASCAQATHg6wNsBcs/Ub1GQfKwTpKCk5\n"
++ "8QXuNnZ0u7b6mKgrSY2Gf47fpL2aRgaR+BAQncbctu5EH/IL38pWjaGtOhFAj/5q\n"
++ "7luVQW11kuyJN3Bd/dtLqawWOwMmAIEigw6X50l5ZHnEVzFfxt+RKTNhk4XWVtbi\n"
++ "2iIlITOplW0rnvxYAwCxKL9ocaB7etK8au7ixMxbFp75Ts4iLX8dhlAFdCuFCk8k\n"
++ "B8mi9HHuwr3QYRqMPW61hu1wBL3yB8eoZNOwPXb0gkIh6ZvgptxgQzm/cc+Iw9fP\n"
++ "QkR0fTM7ElJ5QZmSV98AUbZDHmDvpmcjcUxfSPMc3IoT8T300usRu7QHqKJi\n"
++ "-----END PKCS7-----\n";
++
++const gnutls_datum_t rca_datum = { (void *)rca_pem, sizeof(rca_pem) - 1 };
++const gnutls_datum_t ca_datum = { (void *)ca_pem, sizeof(ca_pem) - 1 };
++const gnutls_datum_t ee_datum = { (void *)ee_pem, sizeof(ee_pem) - 1 };
++const gnutls_datum_t msg_datum = { (void *)msg_pem, sizeof(msg_pem) - 1 };
++
++static void tls_log_func(int level, const char *str)
++{
++ fprintf(stderr, "%s |<%d>| %s", "err", level, str);
++}
++
++#define CHECK(X)\
++{\
++ r = X;\
++ if (r < 0)\
++ fail("error in %d: %s\n", __LINE__, gnutls_strerror(r));\
++}\
++
++void doit(void)
++{
++ int r;
++ gnutls_x509_crt_t rca_cert = NULL;
++ gnutls_x509_crt_t ca_cert = NULL;
++ gnutls_x509_crt_t ee_cert = NULL;
++ gnutls_x509_trust_list_t tlist = NULL;
++ gnutls_pkcs7_t pkcs7 = NULL;
++ gnutls_datum_t data = { (unsigned char *)"xxx", 3 };
++
++ if (debug) {
++ gnutls_global_set_log_function(tls_log_func);
++ gnutls_global_set_log_level(4711);
++ }
++
++ // Import certificates
++ CHECK(gnutls_x509_crt_init(&rca_cert));
++ CHECK(gnutls_x509_crt_import(rca_cert, &rca_datum, GNUTLS_X509_FMT_PEM));
++ CHECK(gnutls_x509_crt_init(&ca_cert));
++ CHECK(gnutls_x509_crt_import(ca_cert, &ca_datum, GNUTLS_X509_FMT_PEM));
++ CHECK(gnutls_x509_crt_init(&ee_cert));
++ CHECK(gnutls_x509_crt_import(ee_cert, &ee_datum, GNUTLS_X509_FMT_PEM));
++
++ // Setup trust store
++ CHECK(gnutls_x509_trust_list_init(&tlist, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, rca_cert, "rca", 3, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, ca_cert, "ca", 2, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, ee_cert, "ee", 2, 0));
++
++ // Setup pkcs7 structure
++ CHECK(gnutls_pkcs7_init(&pkcs7));
++ CHECK(gnutls_pkcs7_import(pkcs7, &msg_datum, GNUTLS_X509_FMT_PEM));
++
++ // Signature verification
++ gnutls_pkcs7_verify(pkcs7, tlist, NULL, 0, 0, &data, 0);
++
++ gnutls_x509_crt_deinit(rca_cert);
++ gnutls_x509_crt_deinit(ca_cert);
++ gnutls_x509_crt_deinit(ee_cert);
++ gnutls_x509_trust_list_deinit(tlist, 0);
++ gnutls_pkcs7_deinit(pkcs7);
++}
+--
+2.25.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch b/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch
new file mode 100644
index 0000000000..943f4ca704
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch
@@ -0,0 +1,85 @@
+From 80a6ce8ddb02477cd724cd5b2944791aaddb702a Mon Sep 17 00:00:00 2001
+From: Alexander Sosedkin <asosedkin@redhat.com>
+Date: Tue, 9 Aug 2022 16:05:53 +0200
+Subject: [PATCH] auth/rsa: side-step potential side-channel
+
+Signed-off-by: Alexander Sosedkin <asosedkin@redhat.com>
+Signed-off-by: Hubert Kario <hkario@redhat.com>
+Tested-by: Hubert Kario <hkario@redhat.com>
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/80a6ce8ddb02477cd724cd5b2944791aaddb702a
+ https://gitlab.com/gnutls/gnutls/-/commit/4b7ff428291c7ed77c6d2635577c83a43bbae558]
+CVE: CVE-2023-0361
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/auth/rsa.c | 30 +++---------------------------
+ 1 file changed, 3 insertions(+), 27 deletions(-)
+
+diff --git a/lib/auth/rsa.c b/lib/auth/rsa.c
+index 8108ee8..858701f 100644
+--- a/lib/auth/rsa.c
++++ b/lib/auth/rsa.c
+@@ -155,13 +155,10 @@ static int
+ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ size_t _data_size)
+ {
+- const char attack_error[] = "auth_rsa: Possible PKCS #1 attack\n";
+ gnutls_datum_t ciphertext;
+ int ret, dsize;
+ ssize_t data_size = _data_size;
+ volatile uint8_t ver_maj, ver_min;
+- volatile uint8_t check_ver_min;
+- volatile uint32_t ok;
+
+ #ifdef ENABLE_SSL3
+ if (get_num_version(session) == GNUTLS_SSL3) {
+@@ -187,7 +184,6 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+
+ ver_maj = _gnutls_get_adv_version_major(session);
+ ver_min = _gnutls_get_adv_version_minor(session);
+- check_ver_min = (session->internals.allow_wrong_pms == 0);
+
+ session->key.key.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
+ if (session->key.key.data == NULL) {
+@@ -206,10 +202,9 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ return ret;
+ }
+
+- ret =
+- gnutls_privkey_decrypt_data2(session->internals.selected_key,
+- 0, &ciphertext, session->key.key.data,
+- session->key.key.size);
++ gnutls_privkey_decrypt_data2(session->internals.selected_key,
++ 0, &ciphertext, session->key.key.data,
++ session->key.key.size);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+ * channel that can be used as an oracle, so treat very carefully */
+@@ -225,25 +220,6 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ * Vlastimil Klima, Ondej Pokorny and Tomas Rosa.
+ */
+
+- /* ok is 0 in case of error and 1 in case of success. */
+-
+- /* if ret < 0 */
+- ok = CONSTCHECK_EQUAL(ret, 0);
+- /* session->key.key.data[0] must equal ver_maj */
+- ok &= CONSTCHECK_EQUAL(session->key.key.data[0], ver_maj);
+- /* if check_ver_min then session->key.key.data[1] must equal ver_min */
+- ok &= CONSTCHECK_NOT_EQUAL(check_ver_min, 0) &
+- CONSTCHECK_EQUAL(session->key.key.data[1], ver_min);
+-
+- if (ok) {
+- /* call logging function unconditionally so all branches are
+- * indistinguishable for timing and cache access when debug
+- * logging is disabled */
+- _gnutls_no_log("%s", attack_error);
+- } else {
+- _gnutls_debug_log("%s", attack_error);
+- }
+-
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+--
+2.25.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch b/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch
new file mode 100644
index 0000000000..33e498b8e5
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch
@@ -0,0 +1,206 @@
+Backport of:
+
+From 29d6298d0b04cfff970b993915db71ba3f580b6d Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Mon, 23 Oct 2023 09:26:57 +0900
+Subject: [PATCH] auth/rsa_psk: side-step potential side-channel
+
+This removes branching that depends on secret data, porting changes
+for regular RSA key exchange from
+4804febddc2ed958e5ae774de2a8f85edeeff538 and
+80a6ce8ddb02477cd724cd5b2944791aaddb702a. This also removes the
+allow_wrong_pms as it was used sorely to control debug output
+depending on the branching.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+Upstream-Status: Backport [import from debian https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/gnutls28/3.7.3-4ubuntu1.3/gnutls28_3.7.3-4ubuntu1.3.debian.tar.xz
+Upstream-Commit: https://gitlab.com/gnutls/gnutls/-/commit/29d6298d0b04cfff970b993915db71ba3f580b6d]
+CVE: CVE-2023-5981
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/auth/rsa.c | 2 +-
+ lib/auth/rsa_psk.c | 90 ++++++++++++++++++----------------------------
+ lib/gnutls_int.h | 4 ---
+ lib/priority.c | 1 -
+ 4 files changed, 35 insertions(+), 62 deletions(-)
+
+--- a/lib/auth/rsa.c
++++ b/lib/auth/rsa.c
+@@ -207,7 +207,7 @@ proc_rsa_client_kx(gnutls_session_t sess
+ session->key.key.size);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+- * channel that can be used as an oracle, so treat very carefully */
++ * channel that can be used as an oracle, so tread carefully */
+
+ /* Error handling logic:
+ * In case decryption fails then don't inform the peer. Just use the
+--- a/lib/auth/rsa_psk.c
++++ b/lib/auth/rsa_psk.c
+@@ -264,14 +264,13 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_se
+ {
+ gnutls_datum_t username;
+ psk_auth_info_t info;
+- gnutls_datum_t plaintext;
+ gnutls_datum_t ciphertext;
+ gnutls_datum_t pwd_psk = { NULL, 0 };
+ int ret, dsize;
+- int randomize_key = 0;
+ ssize_t data_size = _data_size;
+ gnutls_psk_server_credentials_t cred;
+ gnutls_datum_t premaster_secret = { NULL, 0 };
++ volatile uint8_t ver_maj, ver_min;
+
+ cred = (gnutls_psk_server_credentials_t)
+ _gnutls_get_cred(session, GNUTLS_CRD_PSK);
+@@ -327,71 +326,47 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_se
+ }
+ ciphertext.size = dsize;
+
+- ret =
+- gnutls_privkey_decrypt_data(session->internals.selected_key, 0,
+- &ciphertext, &plaintext);
+- if (ret < 0 || plaintext.size != GNUTLS_MASTER_SIZE) {
+- /* In case decryption fails then don't inform
+- * the peer. Just use a random key. (in order to avoid
+- * attack against pkcs-1 formatting).
+- */
+- gnutls_assert();
+- _gnutls_debug_log
+- ("auth_rsa_psk: Possible PKCS #1 format attack\n");
+- if (ret >= 0) {
+- gnutls_free(plaintext.data);
+- }
+- randomize_key = 1;
+- } else {
+- /* If the secret was properly formatted, then
+- * check the version number.
+- */
+- if (_gnutls_get_adv_version_major(session) !=
+- plaintext.data[0]
+- || (session->internals.allow_wrong_pms == 0
+- && _gnutls_get_adv_version_minor(session) !=
+- plaintext.data[1])) {
+- /* No error is returned here, if the version number check
+- * fails. We proceed normally.
+- * That is to defend against the attack described in the paper
+- * "Attacking RSA-based sessions in SSL/TLS" by Vlastimil Klima,
+- * Ondej Pokorny and Tomas Rosa.
+- */
+- gnutls_assert();
+- _gnutls_debug_log
+- ("auth_rsa: Possible PKCS #1 version check format attack\n");
+- }
+- }
++ ver_maj = _gnutls_get_adv_version_major(session);
++ ver_min = _gnutls_get_adv_version_minor(session);
+
++ premaster_secret.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
++ if (premaster_secret.data == NULL) {
++ gnutls_assert();
++ return GNUTLS_E_MEMORY_ERROR;
++ }
++ premaster_secret.size = GNUTLS_MASTER_SIZE;
+
+- if (randomize_key != 0) {
+- premaster_secret.size = GNUTLS_MASTER_SIZE;
+- premaster_secret.data =
+- gnutls_malloc(premaster_secret.size);
+- if (premaster_secret.data == NULL) {
+- gnutls_assert();
+- return GNUTLS_E_MEMORY_ERROR;
+- }
+-
+- /* we do not need strong random numbers here.
+- */
+- ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
+- premaster_secret.size);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+- } else {
+- premaster_secret.data = plaintext.data;
+- premaster_secret.size = plaintext.size;
++ /* Fallback value when decryption fails. Needs to be unpredictable. */
++ ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
++ premaster_secret.size);
++ if (ret < 0) {
++ gnutls_assert();
++ goto cleanup;
+ }
+
++ gnutls_privkey_decrypt_data2(session->internals.selected_key, 0,
++ &ciphertext, premaster_secret.data,
++ premaster_secret.size);
++ /* After this point, any conditional on failure that cause differences
++ * in execution may create a timing or cache access pattern side
++ * channel that can be used as an oracle, so tread carefully */
++
++ /* Error handling logic:
++ * In case decryption fails then don't inform the peer. Just use the
++ * random key previously generated. (in order to avoid attack against
++ * pkcs-1 formatting).
++ *
++ * If we get version mismatches no error is returned either. We
++ * proceed normally. This is to defend against the attack described
++ * in the paper "Attacking RSA-based sessions in SSL/TLS" by
++ * Vlastimil Klima, Ondej Pokorny and Tomas Rosa.
++ */
++
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+-
+- premaster_secret.data[0] = _gnutls_get_adv_version_major(session);
+- premaster_secret.data[1] = _gnutls_get_adv_version_minor(session);
++ premaster_secret.data[0] = ver_maj;
++ premaster_secret.data[1] = ver_min;
+
+ /* find the key of this username
+ */
+--- a/lib/gnutls_int.h
++++ b/lib/gnutls_int.h
+@@ -974,7 +974,6 @@ struct gnutls_priority_st {
+ bool _no_etm;
+ bool _no_ext_master_secret;
+ bool _allow_key_usage_violation;
+- bool _allow_wrong_pms;
+ bool _dumbfw;
+ unsigned int _dh_prime_bits; /* old (deprecated) variable */
+
+@@ -992,7 +991,6 @@ struct gnutls_priority_st {
+ (x)->no_etm = 1; \
+ (x)->no_ext_master_secret = 1; \
+ (x)->allow_key_usage_violation = 1; \
+- (x)->allow_wrong_pms = 1; \
+ (x)->dumbfw = 1
+
+ #define ENABLE_PRIO_COMPAT(x) \
+@@ -1001,7 +999,6 @@ struct gnutls_priority_st {
+ (x)->_no_etm = 1; \
+ (x)->_no_ext_master_secret = 1; \
+ (x)->_allow_key_usage_violation = 1; \
+- (x)->_allow_wrong_pms = 1; \
+ (x)->_dumbfw = 1
+
+ /* DH and RSA parameters types.
+@@ -1126,7 +1123,6 @@ typedef struct {
+ bool no_etm;
+ bool no_ext_master_secret;
+ bool allow_key_usage_violation;
+- bool allow_wrong_pms;
+ bool dumbfw;
+
+ /* old (deprecated) variable. This is used for both srp_prime_bits
+--- a/lib/priority.c
++++ b/lib/priority.c
+@@ -690,7 +690,6 @@ gnutls_priority_set(gnutls_session_t ses
+ COPY_TO_INTERNALS(no_etm);
+ COPY_TO_INTERNALS(no_ext_master_secret);
+ COPY_TO_INTERNALS(allow_key_usage_violation);
+- COPY_TO_INTERNALS(allow_wrong_pms);
+ COPY_TO_INTERNALS(dumbfw);
+ COPY_TO_INTERNALS(dh_prime_bits);
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch b/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch
new file mode 100644
index 0000000000..f15c470879
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch
@@ -0,0 +1,125 @@
+From 40dbbd8de499668590e8af51a15799fbc430595e Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Wed, 10 Jan 2024 19:13:17 +0900
+Subject: [PATCH] rsa-psk: minimize branching after decryption
+
+This moves any non-trivial code between gnutls_privkey_decrypt_data2
+and the function return in _gnutls_proc_rsa_psk_client_kx up until the
+decryption. This also avoids an extra memcpy to session->key.key.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/40dbbd8de499668590e8af51a15799fbc430595e]
+CVE: CVE-2024-0553
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/auth/rsa_psk.c | 68 ++++++++++++++++++++++++----------------------
+ 1 file changed, 35 insertions(+), 33 deletions(-)
+
+diff --git a/lib/auth/rsa_psk.c b/lib/auth/rsa_psk.c
+index 93c2dc9..c6cfb92 100644
+--- a/lib/auth/rsa_psk.c
++++ b/lib/auth/rsa_psk.c
+@@ -269,7 +269,6 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ int ret, dsize;
+ ssize_t data_size = _data_size;
+ gnutls_psk_server_credentials_t cred;
+- gnutls_datum_t premaster_secret = { NULL, 0 };
+ volatile uint8_t ver_maj, ver_min;
+
+ cred = (gnutls_psk_server_credentials_t)
+@@ -329,24 +328,48 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ ver_maj = _gnutls_get_adv_version_major(session);
+ ver_min = _gnutls_get_adv_version_minor(session);
+
+- premaster_secret.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
+- if (premaster_secret.data == NULL) {
++ /* Find the key of this username. A random value will be
++ * filled in if the key is not found.
++ */
++ ret = _gnutls_psk_pwd_find_entry(session, info->username,
++ strlen(info->username), &pwd_psk);
++ if (ret < 0)
++ return gnutls_assert_val(ret);
++
++ /* Allocate memory for premaster secret, and fill in the
++ * fields except the decryption result.
++ */
++ session->key.key.size = 2 + GNUTLS_MASTER_SIZE + 2 + pwd_psk.size;
++ session->key.key.data = gnutls_malloc(session->key.key.size);
++ if (session->key.key.data == NULL) {
+ gnutls_assert();
++ _gnutls_free_key_datum(&pwd_psk);
++ /* No need to zeroize, as the secret is not copied in yet */
++ _gnutls_free_datum(&session->key.key);
+ return GNUTLS_E_MEMORY_ERROR;
+ }
+- premaster_secret.size = GNUTLS_MASTER_SIZE;
+
+ /* Fallback value when decryption fails. Needs to be unpredictable. */
+- ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
+- premaster_secret.size);
++ ret = gnutls_rnd(GNUTLS_RND_NONCE, session->key.key.data + 2,
++ GNUTLS_MASTER_SIZE);
+ if (ret < 0) {
+ gnutls_assert();
+- goto cleanup;
++ _gnutls_free_key_datum(&pwd_psk);
++ /* No need to zeroize, as the secret is not copied in yet */
++ _gnutls_free_datum(&session->key.key);
++ return ret;
+ }
+
++ _gnutls_write_uint16(GNUTLS_MASTER_SIZE, session->key.key.data);
++ _gnutls_write_uint16(pwd_psk.size,
++ &session->key.key.data[2 + GNUTLS_MASTER_SIZE]);
++ memcpy(&session->key.key.data[2 + GNUTLS_MASTER_SIZE + 2], pwd_psk.data,
++ pwd_psk.size);
++ _gnutls_free_key_datum(&pwd_psk);
++
+ gnutls_privkey_decrypt_data2(session->internals.selected_key, 0,
+- &ciphertext, premaster_secret.data,
+- premaster_secret.size);
++ &ciphertext, session->key.key.data + 2,
++ GNUTLS_MASTER_SIZE);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+ * channel that can be used as an oracle, so tread carefully */
+@@ -365,31 +388,10 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+- premaster_secret.data[0] = ver_maj;
+- premaster_secret.data[1] = ver_min;
++ session->key.key.data[2] = ver_maj;
++ session->key.key.data[3] = ver_min;
+
+- /* find the key of this username
+- */
+- ret =
+- _gnutls_psk_pwd_find_entry(session, info->username, strlen(info->username), &pwd_psk);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+-
+- ret =
+- set_rsa_psk_session_key(session, &pwd_psk, &premaster_secret);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+-
+- ret = 0;
+- cleanup:
+- _gnutls_free_key_datum(&pwd_psk);
+- _gnutls_free_temp_key_datum(&premaster_secret);
+-
+- return ret;
++ return 0;
+ }
+
+ static int
+--
+2.25.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2024-0567.patch b/meta/recipes-support/gnutls/gnutls/CVE-2024-0567.patch
new file mode 100644
index 0000000000..49c4531a9b
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2024-0567.patch
@@ -0,0 +1,184 @@
+From 9edbdaa84e38b1bfb53a7d72c1de44f8de373405 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Thu, 11 Jan 2024 15:45:11 +0900
+Subject: [PATCH] x509: detect loop in certificate chain
+
+There can be a loop in a certificate chain, when multiple CA
+certificates are cross-signed with each other, such as A → B, B → C,
+and C → A. Previously, the verification logic was not capable of
+handling this scenario while sorting the certificates in the chain in
+_gnutls_sort_clist, resulting in an assertion failure. This patch
+properly detects such loop and aborts further processing in a graceful
+manner.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/9edbdaa84e38b1bfb53a7d72c1de44f8de373405]
+CVE: CVE-2024-0567
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/x509/common.c | 4 ++
+ tests/test-chains.h | 125 ++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 129 insertions(+)
+
+diff --git a/lib/x509/common.c b/lib/x509/common.c
+index fad9da5..6367b03 100644
+--- a/lib/x509/common.c
++++ b/lib/x509/common.c
+@@ -1790,6 +1790,10 @@ unsigned int _gnutls_sort_clist(gnutls_x509_crt_t *clist,
+ break;
+ }
+
++ if (insorted[prev]) { /* loop detected */
++ break;
++ }
++
+ sorted[i] = clist[prev];
+ insorted[prev] = 1;
+ }
+diff --git a/tests/test-chains.h b/tests/test-chains.h
+index dd7ccf0..09a5461 100644
+--- a/tests/test-chains.h
++++ b/tests/test-chains.h
+@@ -4263,6 +4263,129 @@ static const char *rsa_sha1_not_in_trusted_ca[] = {
+ NULL
+ };
+
++static const char *cross_signed[] = {
++ /* server (signed by A1) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBqDCCAVqgAwIBAgIUejlil+8DBffazcnMNwyOOP6yCCowBQYDK2VwMBoxGDAW\n"
++ "BgNVBAMTD0ludGVybWVkaWF0ZSBBMTAgFw0yNDAxMTEwNjI3MjJaGA85OTk5MTIz\n"
++ "MTIzNTk1OVowNzEbMBkGA1UEChMSR251VExTIHRlc3Qgc2VydmVyMRgwFgYDVQQD\n"
++ "Ew90ZXN0LmdudXRscy5vcmcwKjAFBgMrZXADIQA1ZVS0PcNeTPQMZ+FuVz82AHrj\n"
++ "qL5hWEpCDgpG4M4fxaOBkjCBjzAMBgNVHRMBAf8EAjAAMBoGA1UdEQQTMBGCD3Rl\n"
++ "c3QuZ251dGxzLm9yZzATBgNVHSUEDDAKBggrBgEFBQcDATAOBgNVHQ8BAf8EBAMC\n"
++ "B4AwHQYDVR0OBBYEFGtEUv+JSt+zPoO3lu0IiObZVoiNMB8GA1UdIwQYMBaAFPnY\n"
++ "v6Pw0IvKSqIlb6ewHyEAmTA3MAUGAytlcANBAAS2lyc87kH/aOvNKzPjqDwUYxPA\n"
++ "CfYjyaKea2d0DZLBM5+Bjnj/4aWwTKgVTJzWhLJcLtaSdVHrXqjr9NhEhQ0=\n"
++ "-----END CERTIFICATE-----\n",
++ /* A1 (signed by A) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBUjCCAQSgAwIBAgIUe/R+NVp04e74ySw2qgI6KZgFR20wBQYDK2VwMBExDzAN\n"
++ "BgNVBAMTBlJvb3QgQTAgFw0yNDAxMTEwNjI1MDFaGA85OTk5MTIzMTIzNTk1OVow\n"
++ "GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlIEExMCowBQYDK2VwAyEAlkTNqwz973sy\n"
++ "u3whMjSiUMs77CZu5YA7Gi5KcakExrKjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYD\n"
++ "VR0PAQH/BAQDAgIEMB0GA1UdDgQWBBT52L+j8NCLykqiJW+nsB8hAJkwNzAfBgNV\n"
++ "HSMEGDAWgBRbYgOkRGsd3Z74+CauX4htzLg0lzAFBgMrZXADQQBM0NBaFVPd3cTJ\n"
++ "DSaZNT34fsHuJk4eagpn8mBxKQpghq4s8Ap+nYtp2KiXjcizss53PeLXVnkfyLi0\n"
++ "TLVBHvUJ\n"
++ "-----END CERTIFICATE-----\n",
++ /* A (signed by B) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBSDCB+6ADAgECAhQtdJpg+qlPcLoRW8iiztJUD4xNvDAFBgMrZXAwETEPMA0G\n"
++ "A1UEAxMGUm9vdCBCMCAXDTI0MDExMTA2MTk1OVoYDzk5OTkxMjMxMjM1OTU5WjAR\n"
++ "MQ8wDQYDVQQDEwZSb290IEEwKjAFBgMrZXADIQA0vDYyg3tgotSETL1Wq2hBs32p\n"
++ "WbnINkmOSNmOiZlGHKNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\n"
++ "AgQwHQYDVR0OBBYEFFtiA6REax3dnvj4Jq5fiG3MuDSXMB8GA1UdIwQYMBaAFJFA\n"
++ "s2rg6j8w9AKItRnOOOjG2FG6MAUGAytlcANBAPv674p9ek5GjRcRfVQhgN+kQlHU\n"
++ "u774wL3Vx3fWA1E7+WchdMzcHrPoa5OKtKmxjIKUTO4SeDZL/AVpvulrWwk=\n"
++ "-----END CERTIFICATE-----\n",
++ /* A (signed by C) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBSDCB+6ADAgECAhReNpCiVn7eFDUox3mvM5qE942AVzAFBgMrZXAwETEPMA0G\n"
++ "A1UEAxMGUm9vdCBDMCAXDTI0MDExMTA2MjEyMVoYDzk5OTkxMjMxMjM1OTU5WjAR\n"
++ "MQ8wDQYDVQQDEwZSb290IEIwKjAFBgMrZXADIQAYX92hS97OGKbMzwrD7ReVifwM\n"
++ "3iz5tnfQHWQSkvvYMKNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\n"
++ "AgQwHQYDVR0OBBYEFJFAs2rg6j8w9AKItRnOOOjG2FG6MB8GA1UdIwQYMBaAFEh/\n"
++ "XKjIuMeEavX5QVoy39Q+GhnwMAUGAytlcANBAIwghH3gelXty8qtoTGIEJb0+EBv\n"
++ "BH4YOUh7TamxjxkjvvIhDA7ZdheofFb7NrklJco7KBcTATUSOvxakYRP9Q8=\n"
++ "-----END CERTIFICATE-----\n",
++ /* B1 (signed by B) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBUjCCAQSgAwIBAgIUfpmrVDc1XBA5/7QYMyGBuB9mTtUwBQYDK2VwMBExDzAN\n"
++ "BgNVBAMTBlJvb3QgQjAgFw0yNDAxMTEwNjI1MjdaGA85OTk5MTIzMTIzNTk1OVow\n"
++ "GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlIEIxMCowBQYDK2VwAyEAh6ZTuJWsweVB\n"
++ "a5fsye5iq89kWDC2Y/Hlc0htLmjzMP+jYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYD\n"
++ "VR0PAQH/BAQDAgIEMB0GA1UdDgQWBBTMQu37PKyLjKfPODZgxYCaayff+jAfBgNV\n"
++ "HSMEGDAWgBSRQLNq4Oo/MPQCiLUZzjjoxthRujAFBgMrZXADQQBblmguY+lnYvOK\n"
++ "rAZJnqpEUGfm1tIFyu3rnlE7WOVcXRXMIoNApLH2iHIipQjlvNWuSBFBTC1qdewh\n"
++ "/e+0cgQB\n"
++ "-----END CERTIFICATE-----\n",
++ /* B (signed by A) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBSDCB+6ADAgECAhRpEm+dWNX6DMZh/nottkFfFFrXXDAFBgMrZXAwETEPMA0G\n"
++ "A1UEAxMGUm9vdCBBMCAXDTI0MDExMTA2MTcyNloYDzk5OTkxMjMxMjM1OTU5WjAR\n"
++ "MQ8wDQYDVQQDEwZSb290IEIwKjAFBgMrZXADIQAYX92hS97OGKbMzwrD7ReVifwM\n"
++ "3iz5tnfQHWQSkvvYMKNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\n"
++ "AgQwHQYDVR0OBBYEFJFAs2rg6j8w9AKItRnOOOjG2FG6MB8GA1UdIwQYMBaAFFti\n"
++ "A6REax3dnvj4Jq5fiG3MuDSXMAUGAytlcANBAFvmcK3Ida5ViVYDzxKVLPcPsCHe\n"
++ "3hxz99lBrerJC9iJSvRYTJoPBvjTxDYnBn5EFrQYMrUED+6i71lmGXNU9gs=\n"
++ "-----END CERTIFICATE-----\n",
++ /* B (signed by C) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBSDCB+6ADAgECAhReNpCiVn7eFDUox3mvM5qE942AVzAFBgMrZXAwETEPMA0G\n"
++ "A1UEAxMGUm9vdCBDMCAXDTI0MDExMTA2MjEyMVoYDzk5OTkxMjMxMjM1OTU5WjAR\n"
++ "MQ8wDQYDVQQDEwZSb290IEIwKjAFBgMrZXADIQAYX92hS97OGKbMzwrD7ReVifwM\n"
++ "3iz5tnfQHWQSkvvYMKNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\n"
++ "AgQwHQYDVR0OBBYEFJFAs2rg6j8w9AKItRnOOOjG2FG6MB8GA1UdIwQYMBaAFEh/\n"
++ "XKjIuMeEavX5QVoy39Q+GhnwMAUGAytlcANBAIwghH3gelXty8qtoTGIEJb0+EBv\n"
++ "BH4YOUh7TamxjxkjvvIhDA7ZdheofFb7NrklJco7KBcTATUSOvxakYRP9Q8=\n"
++ "-----END CERTIFICATE-----\n",
++ /* C1 (signed by C) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBUjCCAQSgAwIBAgIUSKsfY1wD3eD2VmaaK1wt5naPckMwBQYDK2VwMBExDzAN\n"
++ "BgNVBAMTBlJvb3QgQzAgFw0yNDAxMTEwNjI1NDdaGA85OTk5MTIzMTIzNTk1OVow\n"
++ "GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlIEMxMCowBQYDK2VwAyEA/t7i1chZlKkV\n"
++ "qxJOrmmyATn8XnpK+nV/iT4OMHSHfAyjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYD\n"
++ "VR0PAQH/BAQDAgIEMB0GA1UdDgQWBBRmpF3JjoP3NiBzE5J5ANT0bvfRmjAfBgNV\n"
++ "HSMEGDAWgBRIf1yoyLjHhGr1+UFaMt/UPhoZ8DAFBgMrZXADQQAeRBXv6WCTOp0G\n"
++ "3wgd8bbEGrrILfpi+qH7aj/MywgkPIlppDYRQ3jL6ASd+So/408dlE0DV9DXKBi0\n"
++ "725XUUYO\n"
++ "-----END CERTIFICATE-----\n",
++ /* C (signed by A) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBSDCB+6ADAgECAhRvbZv3SRTjDOiAbyFWHH4y0yMZkjAFBgMrZXAwETEPMA0G\n"
++ "A1UEAxMGUm9vdCBBMCAXDTI0MDExMTA2MTg1MVoYDzk5OTkxMjMxMjM1OTU5WjAR\n"
++ "MQ8wDQYDVQQDEwZSb290IEMwKjAFBgMrZXADIQDxm6Ubhsa0gSa1vBCIO5e+qZEH\n"
++ "8Oocz+buNHfIJbh5NaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\n"
++ "AgQwHQYDVR0OBBYEFEh/XKjIuMeEavX5QVoy39Q+GhnwMB8GA1UdIwQYMBaAFFti\n"
++ "A6REax3dnvj4Jq5fiG3MuDSXMAUGAytlcANBAPl+SyiOfXJnjSWx8hFMhJ7w92mn\n"
++ "tkGifCFHBpUhYcBIMeMtLw0RBLXqaaN0EKlTFimiEkLClsU7DKYrpEEJegs=\n"
++ "-----END CERTIFICATE-----\n",
++ /* C (signed by B) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBSDCB+6ADAgECAhQU1OJWRVOLrGrgJiLwexd1/MwKkTAFBgMrZXAwETEPMA0G\n"
++ "A1UEAxMGUm9vdCBCMCAXDTI0MDExMTA2MjAzMFoYDzk5OTkxMjMxMjM1OTU5WjAR\n"
++ "MQ8wDQYDVQQDEwZSb290IEMwKjAFBgMrZXADIQDxm6Ubhsa0gSa1vBCIO5e+qZEH\n"
++ "8Oocz+buNHfIJbh5NaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\n"
++ "AgQwHQYDVR0OBBYEFEh/XKjIuMeEavX5QVoy39Q+GhnwMB8GA1UdIwQYMBaAFJFA\n"
++ "s2rg6j8w9AKItRnOOOjG2FG6MAUGAytlcANBALXeyuj8vj6Q8j4l17VzZwmJl0gN\n"
++ "bCGoKMl0J/0NiN/fQRIsdbwQDh0RUN/RN3I6DTtB20ER6f3VdnzAh8nXkQ4=\n"
++ "-----END CERTIFICATE-----\n",
++ NULL
++};
++
++static const char *cross_signed_ca[] = {
++ /* A (self-signed) */
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIBJzCB2qADAgECAhQs1Ur+gzPs1ISxs3Tbs700q0CZcjAFBgMrZXAwETEPMA0G\n"
++ "A1UEAxMGUm9vdCBBMCAXDTI0MDExMTA2MTYwMFoYDzk5OTkxMjMxMjM1OTU5WjAR\n"
++ "MQ8wDQYDVQQDEwZSb290IEEwKjAFBgMrZXADIQA0vDYyg3tgotSETL1Wq2hBs32p\n"
++ "WbnINkmOSNmOiZlGHKNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\n"
++ "AgQwHQYDVR0OBBYEFFtiA6REax3dnvj4Jq5fiG3MuDSXMAUGAytlcANBAHrVv7E9\n"
++ "5scuOVCH9gNRRm8Z9SUoLakRHAPnySdg6z/kI3vOgA/OM7reArpnW8l1H2FapgpL\n"
++ "bDeZ2XJH+BdVFwg=\n"
++ "-----END CERTIFICATE-----\n",
++ NULL
++};
++
+ #if defined __clang__ || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+ # pragma GCC diagnostic push
+ # pragma GCC diagnostic ignored "-Wunused-variable"
+@@ -4442,6 +4565,8 @@ static struct
+ rsa_sha1_not_in_trusted, rsa_sha1_not_in_trusted_ca,
+ GNUTLS_PROFILE_TO_VFLAGS(GNUTLS_PROFILE_MEDIUM),
+ GNUTLS_CERT_INSECURE_ALGORITHM | GNUTLS_CERT_INVALID, NULL, 1620118136, 1},
++ { "cross signed - ok", cross_signed, cross_signed_ca, 0, 0, 0,
++ 1704955300 },
+ { NULL, NULL, NULL, 0, 0}
+ };
+
+--
+2.25.1
+
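
The CVE-2024-0567 hunk above adds a single guard to gnutls' chain-sorting loop: if the issuer about to be appended has already been placed, the chain is cross-signed back onto itself and the sort stops instead of looping. A minimal standalone sketch of the same idea, using a hypothetical issuer_of() helper and toy subject/issuer strings rather than real X.509 data:

    /*
     * Toy chain sorter: subject/issuer strings stand in for certificates,
     * issuer_of() is a hypothetical helper, MAX_CHAIN is arbitrary.
     */
    #include <stdio.h>
    #include <string.h>

    #define MAX_CHAIN 8

    struct cert { const char *subject; const char *issuer; };

    /* index of the cert whose subject matches list[i]'s issuer, or -1 */
    static int issuer_of(const struct cert *list, int n, int i)
    {
        for (int j = 0; j < n; j++)
            if (j != i && strcmp(list[i].issuer, list[j].subject) == 0)
                return j;
        return -1;
    }

    static int sort_chain(const struct cert *list, int n,
                          const struct cert **sorted)
    {
        int insorted[MAX_CHAIN] = { 0 };
        int prev = 0, count = 0;

        sorted[count++] = &list[0];
        insorted[0] = 1;

        for (int i = 1; i < n; i++) {
            prev = issuer_of(list, n, prev);
            if (prev < 0)
                break;
            if (insorted[prev])       /* loop detected: cross-signed cycle */
                break;
            sorted[count++] = &list[prev];
            insorted[prev] = 1;
        }
        return count;
    }

    int main(void)
    {
        /* "A" appears twice (signed by B and by C), as in the test chain */
        struct cert chain[] = {
            { "server", "A1" }, { "A1", "A" }, { "A", "B" },
            { "B", "A" }, { "A", "C" },
        };
        const struct cert *sorted[MAX_CHAIN];
        int n = sort_chain(chain, 5, sorted);
        for (int i = 0; i < n; i++)
            printf("%s\n", sorted[i]->subject);
        return 0;
    }

The cross_signed test chain added to tests/test-chains.h exercises exactly this shape: the same roots ("Root A", "Root B", "Root C") appear several times with different issuers, so without the guard the sorter revisits entries it has already placed.
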
diff --git a/meta/recipes-support/gnutls/gnutls_3.7.4.bb b/meta/recipes-support/gnutls/gnutls_3.7.4.bb
index b34eb7f5f0..b290022781 100644
--- a/meta/recipes-support/gnutls/gnutls_3.7.4.bb
+++ b/meta/recipes-support/gnutls/gnutls_3.7.4.bb
@@ -8,7 +8,7 @@ LICENSE = "GPL-3.0-or-later & LGPL-2.1-or-later"
LICENSE:${PN} = "LGPL-2.1-or-later"
LICENSE:${PN}-xx = "LGPL-2.1-or-later"
LICENSE:${PN}-bin = "GPL-3.0-or-later"
-LICENSE:${PN}-OpenSSL = "GPL-3.0-or-later"
+LICENSE:${PN}-openssl = "GPL-3.0-or-later"
LIC_FILES_CHKSUM = "file://LICENSE;md5=71391c8e0c1cfe68077e7fce3b586283 \
file://doc/COPYING;md5=c678957b0c8e964aa6c70fd77641a71e \
@@ -21,6 +21,11 @@ SHRT_VER = "${@d.getVar('PV').split('.')[0]}.${@d.getVar('PV').split('.')[1]}"
SRC_URI = "https://www.gnupg.org/ftp/gcrypt/gnutls/v${SHRT_VER}/gnutls-${PV}.tar.xz \
file://arm_eabi.patch \
+ file://CVE-2022-2509.patch \
+ file://CVE-2023-0361.patch \
+ file://CVE-2023-5981.patch \
+ file://CVE-2024-0553.patch \
+ file://CVE-2024-0567.patch \
"
SRC_URI[sha256sum] = "e6adbebcfbc95867de01060d93c789938cf89cc1d1f6ef9ef661890f6217451f"
diff --git a/meta/recipes-support/gnutls/libtasn1_4.18.0.bb b/meta/recipes-support/gnutls/libtasn1_4.19.0.bb
index db49adc1c2..5fb8b54c06 100644
--- a/meta/recipes-support/gnutls/libtasn1_4.18.0.bb
+++ b/meta/recipes-support/gnutls/libtasn1_4.19.0.bb
@@ -16,7 +16,7 @@ SRC_URI = "${GNU_MIRROR}/libtasn1/libtasn1-${PV}.tar.gz \
DEPENDS = "bison-native"
-SRC_URI[sha256sum] = "4365c154953563d64c67a024b607d1ee75c6db76e0d0f65709ea80a334cd1898"
+SRC_URI[sha256sum] = "1613f0ac1cf484d6ec0ce3b8c06d56263cc7242f1c23b30d82d23de345a63f7a"
inherit autotools texinfo lib_package gtk-doc
diff --git a/meta/recipes-support/iso-codes/iso-codes_4.10.0.bb b/meta/recipes-support/iso-codes/iso-codes_4.15.0.bb
index 857fe463ef..b789a99035 100644
--- a/meta/recipes-support/iso-codes/iso-codes_4.10.0.bb
+++ b/meta/recipes-support/iso-codes/iso-codes_4.15.0.bb
@@ -9,7 +9,7 @@ LICENSE = "LGPL-2.1-only"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
SRC_URI = "git://salsa.debian.org/iso-codes-team/iso-codes.git;protocol=https;branch=main;"
-SRCREV = "9a6c24ee40e737ab34273c1af13a8dabcae888dd"
+SRCREV = "69ba16daef3c5c5e3c18f2d919e25296a4b946be"
# inherit gettext cannot be used, because it adds gettext-native to BASEDEPENDS which
# are inhibited by allarch
diff --git a/meta/recipes-support/libassuan/libassuan_2.5.5.bb b/meta/recipes-support/libassuan/libassuan_2.5.6.bb
index 2bab3ac955..7e899e7399 100644
--- a/meta/recipes-support/libassuan/libassuan_2.5.5.bb
+++ b/meta/recipes-support/libassuan/libassuan_2.5.6.bb
@@ -20,7 +20,7 @@ SRC_URI = "${GNUPG_MIRROR}/libassuan/libassuan-${PV}.tar.bz2 \
file://libassuan-add-pkgconfig-support.patch \
"
-SRC_URI[sha256sum] = "8e8c2fcc982f9ca67dcbb1d95e2dc746b1739a4668bc20b3a3c5be632edb34e4"
+SRC_URI[sha256sum] = "e9fd27218d5394904e4e39788f9b1742711c3e6b41689a31aa3380bd5aa4f426"
BINCONFIG = "${bindir}/libassuan-config"
diff --git a/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.12.bb b/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.14.bb
index 8ea8436977..fad92df507 100644
--- a/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.12.bb
+++ b/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.14.bb
@@ -5,13 +5,13 @@ SECTION = "optional"
PROVIDES += "libatomics-ops"
LICENSE = "GPL-2.0-only & MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
- file://doc/LICENSING.txt;md5=e00dd5c8ac03a14c5ae5225a4525fa2d \
+ file://doc/LICENSING.txt;md5=dfc50c7cea7b66935844587a0f7389e7 \
"
SRC_URI = "https://github.com/ivmai/libatomic_ops/releases/download/v${PV}/libatomic_ops-${PV}.tar.gz"
UPSTREAM_CHECK_URI = "https://github.com/ivmai/libatomic_ops/releases"
-SRC_URI[sha256sum] = "f0ab566e25fce08b560e1feab6a3db01db4a38e5bc687804334ef3920c549f3e"
+SRC_URI[sha256sum] = "390f244d424714735b7050d056567615b3b8f29008a663c262fb548f1802d292"
S = "${WORKDIR}/libatomic_ops-${PV}"
diff --git a/meta/recipes-support/libbsd/libbsd_0.11.5.bb b/meta/recipes-support/libbsd/libbsd_0.11.5.bb
index bb8766a070..21af37882f 100644
--- a/meta/recipes-support/libbsd/libbsd_0.11.5.bb
+++ b/meta/recipes-support/libbsd/libbsd_0.11.5.bb
@@ -29,6 +29,13 @@ HOMEPAGE = "https://libbsd.freedesktop.org/wiki/"
# License: public-domain-Colin-Plumb
LICENSE = "BSD-3-Clause & BSD-4-Clause & ISC & PD"
LICENSE:${PN} = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-dbg = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-dev = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-doc = "BSD-3-Clause & BSD-4-Clause & ISC & PD"
+LICENSE:${PN}-locale = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-src = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-staticdev = "BSD-3-Clause & ISC & PD"
+
LIC_FILES_CHKSUM = "file://COPYING;md5=0b31944ca2c1075410a30f0c17379d3b"
SECTION = "libs"
diff --git a/meta/recipes-support/libcap/files/0001-nativesdk-libcap-Raise-the-size-of-arrays-containing.patch b/meta/recipes-support/libcap/files/0001-nativesdk-libcap-Raise-the-size-of-arrays-containing.patch
index 9884fb5641..8bd2050ea5 100644
--- a/meta/recipes-support/libcap/files/0001-nativesdk-libcap-Raise-the-size-of-arrays-containing.patch
+++ b/meta/recipes-support/libcap/files/0001-nativesdk-libcap-Raise-the-size-of-arrays-containing.patch
@@ -1,4 +1,4 @@
-From fc60e000169618a4adced845b9462d36ced1efdd Mon Sep 17 00:00:00 2001
+From a3196f3a06e7bbfde30d143c92a4325be323b3d0 Mon Sep 17 00:00:00 2001
From: Hongxu Jia <hongxu.jia@windriver.com>
Date: Thu, 14 Oct 2021 15:57:36 +0800
Subject: [PATCH] nativesdk-libcap: Raise the size of arrays containing dl
diff --git a/meta/recipes-support/libcap/files/CVE-2023-2602.patch b/meta/recipes-support/libcap/files/CVE-2023-2602.patch
new file mode 100644
index 0000000000..1ad5aeb826
--- /dev/null
+++ b/meta/recipes-support/libcap/files/CVE-2023-2602.patch
@@ -0,0 +1,45 @@
+From bc6b36682f188020ee4770fae1d41bde5b2c97bb Mon Sep 17 00:00:00 2001
+From: "Andrew G. Morgan" <morgan@kernel.org>
+Date: Wed, 3 May 2023 19:18:36 -0700
+Subject: Correct the check of pthread_create()'s return value.
+
+This function returns a positive number (errno) on error, so the code
+wasn't previously freeing some memory in this situation.
+
+Discussion:
+
+ https://stackoverflow.com/a/3581020/14760867
+
+Credit for finding this bug in libpsx goes to David Gstir of
+X41 D-Sec GmbH (https://x41-dsec.de/) who performed a security
+audit of the libcap source code in April of 2023. The audit
+was sponsored by the Open Source Technology Improvement Fund
+(https://ostif.org/).
+
+Audit ref: LCAP-CR-23-01 (CVE-2023-2602)
+
+Signed-off-by: Andrew G. Morgan <morgan@kernel.org>
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/libs/libcap/libcap.git/patch/?id=bc6b36682f188020ee4770fae1d41bde5b2c97bb]
+CVE: CVE-2023-2602
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ psx/psx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/psx/psx.c b/psx/psx.c
+index d9c0485..65eb2aa 100644
+--- a/psx/psx.c
++++ b/psx/psx.c
+@@ -516,7 +516,7 @@ int __wrap_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
+ pthread_sigmask(SIG_BLOCK, &sigbit, NULL);
+
+ int ret = __real_pthread_create(thread, attr, _psx_start_fn, starter);
+- if (ret == -1) {
++ if (ret > 0) {
+ psx_new_state(_PSX_CREATE, _PSX_IDLE);
+ memset(starter, 0, sizeof(*starter));
+ free(starter);
+--
+2.25.1
+
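
The one-line change above hinges on pthread_create()'s error convention: it returns 0 on success and a positive errno value on failure, never -1, so the old `ret == -1` test meant the starter block was never freed on error. A small self-contained illustration of the corrected pattern (hypothetical worker/state names, not the libpsx wrapper; link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *worker(void *arg)
    {
        (void)arg;
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        void *state = malloc(64);       /* stands in for the starter block */
        if (!state)
            return 1;

        int ret = pthread_create(&tid, NULL, worker, state);
        if (ret > 0) {                  /* was mistakenly tested as ret == -1 */
            fprintf(stderr, "pthread_create: %s\n", strerror(ret));
            free(state);                /* without this the block leaked */
            return 1;
        }
        pthread_join(tid, NULL);
        free(state);
        return 0;
    }
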
diff --git a/meta/recipes-support/libcap/files/CVE-2023-2603.patch b/meta/recipes-support/libcap/files/CVE-2023-2603.patch
new file mode 100644
index 0000000000..e09be78640
--- /dev/null
+++ b/meta/recipes-support/libcap/files/CVE-2023-2603.patch
@@ -0,0 +1,60 @@
+From 422bec25ae4a1ab03fd4d6f728695ed279173b18 Mon Sep 17 00:00:00 2001
+From: "Andrew G. Morgan" <morgan@kernel.org>
+Date: Wed, 3 May 2023 19:44:22 -0700
+Subject: Large strings can confuse libcap's internal strdup code.
+
+Avoid something subtle with really long strings: 1073741823 should
+be enough for anybody. This is an improved fix over something attempted
+in libcap-2.55 to address some static analysis findings.
+
+Reviewing the library, cap_proc_root() and cap_launcher_set_chroot()
+are the only two calls where the library is potentially exposed to a
+user controlled string input.
+
+Credit for finding this bug in libcap goes to Richard Weinberger of
+X41 D-Sec GmbH (https://x41-dsec.de/) who performed a security audit
+of the libcap source code in April of 2023. The audit was sponsored
+by the Open Source Technology Improvement Fund (https://ostif.org/).
+
+Audit ref: LCAP-CR-23-02 (CVE-2023-2603)
+
+Signed-off-by: Andrew G. Morgan <morgan@kernel.org>
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/libs/libcap/libcap.git/commit/?id=422bec25ae4a1ab03fd4d6f728695ed279173b18]
+CVE: CVE-2023-2603
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+---
+ libcap/cap_alloc.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/libcap/cap_alloc.c b/libcap/cap_alloc.c
+index c826e7a..25f9981 100644
+--- a/libcap/cap_alloc.c
++++ b/libcap/cap_alloc.c
+@@ -105,15 +105,17 @@ char *_libcap_strdup(const char *old)
+ errno = EINVAL;
+ return NULL;
+ }
+- len = strlen(old) + 1 + 2*sizeof(__u32);
+- if (len < sizeof(struct _cap_alloc_s)) {
+- len = sizeof(struct _cap_alloc_s);
+- }
+- if ((len & 0xffffffff) != len) {
++
++ len = strlen(old);
++ if ((len & 0x3fffffff) != len) {
+ _cap_debug("len is too long for libcap to manage");
+ errno = EINVAL;
+ return NULL;
+ }
++ len += 1 + 2*sizeof(__u32);
++ if (len < sizeof(struct _cap_alloc_s)) {
++ len = sizeof(struct _cap_alloc_s);
++ }
+
+ raw_data = calloc(1, len);
+ if (raw_data == NULL) {
+--
+2.25.1
+
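
The reworked _libcap_strdup() above measures the raw string and applies the 0x3fffffff cap before adding the NUL and header overhead, so the size arithmetic can no longer wrap past the later truncation check. A rough standalone sketch of that ordering, with a made-up header struct instead of libcap's _cap_alloc_s:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct hdr { uint32_t magic; uint32_t size; };   /* hypothetical layout */

    static char *checked_strdup(const char *old)
    {
        size_t len;

        if (old == NULL) {
            errno = EINVAL;
            return NULL;
        }

        len = strlen(old);
        if ((len & 0x3fffffff) != len) {    /* cap the raw length first */
            errno = EINVAL;
            return NULL;
        }
        len += 1 + sizeof(struct hdr);      /* only now add NUL + overhead */

        char *raw = calloc(1, len);
        if (raw == NULL) {
            errno = ENOMEM;
            return NULL;
        }
        ((struct hdr *)raw)->magic = 0xca90d0u;
        ((struct hdr *)raw)->size  = (uint32_t)len;
        memcpy(raw + sizeof(struct hdr), old, strlen(old) + 1);
        return raw + sizeof(struct hdr);
    }

    int main(void)
    {
        char *s = checked_strdup("hello");
        if (s) {
            puts(s);
            free(s - sizeof(struct hdr));   /* free the raw allocation */
        }
        return 0;
    }
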
diff --git a/meta/recipes-support/libcap/libcap_2.63.bb b/meta/recipes-support/libcap/libcap_2.66.bb
index 9e341c4bd0..7534063b7d 100644
--- a/meta/recipes-support/libcap/libcap_2.63.bb
+++ b/meta/recipes-support/libcap/libcap_2.66.bb
@@ -16,11 +16,13 @@ DEPENDS = "hostperl-runtime-native gperf-native"
SRC_URI = "${KERNELORG_MIRROR}/linux/libs/security/linux-privs/${BPN}2/${BPN}-${PV}.tar.xz \
file://0001-ensure-the-XATTR_NAME_CAPS-is-defined-when-it-is-use.patch \
file://0002-tests-do-not-run-target-executables.patch \
+ file://CVE-2023-2602.patch \
+ file://CVE-2023-2603.patch \
"
SRC_URI:append:class-nativesdk = " \
file://0001-nativesdk-libcap-Raise-the-size-of-arrays-containing.patch \
"
-SRC_URI[sha256sum] = "0c637b8f44fc7d8627787e9cf57f15ac06c1ddccb53e41feec5496be3466f77f"
+SRC_URI[sha256sum] = "15c40ededb3003d70a283fe587a36b7d19c8b3b554e33f86129c059a4bb466b2"
UPSTREAM_CHECK_URI = "https://www.kernel.org/pub/linux/libs/security/linux-privs/${BPN}2/"
diff --git a/meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch b/meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch
index 5e529d1ce7..3ffcb3e128 100644
--- a/meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch
+++ b/meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch
@@ -1,4 +1,4 @@
-From 501a6b55853af549fae72723e74271f2a4ec7cf6 Mon Sep 17 00:00:00 2001
+From 000f1500b693a84880d2da49b77b1113f98dde35 Mon Sep 17 00:00:00 2001
From: Brett Warren <brett.warren@arm.com>
Date: Fri, 27 Nov 2020 15:28:42 +0000
Subject: [PATCH] arm/sysv: reverted clang VFP mitigation
@@ -11,8 +11,9 @@ https://github.com/libffi/libffi/issues/607. Now that
clang supports the LDC and SDC instructions, this mitigation
has been reverted.
-Upstream-Status: Pending
+Upstream-Status: Submitted [https://github.com/libffi/libffi/pull/747]
Signed-off-by: Brett Warren <brett.warren@arm.com>
+
---
src/arm/sysv.S | 33 ---------------------------------
1 file changed, 33 deletions(-)
@@ -99,6 +100,3 @@ index fb36213..e4272a1 100644
b call_epilogue
E(ARM_TYPE_INT64)
ldr r1, [r2, #4]
---
-2.25.1
-
diff --git a/meta/recipes-support/libffi/libffi/not-win32.patch b/meta/recipes-support/libffi/libffi/not-win32.patch
index 62daaf4b38..38f9b0025c 100644
--- a/meta/recipes-support/libffi/libffi/not-win32.patch
+++ b/meta/recipes-support/libffi/libffi/not-win32.patch
@@ -1,4 +1,4 @@
-From 306719369a0d3608b4ff2737de74ae284788a14b Mon Sep 17 00:00:00 2001
+From 20bc4e03442e15965ae3907013e9a177878f0323 Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Thu, 4 Feb 2016 16:22:50 +0000
Subject: [PATCH] libffi: ensure sysroot paths are not in libffi.pc
@@ -21,11 +21,11 @@ Signed-off-by: Ross Burton <ross.burton@intel.com>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configure.ac b/configure.ac
-index b764368..d51ce91 100644
+index 7e8cd98..cf37e88 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -354,7 +354,7 @@ AC_ARG_ENABLE(multi-os-directory,
-
+@@ -405,7 +405,7 @@ AC_ARG_ENABLE(multi-os-directory,
+
# These variables are only ever used when we cross-build to X86_WIN32.
# And we only support this with GCC, so...
-if test "x$GCC" = "xyes"; then
diff --git a/meta/recipes-support/libffi/libffi_3.4.2.bb b/meta/recipes-support/libffi/libffi_3.4.4.bb
index 733fcc5e6c..4ceee6f3cc 100644
--- a/meta/recipes-support/libffi/libffi_3.4.2.bb
+++ b/meta/recipes-support/libffi/libffi_3.4.4.bb
@@ -8,17 +8,18 @@ library really only provides the lowest, machine dependent layer of a fully feat
A layer must exist above `libffi' that handles type conversions for values passed between the two languages."
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=679b5c9bdc79a2b93ee574e193e7a7bc"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=32c0d09a0641daf4903e5d61cc8f23a8"
SRC_URI = "https://github.com/libffi/libffi/releases/download/v${PV}/${BPN}-${PV}.tar.gz \
file://not-win32.patch \
file://0001-arm-sysv-reverted-clang-VFP-mitigation.patch \
"
-SRC_URI[sha256sum] = "540fb721619a6aba3bdeef7d940d8e9e0e6d2c193595bc243241b77ff9e93620"
+SRC_URI[sha256sum] = "d66c56ad259a82cf2a9dfc408b32bf5da52371500b84745f7fb8b645712df676"
UPSTREAM_CHECK_URI = "https://github.com/libffi/libffi/releases/"
UPSTREAM_CHECK_REGEX = "libffi-(?P<pver>\d+(\.\d+)+)\.tar"
EXTRA_OECONF += "--disable-builddir --disable-exec-static-tramp"
+EXTRA_OECONF:class-native += "--with-gcc-arch=generic"
EXTRA_OEMAKE:class-target = "LIBTOOLFLAGS='--tag=CC'"
inherit autotools texinfo multilib_header
@@ -33,4 +34,3 @@ FILES:${PN}-dev += "${libdir}/libffi-${PV}"
MIPS_INSTRUCTION_SET = "mips"
BBCLASSEXTEND = "native nativesdk"
-
diff --git a/meta/recipes-support/libgit2/libgit2/CVE-2024-24575.patch b/meta/recipes-support/libgit2/libgit2/CVE-2024-24575.patch
new file mode 100644
index 0000000000..d3957ac5d0
--- /dev/null
+++ b/meta/recipes-support/libgit2/libgit2/CVE-2024-24575.patch
@@ -0,0 +1,56 @@
+From c9d31b711e8906cf248566f43142f20b03e20cbf Mon Sep 17 00:00:00 2001
+From: Edward Thomson <ethomson@edwardthomson.com>
+Date: Fri, 17 Nov 2023 16:54:47 +0000
+Subject: [PATCH] revparse: fix parsing bug for trailing `@`
+
+When parsing a revspec that ends with a trailing `@`, explicitly stop
+parsing. Introduce a sentinel variable to explicitly stop parsing.
+
+Prior to this, we would set `spec` to `HEAD`, but were looping on the
+value of `spec[pos]`, so we would continue walking the (new) `spec`
+at offset `pos`, looking for a NUL. This is obviously an out-of-bounds
+read.
+
+Credit to Michael Rodler (@f0rki) and Amazon AWS Security.
+
+CVE: CVE-2024-24575
+
+Upstream-Status: Backport [https://github.com/libgit2/libgit2/commit/c9d31b711e8906cf248566f43142f20b03e20cbf]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/revparse.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/src/revparse.c b/src/revparse.c
+index 9bc28e9fc..d3bbe840b 100644
+--- a/src/revparse.c
++++ b/src/revparse.c
+@@ -685,6 +685,7 @@ static int revparse(
+ git_object *base_rev = NULL;
+
+ bool should_return_reference = true;
++ bool parsed = false;
+
+ GIT_ASSERT_ARG(object_out);
+ GIT_ASSERT_ARG(reference_out);
+@@ -694,7 +695,7 @@ static int revparse(
+ *object_out = NULL;
+ *reference_out = NULL;
+
+- while (spec[pos]) {
++ while (!parsed && spec[pos]) {
+ switch (spec[pos]) {
+ case '^':
+ should_return_reference = false;
+@@ -801,6 +802,8 @@ static int revparse(
+ break;
+ } else if (spec[pos+1] == '\0') {
+ spec = "HEAD";
++ identifier_len = 4;
++ parsed = true;
+ break;
+ }
+ /* fall through */
+--
+2.40.0
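
The revparse fix above introduces a `parsed` sentinel so that, when a trailing `@` makes the parser substitute the literal "HEAD" for the spec, the loop stops rather than continuing to index the new, shorter string at the old offset. A simplified toy parser showing the same sentinel, not libgit2's actual revparse():

    #include <stdbool.h>
    #include <stdio.h>

    static void parse_revspec(const char *spec)
    {
        size_t pos = 0, identifier_len = 0;
        bool parsed = false;

        while (!parsed && spec[pos]) {       /* was: while (spec[pos]) */
            switch (spec[pos]) {
            case '@':
                if (spec[pos + 1] == '\0') {
                    spec = "HEAD";           /* spec is now shorter than pos */
                    identifier_len = 4;
                    parsed = true;           /* without this: OOB read past "HEAD" */
                    break;
                }
                /* fall through */
            default:
                identifier_len++;
                break;
            }
            pos++;
        }
        printf("identifier: %.*s (%zu chars)\n",
               (int)identifier_len, spec, identifier_len);
    }

    int main(void)
    {
        parse_revspec("@");      /* trailing '@' resolves to HEAD and stops */
        parse_revspec("main");
        return 0;
    }
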
diff --git a/meta/recipes-support/libgit2/libgit2/CVE-2024-24577.patch b/meta/recipes-support/libgit2/libgit2/CVE-2024-24577.patch
new file mode 100644
index 0000000000..3469f9d099
--- /dev/null
+++ b/meta/recipes-support/libgit2/libgit2/CVE-2024-24577.patch
@@ -0,0 +1,52 @@
+From eb4c1716cd92bf56f2770653a915d5fc01eab8f3 Mon Sep 17 00:00:00 2001
+From: Edward Thomson <ethomson@edwardthomson.com>
+Date: Sat, 16 Dec 2023 11:19:07 +0000
+Subject: [PATCH] index: correct index has_dir_name check
+
+`has_dir_name` is used to check for directory/file collisions,
+and attempts to determine whether the index contains a file with
+a directory name that is a proper subset of the new index entry
+that we're trying to add.
+
+To determine directory name, the function would walk the path string
+backwards to identify a `/`, stopping at the end of the string. However,
+the function assumed that the strings did not start with a `/`. If the
+paths contain only a single `/` at the beginning of the string, then the
+function would continue the loop, erroneously, when they should have
+stopped at the first character.
+
+Correct the order of the tests to terminate properly.
+
+Credit to Michael Rodler (@f0rki) and Amazon AWS Security.
+
+CVE: CVE-2024-24577
+
+Upstream-Status: Backport [https://github.com/libgit2/libgit2/commit/eb4c1716cd92bf56f2770653a915d5fc01eab8f3]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/index.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/src/index.c b/src/index.c
+index aa97c6421..e8ff82e1a 100644
+--- a/src/index.c
++++ b/src/index.c
+@@ -1148,10 +1148,13 @@ static int has_dir_name(git_index *index,
+ size_t len, pos;
+
+ for (;;) {
+- if (*--slash == '/')
+- break;
++ slash--;
++
+ if (slash <= entry->path)
+ return 0;
++
++ if (*slash == '/')
++ break;
+ }
+ len = slash - name;
+
+--
+2.40.0
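
The has_dir_name change above is purely an ordering fix: step the pointer back, check the lower bound, and only then dereference, so a path such as "/file" can no longer read one byte before the buffer. A compact sketch of the corrected backward scan (toy function, not the libgit2 index code):

    #include <stdio.h>
    #include <string.h>

    /* length of the directory prefix of `path`, 0 if there is none */
    static size_t dir_name_len(const char *path)
    {
        const char *slash;

        if (*path == '\0')
            return 0;

        slash = path + strlen(path);
        for (;;) {
            slash--;                 /* step first ...                      */
            if (slash <= path)       /* ... then check the lower bound ...  */
                return 0;
            if (*slash == '/')       /* ... and only dereference in bounds  */
                break;
        }
        return (size_t)(slash - path);
    }

    int main(void)
    {
        printf("%zu\n", dir_name_len("a/b/c.txt")); /* 3 -> "a/b"          */
        printf("%zu\n", dir_name_len("/file"));     /* 0, no read before buffer */
        printf("%zu\n", dir_name_len("plain"));     /* 0                    */
        return 0;
    }
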
diff --git a/meta/recipes-support/libgit2/libgit2_1.4.3.bb b/meta/recipes-support/libgit2/libgit2_1.4.5.bb
index 7e27b5b018..ad8b9a536a 100644
--- a/meta/recipes-support/libgit2/libgit2_1.4.3.bb
+++ b/meta/recipes-support/libgit2/libgit2_1.4.5.bb
@@ -5,8 +5,11 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=e5a9227de4cb6afb5d35ed7b0fdf480d"
DEPENDS = "curl openssl zlib libssh2 libgcrypt libpcre2"
-SRC_URI = "git://github.com/libgit2/libgit2.git;branch=maint/v1.4;protocol=https"
-SRCREV = "465bbf88ea939a965fbcbade72870c61f815e457"
+SRC_URI = "git://github.com/libgit2/libgit2.git;branch=maint/v1.4;protocol=https \
+ file://CVE-2024-24575.patch \
+ file://CVE-2024-24577.patch \
+ "
+SRCREV = "cd6f679af401eda1f172402006ef8265f8bd58ea"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-support/libical/libical_3.0.14.bb b/meta/recipes-support/libical/libical_3.0.16.bb
index 58baf3f32f..c53b7ca375 100644
--- a/meta/recipes-support/libical/libical_3.0.14.bb
+++ b/meta/recipes-support/libical/libical_3.0.16.bb
@@ -15,7 +15,7 @@ SECTION = "libs"
SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/v${PV}/${BP}.tar.gz \
file://0001-cmake-Do-not-export-CC-into-gir-compiler.patch \
"
-SRC_URI[sha256sum] = "4284b780356f1dc6a01f16083e7b836e63d3815e27ed0eaaad684712357ccc8f"
+SRC_URI[sha256sum] = "b44705dd71ca4538c86fb16248483ab4b48978524fb1da5097bd76aa2e0f0c33"
UPSTREAM_CHECK_URI = "https://github.com/libical/libical/releases"
inherit cmake pkgconfig gobject-introspection vala
diff --git a/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch b/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch
index af96bd57cd..bdb80ff34d 100644
--- a/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch
+++ b/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch
@@ -1,4 +1,4 @@
-From 6081640895b6d566fa21123e2de7d111eeab5c4c Mon Sep 17 00:00:00 2001
+From ca8174aa81d7bf364b33f7254a9e887735c4996d Mon Sep 17 00:00:00 2001
From: Chen Qi <Qi.Chen@windriver.com>
Date: Mon, 3 Dec 2012 18:17:31 +0800
Subject: [PATCH] libksba: add pkgconfig support
@@ -16,7 +16,7 @@ Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
1 file changed, 4 insertions(+), 86 deletions(-)
diff --git a/src/ksba.m4 b/src/ksba.m4
-index 6b55bb8..6e7336f 100644
+index 452c245..aa96255 100644
--- a/src/ksba.m4
+++ b/src/ksba.m4
@@ -23,37 +23,6 @@ dnl with a changed API.
@@ -44,7 +44,7 @@ index 6b55bb8..6e7336f 100644
- fi
-
- use_gpgrt_config=""
-- if test x"$KSBA_CONFIG" = x -a x"$GPGRT_CONFIG" != x -a "$GPGRT_CONFIG" != "no"; then
+- if test x"$GPGRT_CONFIG" != x -a "$GPGRT_CONFIG" != "no"; then
- if $GPGRT_CONFIG ksba --exists; then
- KSBA_CONFIG="$GPGRT_CONFIG ksba"
- AC_MSG_NOTICE([Use gpgrt-config as ksba-config])
diff --git a/meta/recipes-support/libksba/libksba_1.6.0.bb b/meta/recipes-support/libksba/libksba_1.6.4.bb
index f9e83681dd..f9636f9433 100644
--- a/meta/recipes-support/libksba/libksba_1.6.0.bb
+++ b/meta/recipes-support/libksba/libksba_1.6.4.bb
@@ -24,7 +24,7 @@ UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html"
SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://ksba-add-pkgconfig-support.patch"
-SRC_URI[sha256sum] = "dad683e6f2d915d880aa4bed5cea9a115690b8935b78a1bbe01669189307a48b"
+SRC_URI[sha256sum] = "bbb43f032b9164d86c781ffe42213a83bf4f2fee91455edfa4654521b8b03b6b"
do_configure:prepend () {
# Else these could be used in preference to those in aclocal-copy
diff --git a/meta/recipes-support/libmicrohttpd/libmicrohttpd_0.9.75.bb b/meta/recipes-support/libmicrohttpd/libmicrohttpd_0.9.76.bb
index 9c99af7c91..ad3c34ab9e 100644
--- a/meta/recipes-support/libmicrohttpd/libmicrohttpd_0.9.75.bb
+++ b/meta/recipes-support/libmicrohttpd/libmicrohttpd_0.9.76.bb
@@ -7,7 +7,7 @@ SECTION = "net"
DEPENDS = "file"
SRC_URI = "${GNU_MIRROR}/libmicrohttpd/${BPN}-${PV}.tar.gz"
-SRC_URI[sha256sum] = "9278907a6f571b391aab9644fd646a5108ed97311ec66f6359cebbedb0a4e3bb"
+SRC_URI[sha256sum] = "f0b1547b5a42a6c0f724e8e1c1cb5ce9c4c35fb495e7d780b9930d35011ceb4c"
inherit autotools lib_package pkgconfig gettext
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch
new file mode 100644
index 0000000000..833348cdf1
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch
@@ -0,0 +1,75 @@
+From 94e1c001761373b7d9450768aa15d04c25547a35 Mon Sep 17 00:00:00 2001
+From: Philip Hazel <Philip.Hazel@gmail.com>
+Date: Tue, 16 Aug 2022 17:00:45 +0100
+Subject: [PATCH] Diagnose negative repeat value in pcre2test subject line
+
+CVE: CVE-2022-41409
+Upstream-Status: Backport [https://github.com/PCRE2Project/pcre2/commit/94e1c001761373b7d9450768aa15d04c25547a35]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ ChangeLog | 3 +++
+ src/pcre2test.c | 4 ++--
+ testdata/testinput2 | 3 +++
+ testdata/testoutput2 | 4 ++++
+ 4 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/ChangeLog b/ChangeLog
+index eab50eb7..276eb57a 100644
+--- a/ChangeLog
++++ b/ChangeLog
+index eab50eb7..276eb57a 100644
+@@ -1,6 +1,9 @@
+ Change Log for PCRE2
+ --------------------
+
++20. A negative repeat value in a pcre2test subject line was not being
++diagnosed, leading to infinite looping.
++
+
+ Version 10.40 15-April-2022
+ ---------------------------
+diff --git a/src/pcre2test.c b/src/pcre2test.c
+index 08f86096..f6f5d66c 100644
+--- a/src/pcre2test.c
++++ b/src/pcre2test.c
+@@ -6781,9 +6781,9 @@ while ((c = *p++) != 0)
+ }
+
+ i = (int32_t)li;
+- if (i-- == 0)
++ if (i-- <= 0)
+ {
+- fprintf(outfile, "** Zero repeat not allowed\n");
++ fprintf(outfile, "** Zero or negative repeat not allowed\n");
+ return PR_OK;
+ }
+
+diff --git a/testdata/testinput2 b/testdata/testinput2
+index d37d8f30..717ba2ae 100644
+--- a/testdata/testinput2
++++ b/testdata/testinput2
+@@ -5932,4 +5932,7 @@ a)"xI
+ /[Aa]{2,3}/BI
+ aabcd
+
++--
++ \[X]{-10}
++
+ # End of testinput2
+diff --git a/testdata/testoutput2 b/testdata/testoutput2
+index ce090f8c..d2188d3c 100644
+--- a/testdata/testoutput2
++++ b/testdata/testoutput2
+@@ -17746,6 +17746,10 @@ Subject length lower bound = 2
+ aabcd
+ 0: aa
+
++--
++ \[X]{-10}
++** Zero or negative repeat not allowed
++
+ # End of testinput2
+ Error -70: PCRE2_ERROR_BADDATA (unknown error number)
+ Error -62: bad serialized data
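
The pcre2test change above widens `i-- == 0` to `i-- <= 0`: a negative repeat count parsed from the subject line would otherwise pass the zero check and, after the post-decrement, drive an effectively unbounded loop. A small illustrative parser showing the difference (toy code, not pcre2test itself):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int run_repeats(const char *arg)
    {
        long li = strtol(arg, NULL, 10);
        int32_t i = (int32_t)li;

        if (i-- <= 0) {                   /* was: i-- == 0 */
            fprintf(stderr, "** Zero or negative repeat not allowed\n");
            return 1;
        }
        do {
            printf("iteration\n");
        } while (i-- > 0);
        return 0;
    }

    int main(void)
    {
        run_repeats("3");     /* three iterations */
        run_repeats("-10");   /* rejected instead of looping ~2^32 times */
        return 0;
    }
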
diff --git a/meta/recipes-support/libpcre/libpcre2_10.40.bb b/meta/recipes-support/libpcre/libpcre2_10.40.bb
index 3843d43b69..74c12ecec2 100644
--- a/meta/recipes-support/libpcre/libpcre2_10.40.bb
+++ b/meta/recipes-support/libpcre/libpcre2_10.40.bb
@@ -11,6 +11,7 @@ LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENCE;md5=41bfb977e4933c506588724ce69bf5d2"
SRC_URI = "https://github.com/PhilipHazel/pcre2/releases/download/pcre2-${PV}/pcre2-${PV}.tar.bz2 \
+ file://CVE-2022-41409.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/PhilipHazel/pcre2/releases"
diff --git a/meta/recipes-support/libseccomp/files/run-ptest b/meta/recipes-support/libseccomp/files/run-ptest
index 54b4a63cd2..63c79f09c4 100644
--- a/meta/recipes-support/libseccomp/files/run-ptest
+++ b/meta/recipes-support/libseccomp/files/run-ptest
@@ -1,4 +1,7 @@
#!/bin/sh
cd tests
+sed -i 's/SUCCESS/PASS/g; s/FAILURE/FAIL/g; s/SKIPPED/SKIP/g' regression
+sed -i 's/"Test %s result: %s\\n" "$1" "$2"/"%s: %s\\n" "$2" "$1"/g' regression
+sed -i 's/"Test %s result: %s %s\\n" "$1" "$2" "$3"/"%s: %s %s\\n" "$2" "$1" "$3"/g' regression
./regression -a
diff --git a/meta/recipes-support/libseccomp/libseccomp_2.5.3.bb b/meta/recipes-support/libseccomp/libseccomp_2.5.3.bb
index 4c0fb1d7b3..1f43686ade 100644
--- a/meta/recipes-support/libseccomp/libseccomp_2.5.3.bb
+++ b/meta/recipes-support/libseccomp/libseccomp_2.5.3.bb
@@ -1,5 +1,5 @@
SUMMARY = "interface to seccomp filtering mechanism"
-DESCRIPTION = "The libseccomp library provides and easy to use, platform independent,interface to the Linux Kernel's syscall filtering mechanism: seccomp."
+DESCRIPTION = "The libseccomp library provides an easy to use, platform independent, interface to the Linux Kernel's syscall filtering mechanism: seccomp."
HOMEPAGE = "https://github.com/seccomp/libseccomp"
SECTION = "security"
LICENSE = "LGPL-2.1-only"
diff --git a/meta/recipes-support/libsoup/libsoup_3.0.6.bb b/meta/recipes-support/libsoup/libsoup_3.0.7.bb
index 17825ae6a4..59cc4a1d0a 100644
--- a/meta/recipes-support/libsoup/libsoup_3.0.6.bb
+++ b/meta/recipes-support/libsoup/libsoup_3.0.7.bb
@@ -12,7 +12,7 @@ DEPENDS = "glib-2.0 glib-2.0-native libxml2 sqlite3 libpsl nghttp2"
SHRT_VER = "${@d.getVar('PV').split('.')[0]}.${@d.getVar('PV').split('.')[1]}"
SRC_URI = "${GNOME_MIRROR}/libsoup/${SHRT_VER}/libsoup-${PV}.tar.xz"
-SRC_URI[sha256sum] = "b45d59f840b9acf9bb45fd45854e3ef672f57e3ab957401c3ad8d7502ac23da6"
+SRC_URI[sha256sum] = "ebdf90cf3599c11acbb6818a9d9e3fc9d2c68e56eb829b93962972683e1bf7c8"
PROVIDES = "libsoup-3.0"
CVE_PRODUCT = "libsoup"
diff --git a/meta/recipes-support/libssh2/files/0001-Don-t-let-host-enviroment-to-decide-if-a-test-is-bui.patch b/meta/recipes-support/libssh2/files/0001-Don-t-let-host-enviroment-to-decide-if-a-test-is-bui.patch
deleted file mode 100644
index b1204e49eb..0000000000
--- a/meta/recipes-support/libssh2/files/0001-Don-t-let-host-enviroment-to-decide-if-a-test-is-bui.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From f6abce5ba41a412a247250dcd80e387e53474466 Mon Sep 17 00:00:00 2001
-From: Your Name <you@example.com>
-Date: Mon, 28 Dec 2020 02:08:03 +0000
-Subject: [PATCH] Don't let host enviroment to decide if a test is build
-
-test ssh2.sh need sshd, for cross compile, we need it on target, so
-don't use SSHD on host to decide weither to build a test
-
-Upstream-Status: Inappropriate[oe specific]
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
-
----
- tests/Makefile.am | 6 +-----
- 1 file changed, 1 insertion(+), 5 deletions(-)
-
-diff --git a/tests/Makefile.am b/tests/Makefile.am
-index dc0922f..6cbc35d 100644
---- a/tests/Makefile.am
-+++ b/tests/Makefile.am
-@@ -1,16 +1,12 @@
- AM_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include -I$(top_builddir)/src
- LDADD = ../src/libssh2.la
-
--if SSHD
- noinst_PROGRAMS = ssh2
- ssh2_SOURCES = ssh2.c
--endif
-
- ctests = simple$(EXEEXT)
- TESTS = $(ctests) mansyntax.sh
--if SSHD
- TESTS += ssh2.sh
--endif
- check_PROGRAMS = $(ctests)
-
- TESTS_ENVIRONMENT = SSHD=$(SSHD) EXEEXT=$(EXEEXT)
-@@ -38,4 +34,4 @@ if OPENSSL
- # EXTRA_DIST += test_public_key_auth_succeeds_with_correct_encrypted_ed25519_key.c
- # EXTRA_DIST += test_public_key_auth_succeeds_with_correct_ed25519_key_from_mem.c
- EXTRA_DIST += test_public_key_auth_succeeds_with_correct_rsa_openssh_key.c
--endif
-\ No newline at end of file
-+endif
diff --git a/meta/recipes-support/libssh2/libssh2/CVE-2020-22218.patch b/meta/recipes-support/libssh2/libssh2/CVE-2020-22218.patch
new file mode 100644
index 0000000000..066233fcae
--- /dev/null
+++ b/meta/recipes-support/libssh2/libssh2/CVE-2020-22218.patch
@@ -0,0 +1,34 @@
+CVE: CVE-2020-22218
+Upstream-Status: Backport [ https://github.com/libssh2/libssh2/commit/642eec48ff3adfdb7a9e562b6d7fc865d1733f45 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+
+From 642eec48ff3adfdb7a9e562b6d7fc865d1733f45 Mon Sep 17 00:00:00 2001
+From: lutianxiong <lutianxiong@huawei.com>
+Date: Fri, 29 May 2020 01:25:40 +0800
+Subject: [PATCH] transport.c: fix use-of-uninitialized-value (#476)
+
+file:transport.c
+
+notes:
+return error if malloc(0)
+
+credit:
+lutianxiong
+---
+ src/transport.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/transport.c b/src/transport.c
+index 96fca6b8cc..adf96c2437 100644
+--- a/src/transport.c
++++ b/src/transport.c
+@@ -472,7 +472,7 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session)
+ /* Get a packet handle put data into. We get one to
+ hold all data, including padding and MAC. */
+ p->payload = LIBSSH2_ALLOC(session, total_num);
+- if(!p->payload) {
++ if(total_num == 0 || !p->payload) {
+ return LIBSSH2_ERROR_ALLOC;
+ }
+ p->total_num = total_num;
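
The transport.c fix above rejects a packet whose computed payload size is zero before trusting the allocation: LIBSSH2_ALLOC(session, 0) can hand back a non-NULL pointer whose contents are then read uninitialised. A minimal sketch of the guard with a made-up packet struct, not the real libssh2 transport state:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct packet { unsigned char *payload; size_t total_num; };

    static int read_packet(struct packet *p, size_t total_num)
    {
        p->payload = malloc(total_num);
        if (total_num == 0 || !p->payload)   /* was: if (!p->payload) */
            return -1;                       /* LIBSSH2_ERROR_ALLOC upstream */
        memset(p->payload, 0, total_num);    /* stands in for reading the wire */
        p->total_num = total_num;
        return 0;
    }

    int main(void)
    {
        struct packet p = { NULL, 0 };
        printf("%d\n", read_packet(&p, 32)); /*  0: accepted */
        free(p.payload);
        p.payload = NULL;
        printf("%d\n", read_packet(&p, 0));  /* -1: zero-size packet rejected */
        free(p.payload);                     /* malloc(0) result, safe to free */
        return 0;
    }
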
diff --git a/meta/recipes-support/libssh2/libssh2/CVE-2023-48795.patch b/meta/recipes-support/libssh2/libssh2/CVE-2023-48795.patch
new file mode 100644
index 0000000000..c7a228217f
--- /dev/null
+++ b/meta/recipes-support/libssh2/libssh2/CVE-2023-48795.patch
@@ -0,0 +1,459 @@
+From d34d9258b8420b19ec3f97b4cc5bf7aa7d98e35a Mon Sep 17 00:00:00 2001
+From: Michael Buckley <michael@buckleyisms.com>
+Date: Thu, 30 Nov 2023 15:08:02 -0800
+Subject: [PATCH] src: add 'strict KEX' to fix CVE-2023-48795 "Terrapin Attack"
+
+Refs:
+https://terrapin-attack.com/ https://seclists.org/oss-sec/2023/q4/292
+https://osv.dev/list?ecosystem=&q=CVE-2023-48795 GHSA-45x7-px36-x8w8
+https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-48795
+
+Fixes #1290
+Closes #1291
+
+CVE: CVE-2023-48795
+Upstream-Status: Backport [https://github.com/libssh2/libssh2/commit/d34d9258b8420b19ec3f97b4cc5bf7aa7d98e35a]
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ src/kex.c | 64 +++++++++++++++++++++-------------
+ src/libssh2_priv.h | 18 +++++++---
+ src/packet.c | 85 +++++++++++++++++++++++++++++++++++++++++++---
+ src/packet.h | 2 +-
+ src/session.c | 3 ++
+ src/transport.c | 12 ++++++-
+ 6 files changed, 150 insertions(+), 34 deletions(-)
+
+diff --git a/src/kex.c b/src/kex.c
+index 9f3ef79..e040dcd 100644
+--- a/src/kex.c
++++ b/src/kex.c
+@@ -3026,6 +3026,13 @@ kex_method_ssh_curve25519_sha256 = {
+ };
+ #endif
+
++static const LIBSSH2_KEX_METHOD
++kex_method_strict_client_extension = {
++ "kex-strict-c-v00@openssh.com",
++ NULL,
++ 0,
++};
++
+ static const LIBSSH2_KEX_METHOD *libssh2_kex_methods[] = {
+ #if LIBSSH2_ED25519
+ &kex_method_ssh_curve25519_sha256,
+@@ -3043,6 +3050,7 @@ static const LIBSSH2_KEX_METHOD *libssh2_kex_methods[] = {
+ &kex_method_diffie_helman_group14_sha1,
+ &kex_method_diffie_helman_group1_sha1,
+ &kex_method_diffie_helman_group_exchange_sha1,
++ &kex_method_strict_client_extension,
+ NULL
+ };
+
+@@ -3281,13 +3289,13 @@ static int kexinit(LIBSSH2_SESSION * session)
+ return 0;
+ }
+
+-/* kex_agree_instr
++/* _libssh2_kex_agree_instr
+ * Kex specific variant of strstr()
+ * Needle must be precede by BOL or ',', and followed by ',' or EOL
+ */
+-static unsigned char *
+-kex_agree_instr(unsigned char *haystack, unsigned long haystack_len,
+- const unsigned char *needle, unsigned long needle_len)
++unsigned char *
++_libssh2_kex_agree_instr(unsigned char *haystack, size_t haystack_len,
++ const unsigned char *needle, size_t needle_len)
+ {
+ unsigned char *s;
+ unsigned char *end_haystack;
+@@ -3371,7 +3379,7 @@ static int kex_agree_hostkey(LIBSSH2_SESSION * session,
+ while(s && *s) {
+ unsigned char *p = (unsigned char *) strchr((char *) s, ',');
+ size_t method_len = (p ? (size_t)(p - s) : strlen((char *) s));
+- if(kex_agree_instr(hostkey, hostkey_len, s, method_len)) {
++ if(_libssh2_kex_agree_instr(hostkey, hostkey_len, s, method_len)) {
+ const LIBSSH2_HOSTKEY_METHOD *method =
+ (const LIBSSH2_HOSTKEY_METHOD *)
+ kex_get_method_by_name((char *) s, method_len,
+@@ -3405,9 +3413,9 @@ static int kex_agree_hostkey(LIBSSH2_SESSION * session,
+ }
+
+ while(hostkeyp && (*hostkeyp) && (*hostkeyp)->name) {
+- s = kex_agree_instr(hostkey, hostkey_len,
+- (unsigned char *) (*hostkeyp)->name,
+- strlen((*hostkeyp)->name));
++ s = _libssh2_kex_agree_instr(hostkey, hostkey_len,
++ (unsigned char *) (*hostkeyp)->name,
++ strlen((*hostkeyp)->name));
+ if(s) {
+ /* So far so good, but does it suit our purposes? (Encrypting vs
+ Signing) */
+@@ -3442,13 +3450,19 @@ static int kex_agree_kex_hostkey(LIBSSH2_SESSION * session, unsigned char *kex,
+ const LIBSSH2_KEX_METHOD **kexp = libssh2_kex_methods;
+ unsigned char *s;
+
++ const unsigned char *strict =
++ (unsigned char *)"kex-strict-s-v00@openssh.com";
++
++ if(_libssh2_kex_agree_instr(kex, kex_len, strict, 28)) {
++ session->kex_strict = 1;
++ }
+ if(session->kex_prefs) {
+ s = (unsigned char *) session->kex_prefs;
+
+ while(s && *s) {
+ unsigned char *q, *p = (unsigned char *) strchr((char *) s, ',');
+ size_t method_len = (p ? (size_t)(p - s) : strlen((char *) s));
+- q = kex_agree_instr(kex, kex_len, s, method_len);
++ q = _libssh2_kex_agree_instr(kex, kex_len, s, method_len);
+ if(q) {
+ const LIBSSH2_KEX_METHOD *method = (const LIBSSH2_KEX_METHOD *)
+ kex_get_method_by_name((char *) s, method_len,
+@@ -3482,9 +3496,9 @@ static int kex_agree_kex_hostkey(LIBSSH2_SESSION * session, unsigned char *kex,
+ }
+
+ while(*kexp && (*kexp)->name) {
+- s = kex_agree_instr(kex, kex_len,
+- (unsigned char *) (*kexp)->name,
+- strlen((*kexp)->name));
++ s = _libssh2_kex_agree_instr(kex, kex_len,
++ (unsigned char *) (*kexp)->name,
++ strlen((*kexp)->name));
+ if(s) {
+ /* We've agreed on a key exchange method,
+ * Can we agree on a hostkey that works with this kex?
+@@ -3528,7 +3542,7 @@ static int kex_agree_crypt(LIBSSH2_SESSION * session,
+ unsigned char *p = (unsigned char *) strchr((char *) s, ',');
+ size_t method_len = (p ? (size_t)(p - s) : strlen((char *) s));
+
+- if(kex_agree_instr(crypt, crypt_len, s, method_len)) {
++ if(_libssh2_kex_agree_instr(crypt, crypt_len, s, method_len)) {
+ const LIBSSH2_CRYPT_METHOD *method =
+ (const LIBSSH2_CRYPT_METHOD *)
+ kex_get_method_by_name((char *) s, method_len,
+@@ -3550,9 +3564,9 @@ static int kex_agree_crypt(LIBSSH2_SESSION * session,
+ }
+
+ while(*cryptp && (*cryptp)->name) {
+- s = kex_agree_instr(crypt, crypt_len,
+- (unsigned char *) (*cryptp)->name,
+- strlen((*cryptp)->name));
++ s = _libssh2_kex_agree_instr(crypt, crypt_len,
++ (unsigned char *) (*cryptp)->name,
++ strlen((*cryptp)->name));
+ if(s) {
+ endpoint->crypt = *cryptp;
+ return 0;
+@@ -3583,7 +3597,7 @@ static int kex_agree_mac(LIBSSH2_SESSION * session,
+ unsigned char *p = (unsigned char *) strchr((char *) s, ',');
+ size_t method_len = (p ? (size_t)(p - s) : strlen((char *) s));
+
+- if(kex_agree_instr(mac, mac_len, s, method_len)) {
++ if(_libssh2_kex_agree_instr(mac, mac_len, s, method_len)) {
+ const LIBSSH2_MAC_METHOD *method = (const LIBSSH2_MAC_METHOD *)
+ kex_get_method_by_name((char *) s, method_len,
+ (const LIBSSH2_COMMON_METHOD **)
+@@ -3604,8 +3618,9 @@ static int kex_agree_mac(LIBSSH2_SESSION * session,
+ }
+
+ while(*macp && (*macp)->name) {
+- s = kex_agree_instr(mac, mac_len, (unsigned char *) (*macp)->name,
+- strlen((*macp)->name));
++ s = _libssh2_kex_agree_instr(mac, mac_len,
++ (unsigned char *) (*macp)->name,
++ strlen((*macp)->name));
+ if(s) {
+ endpoint->mac = *macp;
+ return 0;
+@@ -3636,7 +3651,7 @@ static int kex_agree_comp(LIBSSH2_SESSION *session,
+ unsigned char *p = (unsigned char *) strchr((char *) s, ',');
+ size_t method_len = (p ? (size_t)(p - s) : strlen((char *) s));
+
+- if(kex_agree_instr(comp, comp_len, s, method_len)) {
++ if(_libssh2_kex_agree_instr(comp, comp_len, s, method_len)) {
+ const LIBSSH2_COMP_METHOD *method =
+ (const LIBSSH2_COMP_METHOD *)
+ kex_get_method_by_name((char *) s, method_len,
+@@ -3658,8 +3673,9 @@ static int kex_agree_comp(LIBSSH2_SESSION *session,
+ }
+
+ while(*compp && (*compp)->name) {
+- s = kex_agree_instr(comp, comp_len, (unsigned char *) (*compp)->name,
+- strlen((*compp)->name));
++ s = _libssh2_kex_agree_instr(comp, comp_len,
++ (unsigned char *) (*compp)->name,
++ strlen((*compp)->name));
+ if(s) {
+ endpoint->comp = *compp;
+ return 0;
+@@ -3856,7 +3872,8 @@ _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange,
+ session->local.kexinit = key_state->oldlocal;
+ session->local.kexinit_len = key_state->oldlocal_len;
+ key_state->state = libssh2_NB_state_idle;
+- session->state &= ~LIBSSH2_STATE_KEX_ACTIVE;
++ session->state &= ~LIBSSH2_STATE_INITIAL_KEX;
++ session->state &= ~LIBSSH2_STATE_KEX_ACTIVE;
+ session->state &= ~LIBSSH2_STATE_EXCHANGING_KEYS;
+ return -1;
+ }
+@@ -3904,6 +3921,7 @@ _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange,
+ session->remote.kexinit = NULL;
+ }
+
++ session->state &= ~LIBSSH2_STATE_INITIAL_KEX;
+ session->state &= ~LIBSSH2_STATE_KEX_ACTIVE;
+ session->state &= ~LIBSSH2_STATE_EXCHANGING_KEYS;
+
+diff --git a/src/libssh2_priv.h b/src/libssh2_priv.h
+index da488b7..7faeab6 100644
+--- a/src/libssh2_priv.h
++++ b/src/libssh2_priv.h
+@@ -640,6 +640,9 @@ struct _LIBSSH2_SESSION
+ unsigned char server_hostkey_sha256[SHA256_DIGEST_LENGTH];
+ int server_hostkey_sha256_valid;
+
++ /* Whether to use the OpenSSH Strict KEX extension */
++ int kex_strict;
++
+ /* (remote as source of data -- packet_read ) */
+ libssh2_endpoint_data remote;
+
+@@ -809,6 +812,7 @@ struct _LIBSSH2_SESSION
+ int fullpacket_macstate;
+ size_t fullpacket_payload_len;
+ int fullpacket_packet_type;
++ uint32_t fullpacket_required_type;
+
+ /* State variables used in libssh2_sftp_init() */
+ libssh2_nonblocking_states sftpInit_state;
+@@ -856,10 +860,11 @@ struct _LIBSSH2_SESSION
+ };
+
+ /* session.state bits */
+-#define LIBSSH2_STATE_EXCHANGING_KEYS 0x00000001
+-#define LIBSSH2_STATE_NEWKEYS 0x00000002
+-#define LIBSSH2_STATE_AUTHENTICATED 0x00000004
+-#define LIBSSH2_STATE_KEX_ACTIVE 0x00000008
++#define LIBSSH2_STATE_INITIAL_KEX 0x00000001
++#define LIBSSH2_STATE_EXCHANGING_KEYS 0x00000002
++#define LIBSSH2_STATE_NEWKEYS 0x00000004
++#define LIBSSH2_STATE_AUTHENTICATED 0x00000008
++#define LIBSSH2_STATE_KEX_ACTIVE 0x00000010
+
+ /* session.flag helpers */
+ #ifdef MSG_NOSIGNAL
+@@ -1076,6 +1081,11 @@ ssize_t _libssh2_send(libssh2_socket_t socket, const void *buffer,
+ int _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange,
+ key_exchange_state_t * state);
+
++unsigned char *_libssh2_kex_agree_instr(unsigned char *haystack,
++ size_t haystack_len,
++ const unsigned char *needle,
++ size_t needle_len);
++
+ /* Let crypt.c/hostkey.c expose their method structs */
+ const LIBSSH2_CRYPT_METHOD **libssh2_crypt_methods(void);
+ const LIBSSH2_HOSTKEY_METHOD **libssh2_hostkey_methods(void);
+diff --git a/src/packet.c b/src/packet.c
+index 04937d6..786ba40 100644
+--- a/src/packet.c
++++ b/src/packet.c
+@@ -467,14 +467,13 @@ packet_x11_open(LIBSSH2_SESSION * session, unsigned char *data,
+ * layer when it has received a packet.
+ *
+ * The input pointer 'data' is pointing to allocated data that this function
+- * is asked to deal with so on failure OR success, it must be freed fine.
+- * The only exception is when the return code is LIBSSH2_ERROR_EAGAIN.
++ * will be freed unless return the code is LIBSSH2_ERROR_EAGAIN.
+ *
+ * This function will always be called with 'datalen' greater than zero.
+ */
+ int
+ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data,
+- size_t datalen, int macstate)
++ size_t datalen, int macstate, uint32_t seq)
+ {
+ int rc = 0;
+ unsigned char *message = NULL;
+@@ -517,6 +516,70 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data,
+ break;
+ }
+
++ if(session->state & LIBSSH2_STATE_INITIAL_KEX) {
++ if(msg == SSH_MSG_KEXINIT) {
++ if(!session->kex_strict) {
++ if(datalen < 17) {
++ LIBSSH2_FREE(session, data);
++ session->packAdd_state = libssh2_NB_state_idle;
++ return _libssh2_error(session,
++ LIBSSH2_ERROR_BUFFER_TOO_SMALL,
++ "Data too short extracting kex");
++ }
++ else {
++ const unsigned char *strict =
++ (unsigned char *)"kex-strict-s-v00@openssh.com";
++ struct string_buf buf;
++ unsigned char *algs = NULL;
++ size_t algs_len = 0;
++
++ buf.data = (unsigned char *)data;
++ buf.dataptr = buf.data;
++ buf.len = datalen;
++ buf.dataptr += 17; /* advance past type and cookie */
++
++ if(_libssh2_get_string(&buf, &algs, &algs_len)) {
++ LIBSSH2_FREE(session, data);
++ session->packAdd_state = libssh2_NB_state_idle;
++ return _libssh2_error(session,
++ LIBSSH2_ERROR_BUFFER_TOO_SMALL,
++ "Algs too short");
++ }
++
++ if(algs_len == 0 ||
++ _libssh2_kex_agree_instr(algs, algs_len, strict, 28)) {
++ session->kex_strict = 1;
++ }
++ }
++ }
++
++ if(session->kex_strict && seq) {
++ LIBSSH2_FREE(session, data);
++ session->socket_state = LIBSSH2_SOCKET_DISCONNECTED;
++ session->packAdd_state = libssh2_NB_state_idle;
++ libssh2_session_disconnect(session, "strict KEX violation: "
++ "KEXINIT was not the first packet");
++
++ return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_DISCONNECT,
++ "strict KEX violation: "
++ "KEXINIT was not the first packet");
++ }
++ }
++
++ if(session->kex_strict && session->fullpacket_required_type &&
++ session->fullpacket_required_type != msg) {
++ LIBSSH2_FREE(session, data);
++ session->socket_state = LIBSSH2_SOCKET_DISCONNECTED;
++ session->packAdd_state = libssh2_NB_state_idle;
++ libssh2_session_disconnect(session, "strict KEX violation: "
++ "unexpected packet type");
++
++ return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_DISCONNECT,
++ "strict KEX violation: "
++ "unexpected packet type");
++ }
++ }
++
+ if(session->packAdd_state == libssh2_NB_state_allocated) {
+ /* A couple exceptions to the packet adding rule: */
+ switch(msg) {
+@@ -1118,7 +1181,16 @@ _libssh2_packet_ask(LIBSSH2_SESSION * session, unsigned char packet_type,
+
+ return 0;
+ }
+- packet = _libssh2_list_next(&packet->node);
++ else if(session->kex_strict &&
++ (session->state & LIBSSH2_STATE_INITIAL_KEX)) {
++ libssh2_session_disconnect(session, "strict KEX violation: "
++ "unexpected packet type");
++
++ return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_DISCONNECT,
++ "strict KEX violation: "
++ "unexpected packet type");
++ }
++ packet = _libssh2_list_next(&packet->node);
+ }
+ return -1;
+ }
+@@ -1179,7 +1251,10 @@ _libssh2_packet_require(LIBSSH2_SESSION * session, unsigned char packet_type,
+ }
+
+ while(session->socket_state == LIBSSH2_SOCKET_CONNECTED) {
+- int ret = _libssh2_transport_read(session);
++ int ret;
++ session->fullpacket_required_type = packet_type;
++ ret = _libssh2_transport_read(session);
++ session->fullpacket_required_type = 0;
+ if(ret == LIBSSH2_ERROR_EAGAIN)
+ return ret;
+ else if(ret < 0) {
+diff --git a/src/packet.h b/src/packet.h
+index 79018bc..08ea2a2 100644
+--- a/src/packet.h
++++ b/src/packet.h
+@@ -71,6 +71,6 @@ int _libssh2_packet_burn(LIBSSH2_SESSION * session,
+ int _libssh2_packet_write(LIBSSH2_SESSION * session, unsigned char *data,
+ unsigned long data_len);
+ int _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data,
+- size_t datalen, int macstate);
++ size_t datalen, int macstate, uint32_t seq);
+
+ #endif /* __LIBSSH2_PACKET_H */
+diff --git a/src/session.c b/src/session.c
+index 212560b..019b9ed 100644
+--- a/src/session.c
++++ b/src/session.c
+@@ -500,6 +500,8 @@ libssh2_session_init_ex(LIBSSH2_ALLOC_FUNC((*my_alloc)),
+ session->abstract = abstract;
+ session->api_timeout = 0; /* timeout-free API by default */
+ session->api_block_mode = 1; /* blocking API by default */
++ session->state = LIBSSH2_STATE_INITIAL_KEX;
++ session->fullpacket_required_type = 0;
+ _libssh2_debug(session, LIBSSH2_TRACE_TRANS,
+ "New session resource allocated");
+ _libssh2_init_if_needed();
+@@ -1171,6 +1173,7 @@ libssh2_session_disconnect_ex(LIBSSH2_SESSION *session, int reason,
+ const char *desc, const char *lang)
+ {
+ int rc;
++ session->state &= ~LIBSSH2_STATE_INITIAL_KEX;
+ session->state &= ~LIBSSH2_STATE_EXCHANGING_KEYS;
+ BLOCK_ADJUST(rc, session,
+ session_disconnect(session, reason, desc, lang));
+diff --git a/src/transport.c b/src/transport.c
+index 1074fc2..6823b63 100644
+--- a/src/transport.c
++++ b/src/transport.c
+@@ -168,6 +168,7 @@ fullpacket(LIBSSH2_SESSION * session, int encrypted /* 1 or 0 */ )
+ struct transportpacket *p = &session->packet;
+ int rc;
+ int compressed;
++ uint32_t seq = session->remote.seqno;
+
+ if(session->fullpacket_state == libssh2_NB_state_idle) {
+ session->fullpacket_macstate = LIBSSH2_MAC_CONFIRMED;
+@@ -240,7 +241,7 @@ fullpacket(LIBSSH2_SESSION * session, int encrypted /* 1 or 0 */ )
+ if(session->fullpacket_state == libssh2_NB_state_created) {
+ rc = _libssh2_packet_add(session, p->payload,
+ session->fullpacket_payload_len,
+- session->fullpacket_macstate);
++ session->fullpacket_macstate, seq);
+ if(rc == LIBSSH2_ERROR_EAGAIN)
+ return rc;
+ if(rc) {
+@@ -251,6 +252,11 @@ fullpacket(LIBSSH2_SESSION * session, int encrypted /* 1 or 0 */ )
+
+ session->fullpacket_state = libssh2_NB_state_idle;
+
++ if(session->kex_strict &&
++ session->fullpacket_packet_type == SSH_MSG_NEWKEYS) {
++ session->remote.seqno = 0;
++ }
++
+ return session->fullpacket_packet_type;
+ }
+
+@@ -892,6 +898,10 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session,
+
+ session->local.seqno++;
+
++ if(session->kex_strict && data[0] == SSH_MSG_NEWKEYS) {
++ session->local.seqno = 0;
++ }
++
+ ret = LIBSSH2_SEND(session, p->outbuf, total_length,
+ LIBSSH2_SOCKET_SEND_FLAGS(session));
+ if(ret < 0)
+--
+2.40.0
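
Condensed to its two observable rules, the strict-KEX backport above requires that, once both peers advertise the kex-strict-*-v00@openssh.com pseudo-algorithms, KEXINIT is the very first packet of the connection and packet sequence numbers reset to zero at every NEWKEYS, which is what defeats the Terrapin prefix-truncation trick. The sketch below models only those two rules with a hypothetical session struct; the real change also threads a fullpacket_required_type through _libssh2_packet_require():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SSH_MSG_IGNORE   2
    #define SSH_MSG_KEXINIT 20
    #define SSH_MSG_NEWKEYS 21

    struct session { bool kex_strict; bool initial_kex; uint32_t recv_seqno; };

    /* 0 if the packet is acceptable, -1 if the connection must be dropped */
    static int accept_packet(struct session *s, int msg)
    {
        if (s->kex_strict && s->initial_kex &&
            msg == SSH_MSG_KEXINIT && s->recv_seqno != 0)
            return -1;    /* strict KEX violation: KEXINIT was not first */

        /* simplified: the real code keys off the packet type it requires */
        if (s->kex_strict && s->initial_kex &&
            msg != SSH_MSG_KEXINIT && msg != SSH_MSG_NEWKEYS)
            return -1;    /* strict KEX violation: unexpected packet type */

        s->recv_seqno++;
        if (s->kex_strict && msg == SSH_MSG_NEWKEYS) {
            s->recv_seqno = 0;           /* sequence numbers restart here */
            s->initial_kex = false;
        }
        return 0;
    }

    int main(void)
    {
        struct session s = { true, true, 0 };
        printf("%d\n", accept_packet(&s, SSH_MSG_KEXINIT)); /*  0          */
        printf("%d\n", accept_packet(&s, SSH_MSG_NEWKEYS)); /*  0, reset   */

        struct session t = { true, true, 0 };
        printf("%d\n", accept_packet(&t, SSH_MSG_IGNORE));  /* -1, dropped */
        return 0;
    }
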
diff --git a/meta/recipes-support/libssh2/libssh2/fix-ssh2-test.patch b/meta/recipes-support/libssh2/libssh2/fix-ssh2-test.patch
new file mode 100644
index 0000000000..ee916c42d4
--- /dev/null
+++ b/meta/recipes-support/libssh2/libssh2/fix-ssh2-test.patch
@@ -0,0 +1,23 @@
+In 8.8 OpenSSH disabled sha1 rsa-sha keys out of the box,
+so we need to re-enable them as a workaround for the test
+suite until upstream updates the tests.
+
+See: https://github.com/libssh2/libssh2/issues/630
+
+Upstream-Status: Backport [alternative fixes merged upstream]
+
+Patch taken from https://github.com/mirror-rpm/libssh2/commit/47f7114f7d0780f3075bad51a71881f45cc933c5
+
+--- a/tests/ssh2.sh
++++ b/tests/ssh2.sh
+@@ -25,7 +25,8 @@ $SSHD -f /dev/null -h "$srcdir"/etc/host
+ -o 'Port 4711' \
+ -o 'Protocol 2' \
+ -o "AuthorizedKeysFile $srcdir/etc/user.pub" \
+- -o 'UsePrivilegeSeparation no' \
++ -o 'HostKeyAlgorithms +ssh-rsa' \
++ -o 'PubkeyAcceptedAlgorithms +ssh-rsa' \
+ -o 'StrictModes no' \
+ -D \
+ $libssh2_sshd_params &
+
diff --git a/meta/recipes-support/libssh2/files/run-ptest b/meta/recipes-support/libssh2/libssh2/run-ptest
index 9e2fce2d24..5e7426f79d 100644
--- a/meta/recipes-support/libssh2/files/run-ptest
+++ b/meta/recipes-support/libssh2/libssh2/run-ptest
@@ -2,8 +2,7 @@
ptestdir=$(dirname "$(readlink -f "$0")")
cd tests
-# omit ssh2.sh until https://github.com/libssh2/libssh2/issues/630 is fixed
-for test in simple mansyntax.sh
+for test in simple mansyntax.sh ssh2.sh
do
./../test-driver --test-name $test --log-file ../$test.log --trs-file ../$test.trs --color-tests no --enable-hard-errors yes --expect-failure no -- ./$test
done
diff --git a/meta/recipes-support/libssh2/libssh2_1.10.0.bb b/meta/recipes-support/libssh2/libssh2_1.10.0.bb
index 072d6819c0..8fd77996d5 100644
--- a/meta/recipes-support/libssh2/libssh2_1.10.0.bb
+++ b/meta/recipes-support/libssh2/libssh2_1.10.0.bb
@@ -8,11 +8,12 @@ LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://COPYING;md5=3e089ad0cf27edf1e7f261dfcd06acc7"
SRC_URI = "http://www.libssh2.org/download/${BP}.tar.gz \
+ file://fix-ssh2-test.patch \
file://run-ptest \
+ file://CVE-2020-22218.patch \
+ file://CVE-2023-48795.patch \
"
-SRC_URI:append:ptest = " file://0001-Don-t-let-host-enviroment-to-decide-if-a-test-is-bui.patch"
-
SRC_URI[sha256sum] = "2d64e90f3ded394b91d3a2e774ca203a4179f69aebee03003e5a6fa621e41d51"
inherit autotools pkgconfig ptest
diff --git a/meta/recipes-support/liburcu/liburcu_0.13.1.bb b/meta/recipes-support/liburcu/liburcu_0.13.2.bb
index 66763349d2..6ecf2e21c0 100644
--- a/meta/recipes-support/liburcu/liburcu_0.13.1.bb
+++ b/meta/recipes-support/liburcu/liburcu_0.13.2.bb
@@ -12,7 +12,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=e548d28737289d75a8f1e01ba2fd7825 \
SRC_URI = "http://lttng.org/files/urcu/userspace-rcu-${PV}.tar.bz2"
-SRC_URI[sha256sum] = "3213f33d2b8f710eb920eb1abb279ec04bf8ae6361f44f2513c28c20d3363083"
+SRC_URI[sha256sum] = "1213fd9f1b0b74da7de2bb74335b76098db9738fec5d3cdc07c0c524f34fc032"
S = "${WORKDIR}/userspace-rcu-${PV}"
inherit autotools multilib_header
diff --git a/meta/recipes-support/libusb/libusb1/0001-configure.ac-Link-with-latomic-only-if-no-atomic-bui.patch b/meta/recipes-support/libusb/libusb1/0001-configure.ac-Link-with-latomic-only-if-no-atomic-bui.patch
new file mode 100644
index 0000000000..3c223e0822
--- /dev/null
+++ b/meta/recipes-support/libusb/libusb1/0001-configure.ac-Link-with-latomic-only-if-no-atomic-bui.patch
@@ -0,0 +1,46 @@
+From 95e601ce116dd46ea7915c171976b85ea0905d58 Mon Sep 17 00:00:00 2001
+From: Lonnie Abelbeck <lonnie@abelbeck.com>
+Date: Sun, 8 May 2022 14:05:56 -0500
+Subject: [PATCH] configure.ac: Link with -latomic only if no atomic builtins
+
+Follow-up to 561dbda, a check of GCC atomic builtins needs to be done
+first.
+
+I'm no autoconf guru, but using this:
+https://github.com/mesa3d/mesa/blob/0df485c285b73c34ba9062f0c27e55c3c702930d/configure.ac#L469
+as inspiration, I created a pre-check before calling AC_SEARCH_LIBS(...)
+
+Fixes #1135
+Closes #1139
+Upstream-Status: Backport [https://github.com/kraj/libusb/commit/95e601ce116dd46ea7915c171976b85ea0905d58]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ configure.ac | 16 +++++++++++++++-
+ libusb/version_nano.h | 2 +-
+ 2 files changed, 16 insertions(+), 2 deletions(-)
+
+--- a/configure.ac
++++ b/configure.ac
+@@ -153,7 +153,21 @@ if test "x$platform" = xposix; then
+ AC_SEARCH_LIBS([pthread_create], [pthread],
+ [test "x$ac_cv_search_pthread_create" != "xnone required" && AC_SUBST(THREAD_LIBS, [-lpthread])],
+ [], [])
+- AC_SEARCH_LIBS([__atomic_fetch_add_4], [atomic])
++ dnl Check for new-style atomic builtins. We first check without linking to -latomic.
++ AC_MSG_CHECKING(whether __atomic_load_n is supported)
++ AC_LINK_IFELSE([AC_LANG_SOURCE([[
++ #include <stdint.h>
++ int main() {
++ struct {
++ uint64_t *v;
++ } x;
++ return (int)__atomic_load_n(x.v, __ATOMIC_ACQUIRE) &
++ (int)__atomic_add_fetch(x.v, (uint64_t)1, __ATOMIC_ACQ_REL);
++ }]])], GCC_ATOMIC_BUILTINS_SUPPORTED=yes, GCC_ATOMIC_BUILTINS_SUPPORTED=no)
++ AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_SUPPORTED)
++ if test "x$GCC_ATOMIC_BUILTINS_SUPPORTED" != xyes; then
++ AC_SEARCH_LIBS([__atomic_fetch_add_4], [atomic])
++ fi
+ elif test "x$platform" = xwindows; then
+ AC_DEFINE([PLATFORM_WINDOWS], [1], [Define to 1 if compiling for a Windows platform.])
+ else
diff --git a/meta/recipes-support/libusb/libusb1_1.0.26.bb b/meta/recipes-support/libusb/libusb1_1.0.26.bb
index fd63e7adc2..18ab612d13 100644
--- a/meta/recipes-support/libusb/libusb1_1.0.26.bb
+++ b/meta/recipes-support/libusb/libusb1_1.0.26.bb
@@ -11,6 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=fbc093901857fcd118f065f900982c24"
BBCLASSEXTEND = "native nativesdk"
SRC_URI = "https://github.com/libusb/libusb/releases/download/v${PV}/libusb-${PV}.tar.bz2 \
+ file://0001-configure.ac-Link-with-latomic-only-if-no-atomic-bui.patch \
file://run-ptest \
"
@@ -34,12 +35,12 @@ do_install:append() {
fi
}
-do_compile_ptest() {
- oe_runmake -C tests stress
-}
-
-do_install_ptest() {
- install -m 755 ${B}/tests/.libs/stress ${D}${PTEST_PATH}
+do_compile_ptest() {
+ oe_runmake -C tests stress
+}
+
+do_install_ptest() {
+ install -m 755 ${B}/tests/.libs/stress ${D}${PTEST_PATH}
}
FILES:${PN} += "${base_libdir}/*.so.*"
diff --git a/meta/recipes-support/lz4/files/CVE-2021-3520.patch b/meta/recipes-support/lz4/files/CVE-2021-3520.patch
deleted file mode 100644
index 5ac8f6691f..0000000000
--- a/meta/recipes-support/lz4/files/CVE-2021-3520.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 8301a21773ef61656225e264f4f06ae14462bca7 Mon Sep 17 00:00:00 2001
-From: Jasper Lievisse Adriaanse <j@jasper.la>
-Date: Fri, 26 Feb 2021 15:21:20 +0100
-Subject: [PATCH] Fix potential memory corruption with negative memmove() size
-
-Upstream-Status: Backport
-https://github.com/lz4/lz4/commit/8301a21773ef61656225e264f4f06ae14462bca7#diff-7055e9cf14c488aea9837aaf9f528b58ee3c22988d7d0d81d172ec62d94a88a7
-CVE: CVE-2021-3520
-Signed-off-by: Armin Kuster <akuster@mvista.com>
-
----
- lib/lz4.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Index: git/lib/lz4.c
-===================================================================
---- git.orig/lib/lz4.c
-+++ git/lib/lz4.c
-@@ -1665,7 +1665,7 @@ LZ4_decompress_generic(
- const size_t dictSize /* note : = 0 if noDict */
- )
- {
-- if (src == NULL) { return -1; }
-+ if ((src == NULL) || (outputSize < 0)) { return -1; }
-
- { const BYTE* ip = (const BYTE*) src;
- const BYTE* const iend = ip + srcSize;
diff --git a/meta/recipes-support/lz4/lz4_1.9.3.bb b/meta/recipes-support/lz4/lz4_1.9.4.bb
index 129a86b681..a2a178bab5 100644
--- a/meta/recipes-support/lz4/lz4_1.9.3.bb
+++ b/meta/recipes-support/lz4/lz4_1.9.4.bb
@@ -3,18 +3,16 @@ DESCRIPTION = "LZ4 is a very fast lossless compression algorithm, providing comp
HOMEPAGE = "https://github.com/lz4/lz4"
LICENSE = "BSD-2-Clause | GPL-2.0-only"
-LIC_FILES_CHKSUM = "file://lib/LICENSE;md5=ebc2ea4814a64de7708f1571904b32cc \
+LIC_FILES_CHKSUM = "file://lib/LICENSE;md5=5cd5f851b52ec832b10eedb3f01f885a \
file://programs/COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
- file://LICENSE;md5=d57c0d21cb917fb4e0af2454aa48b956 \
+ file://LICENSE;md5=c5cc3cd6f9274b4d32988096df9c3ec3 \
"
PE = "1"
-SRCREV = "d44371841a2f1728a3f36839fd4b7e872d0927d3"
+SRCREV = "5ff839680134437dbf4678f3d0c7b371d84f4964"
-SRC_URI = "git://github.com/lz4/lz4.git;branch=release;protocol=https \
- file://CVE-2021-3520.patch \
- "
+SRC_URI = "git://github.com/lz4/lz4.git;branch=release;protocol=https"
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>.*)"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-support/mpfr/mpfr_4.1.0.bb b/meta/recipes-support/mpfr/mpfr_4.1.1.bb
index 2121dad57c..f531a88961 100644
--- a/meta/recipes-support/mpfr/mpfr_4.1.0.bb
+++ b/meta/recipes-support/mpfr/mpfr_4.1.1.bb
@@ -12,7 +12,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=1ebbd3e34237af26da5dc08a4e440464 \
DEPENDS = "gmp autoconf-archive"
SRC_URI = "https://www.mpfr.org/mpfr-${PV}/mpfr-${PV}.tar.xz"
-SRC_URI[sha256sum] = "0c98a3f1732ff6ca4ea690552079da9c597872d30e96ec28414ee23c95558a7f"
+SRC_URI[sha256sum] = "ffd195bd567dbaffc3b98b23fd00aad0537680c9896171e44fe3ff79e28ac33d"
UPSTREAM_CHECK_URI = "http://www.mpfr.org/mpfr-current/"
diff --git a/meta/recipes-support/nghttp2/nghttp2/CVE-2023-35945.patch b/meta/recipes-support/nghttp2/nghttp2/CVE-2023-35945.patch
new file mode 100644
index 0000000000..e03915fda8
--- /dev/null
+++ b/meta/recipes-support/nghttp2/nghttp2/CVE-2023-35945.patch
@@ -0,0 +1,151 @@
+From ce385d3f55a4b76da976b3bdf71fe2deddf315ba Mon Sep 17 00:00:00 2001
+From: Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
+Date: Thu, 24 Aug 2023 09:34:26 +0000
+Subject: [PATCH] Fix memory leak
+
+This commit fixes memory leak that happens when PUSH_PROMISE or
+HEADERS frame cannot be sent, and nghttp2_on_stream_close_callback
+fails with a fatal error. For example, if GOAWAY frame has been
+received, a HEADERS frame that opens new stream cannot be sent.
+
+This issue has already been made public via CVE-2023-35945 [1] issued
+by envoyproxy/envoy project. During embargo period, the patch to fix
+this bug was accidentally submitted to nghttp2/nghttp2 repository [2].
+And they decided to disclose CVE early. I was notified just 1.5 hours
+before disclosure. I had no time to respond.
+
+PoC described in [1] is quite simple, but I think it is not enough to
+trigger this bug. While it is true that receiving GOAWAY prevents a
+client from opening new stream, and nghttp2 enters error handling
+branch, in order to cause the memory leak,
+nghttp2_session_close_stream function must return a fatal error.
+nghttp2 defines 2 fatal error codes:
+
+- NGHTTP2_ERR_NOMEM
+- NGHTTP2_ERR_CALLBACK_FAILURE
+
+NGHTTP2_ERR_NOMEM, as its name suggests, indicates out of memory. It
+is unlikely that a process gets short of memory with this simple PoC
+scenario unless application does something memory heavy processing.
+
+NGHTTP2_ERR_CALLBACK_FAILURE is returned from application defined
+callback function (nghttp2_on_stream_close_callback, in this case),
+which indicates something fatal happened inside a callback, and a
+connection must be closed immediately without any further action. As
+nghttp2_on_stream_close_error_callback documentation says, any error
+code other than 0 or NGHTTP2_ERR_CALLBACK_FAILURE is treated as fatal
+error code. More specifically, it is treated as if
+NGHTTP2_ERR_CALLBACK_FAILURE is returned. I guess that envoy returns
+NGHTTP2_ERR_CALLBACK_FAILURE or other error code which is translated
+into NGHTTP2_ERR_CALLBACK_FAILURE.
+
+[1] https://github.com/envoyproxy/envoy/security/advisories/GHSA-jfxv-29pc-x22r
+[2] https://github.com/nghttp2/nghttp2/pull/1929
+
+CVE: CVE-2023-35945
+
+Upstream-Status: Backport [https://github.com/nghttp2/nghttp2/commit/ce385d3f55a4b76da976b3bdf71fe2deddf315ba]
+
+Signed-off-by: Yogita Urade <yogita.urade@windriver.com>
+---
+ lib/nghttp2_session.c | 10 +++++-----
+ tests/nghttp2_session_test.c | 34 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 39 insertions(+), 5 deletions(-)
+
+diff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c
+index 380a47c..2d9285f 100644
+--- a/lib/nghttp2_session.c
++++ b/lib/nghttp2_session.c
+@@ -2940,6 +2940,7 @@ static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session,
+ if (rv < 0) {
+ int32_t opened_stream_id = 0;
+ uint32_t error_code = NGHTTP2_INTERNAL_ERROR;
++ int rv2 = 0;
+
+ DEBUGF("send: frame preparation failed with %s\n",
+ nghttp2_strerror(rv));
+@@ -2982,19 +2983,18 @@ static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session,
+ }
+ if (opened_stream_id) {
+ /* careful not to override rv */
+- int rv2;
+ rv2 = nghttp2_session_close_stream(session, opened_stream_id,
+ error_code);
+-
+- if (nghttp2_is_fatal(rv2)) {
+- return rv2;
+- }
+ }
+
+ nghttp2_outbound_item_free(item, mem);
+ nghttp2_mem_free(mem, item);
+ active_outbound_item_reset(aob, mem);
+
++ if (nghttp2_is_fatal(rv2)) {
++ return rv2;
++ }
++
+ if (rv == NGHTTP2_ERR_HEADER_COMP) {
+ /* If header compression error occurred, should terminiate
+ connection. */
+diff --git a/tests/nghttp2_session_test.c b/tests/nghttp2_session_test.c
+index cb6bdf7..c2778bc 100644
+--- a/tests/nghttp2_session_test.c
++++ b/tests/nghttp2_session_test.c
+@@ -584,6 +584,15 @@ static int on_stream_close_callback(nghttp2_session *session, int32_t stream_id,
+ return 0;
+ }
+
++static int fatal_error_on_stream_close_callback(nghttp2_session *session,
++ int32_t stream_id,
++ uint32_t error_code,
++ void *user_data) {
++ on_stream_close_callback(session, stream_id, error_code, user_data);
++
++ return NGHTTP2_ERR_CALLBACK_FAILURE;
++}
++
+ static ssize_t pack_extension_callback(nghttp2_session *session, uint8_t *buf,
+ size_t len, const nghttp2_frame *frame,
+ void *user_data) {
+@@ -3906,6 +3915,8 @@ void test_nghttp2_session_on_goaway_received(void) {
+ nghttp2_frame frame;
+ int i;
+ nghttp2_mem *mem;
++ const uint8_t *data;
++ ssize_t datalen;
+
+ mem = nghttp2_mem_default();
+ user_data.frame_recv_cb_called = 0;
+@@ -3947,6 +3958,29 @@ void test_nghttp2_session_on_goaway_received(void) {
+
+ nghttp2_frame_goaway_free(&frame.goaway, mem);
+ nghttp2_session_del(session);
++
++ /* Make sure that no memory leak when stream_close callback fails
++ with a fatal error */
++ memset(&callbacks, 0, sizeof(nghttp2_session_callbacks));
++ callbacks.on_stream_close_callback = fatal_error_on_stream_close_callback;
++
++ memset(&user_data, 0, sizeof(user_data));
++
++ nghttp2_session_client_new(&session, &callbacks, &user_data);
++
++ nghttp2_frame_goaway_init(&frame.goaway, 0, NGHTTP2_NO_ERROR, NULL, 0);
++
++ CU_ASSERT(0 == nghttp2_session_on_goaway_received(session, &frame));
++
++ nghttp2_submit_request(session, NULL, reqnv, ARRLEN(reqnv), NULL, NULL);
++
++ datalen = nghttp2_session_mem_send(session, &data);
++
++ CU_ASSERT(NGHTTP2_ERR_CALLBACK_FAILURE == datalen);
++ CU_ASSERT(1 == user_data.stream_close_cb_called);
++
++ nghttp2_frame_goaway_free(&frame.goaway, mem);
++ nghttp2_session_del(session);
+ }
+
+ void test_nghttp2_session_on_window_update_received(void) {
+--
+2.35.5
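
For reference only — not part of the backport above — a minimal sketch of the failure mode the new regression test exercises: an application-defined stream-close callback that reports a fatal error, which before this fix could leak the unsent HEADERS outbound item. The helper names are illustrative; the nghttp2 calls are the public equivalents of what the test wires up directly.

```c
/* Sketch, assuming only the public nghttp2 API referenced in the backport. */
#include <nghttp2/nghttp2.h>
#include <stdio.h>

static int fatal_on_stream_close(nghttp2_session *session, int32_t stream_id,
                                 uint32_t error_code, void *user_data) {
  (void)session; (void)user_data;
  fprintf(stderr, "stream %d closed, error %u\n", stream_id, error_code);
  /* Returning a fatal code here is the condition the new test covers. */
  return NGHTTP2_ERR_CALLBACK_FAILURE;
}

int main(void) {
  nghttp2_session *session;
  nghttp2_session_callbacks *callbacks;

  nghttp2_session_callbacks_new(&callbacks);
  nghttp2_session_callbacks_set_on_stream_close_callback(callbacks,
                                                         fatal_on_stream_close);
  nghttp2_session_client_new(&session, callbacks, NULL);

  /* ... receive GOAWAY via nghttp2_session_mem_recv(), submit a request,
   * then call nghttp2_session_mem_send(); with the fix applied the fatal
   * return no longer leaks the pending outbound item ... */

  nghttp2_session_del(session);
  nghttp2_session_callbacks_del(callbacks);
  return 0;
}
```
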
diff --git a/meta/recipes-support/nghttp2/nghttp2/CVE-2023-44487.patch b/meta/recipes-support/nghttp2/nghttp2/CVE-2023-44487.patch
new file mode 100644
index 0000000000..3cba83307c
--- /dev/null
+++ b/meta/recipes-support/nghttp2/nghttp2/CVE-2023-44487.patch
@@ -0,0 +1,927 @@
+From 72b4af6143681f528f1d237b21a9a7aee1738832 Mon Sep 17 00:00:00 2001
+From: Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
+Date: Sun, 1 Oct 2023 00:05:01 +0900
+Subject: [PATCH] Rework session management
+
+CVE: CVE-2023-44487
+
+Upstream-Status: Backport [https://github.com/nghttp2/nghttp2/commit/72b4af6143681f528f1d237b21a9a7aee1738832]
+
+Signed-off-by: Zahir Hussain <zahir.basha@kpit.com>
+Signed-off-by: aszh07 <mail2szahir@gmail.com>
+---
+CMakeLists.txt | 4 ++
+cmakeconfig.h.in | 9 +++
+configure.ac | 21 +++++++
+doc/Makefile.am | 1 +
+lib/CMakeLists.txt | 2 +
+lib/Makefile.am | 4 ++
+lib/includes/nghttp2/nghttp2.h | 17 ++++++
+lib/nghttp2_option.c | 7 +++
+lib/nghttp2_ratelim.c | 75 ++++++++++++++++++++++++
+lib/nghttp2_ratelim.h | 57 ++++++++++++++++++
+lib/nghttp2_session.c | 34 ++++++++++-
+lib/nghttp2_session.h | 12 +++-
+lib/nghttp2_time.c | 62 ++++++++++++++++++++
+lib/nghttp2_time.h | 38 ++++++++++++
+tests/nghttp2_ratelim_test.c | 101 ++++++++++++++++++++++++++++++++
+tests/nghttp2_ratelim_test.h | 35 +++++++++++
+tests/nghttp2_session_test.c | 103 +++++++++++++++++++++++++++++++++
+tests/nghttp2_session_test.h | 1 +
+tests/CMakeLists.txt | 1 +
+tests/Makefile.am | 6 +-
+lib/nghttp2_option.h | 6 ++
+tests/main.c | 7 ++-
+22 files changed, 598 insertions(+), 5 deletions(-)
+create mode 100644 lib/nghttp2_ratelim.c
+create mode 100644 lib/nghttp2_ratelim.h
+create mode 100644 lib/nghttp2_time.c
+create mode 100644 lib/nghttp2_time.h
+create mode 100644 tests/nghttp2_ratelim_test.c
+create mode 100644 tests/nghttp2_ratelim_test.h
+
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -262,6 +262,7 @@ check_include_file("netinet/in.h" HAVE
+ check_include_file("pwd.h" HAVE_PWD_H)
+ check_include_file("sys/socket.h" HAVE_SYS_SOCKET_H)
+ check_include_file("sys/time.h" HAVE_SYS_TIME_H)
++check_include_file("sysinfoapi.h" HAVE_SYSINFOAPI_H)
+ check_include_file("syslog.h" HAVE_SYSLOG_H)
+ check_include_file("time.h" HAVE_TIME_H)
+ check_include_file("unistd.h" HAVE_UNISTD_H)
+@@ -302,8 +303,11 @@ check_type_size("time_t" SIZEOF_TIME_T)
+ include(CheckFunctionExists)
+ check_function_exists(_Exit HAVE__EXIT)
+ check_function_exists(accept4 HAVE_ACCEPT4)
++check_function_exists(clock_gettime HAVE_CLOCK_GETTIME)
+ check_function_exists(mkostemp HAVE_MKOSTEMP)
+
++check_symbol_exists(GetTickCount64 sysinfoapi.h HAVE_GETTICKCOUNT64)
++
+ include(CheckSymbolExists)
+ # XXX does this correctly detect initgroups (un)availability on cygwin?
+ check_symbol_exists(initgroups grp.h HAVE_DECL_INITGROUPS)
+--- a/cmakeconfig.h.in
++++ b/cmakeconfig.h.in
+@@ -34,9 +34,15 @@
+ /* Define to 1 if you have the `accept4` function. */
+ #cmakedefine HAVE_ACCEPT4 1
+
++/* Define to 1 if you have the `clock_gettime` function. */
++#cmakedefine HAVE_CLOCK_GETTIME 1
++
+ /* Define to 1 if you have the `mkostemp` function. */
+ #cmakedefine HAVE_MKOSTEMP 1
+
++/* Define to 1 if you have the `GetTickCount64` function. */
++#cmakedefine HAVE_GETTICKCOUNT64 1
++
+ /* Define to 1 if you have the `initgroups` function. */
+ #cmakedefine01 HAVE_DECL_INITGROUPS
+
+@@ -73,6 +79,9 @@
+ /* Define to 1 if you have the <sys/time.h> header file. */
+ #cmakedefine HAVE_SYS_TIME_H 1
+
++/* Define to 1 if you have the <sysinfoapi.h> header file. */
++#cmakedefine HAVE_SYSINFOAPI_H 1
++
+ /* Define to 1 if you have the <syslog.h> header file. */
+ #cmakedefine HAVE_SYSLOG_H 1
+
+--- a/configure.ac
++++ b/configure.ac
+@@ -607,6 +607,7 @@ AC_CHECK_HEADERS([ \
+ string.h \
+ sys/socket.h \
+ sys/time.h \
++ sysinfoapi.h \
+ syslog.h \
+ time.h \
+ unistd.h \
+@@ -681,6 +682,7 @@ AC_FUNC_STRNLEN
+ AC_CHECK_FUNCS([ \
+ _Exit \
+ accept4 \
++ clock_gettime \
+ dup2 \
+ getcwd \
+ getpwnam \
+@@ -706,6 +708,25 @@ AC_CHECK_FUNCS([ \
+ AC_CHECK_FUNC([timerfd_create],
+ [have_timerfd_create=yes], [have_timerfd_create=no])
+
++AC_MSG_CHECKING([checking for GetTickCount64])
++AC_LINK_IFELSE([AC_LANG_PROGRAM(
++[[
++#include <sysinfoapi.h>
++]],
++[[
++GetTickCount64();
++]])],
++[have_gettickcount64=yes],
++[have_gettickcount64=no])
++
++if test "x${have_gettickcount64}" = "xyes"; then
++ AC_MSG_RESULT([yes])
++ AC_DEFINE([HAVE_GETTICKCOUNT64], [1],
++ [Define to 1 if you have `GetTickCount64` function.])
++else
++ AC_MSG_RESULT([no])
++fi
++
+ # For cygwin: we can link initgroups, so AC_CHECK_FUNCS succeeds, but
+ # cygwin disables initgroups due to feature test macro magic with our
+ # configuration. FreeBSD declares initgroups() in unistd.h.
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -69,6 +69,7 @@ APIDOCS= \
+ nghttp2_option_set_user_recv_extension_type.rst \
+ nghttp2_option_set_max_outbound_ack.rst \
+ nghttp2_option_set_max_settings.rst \
++ nghttp2_option_set_stream_reset_rate_limit.rst \
+ nghttp2_pack_settings_payload.rst \
+ nghttp2_priority_spec_check_default.rst \
+ nghttp2_priority_spec_default_init.rst \
+--- a/lib/CMakeLists.txt
++++ b/lib/CMakeLists.txt
+@@ -23,6 +23,8 @@ set(NGHTTP2_SOURCES
+ nghttp2_mem.c
+ nghttp2_http.c
+ nghttp2_rcbuf.c
++ nghttp2_ratelim.c
++ nghttp2_time.c
+ nghttp2_debug.c
+ )
+
+--- a/lib/Makefile.am
++++ b/lib/Makefile.am
+@@ -49,6 +49,8 @@ OBJECTS = nghttp2_pq.c nghttp2_map.c ngh
+ nghttp2_mem.c \
+ nghttp2_http.c \
+ nghttp2_rcbuf.c \
++ nghttp2_ratelim.c \
++ nghttp2_time.c \
+ nghttp2_debug.c
+
+ HFILES = nghttp2_pq.h nghttp2_int.h nghttp2_map.h nghttp2_queue.h \
+@@ -65,6 +67,8 @@ HFILES = nghttp2_pq.h nghttp2_int.h nght
+ nghttp2_mem.h \
+ nghttp2_http.h \
+ nghttp2_rcbuf.h \
++ nghttp2_ratelim.h \
++ nghttp2_time.h \
+ nghttp2_debug.h
+
+ libnghttp2_la_SOURCES = $(HFILES) $(OBJECTS)
+--- a/lib/includes/nghttp2/nghttp2.h
++++ b/lib/includes/nghttp2/nghttp2.h
+@@ -2763,6 +2763,23 @@ nghttp2_session_client_new2(nghttp2_sess
+ /**
+ * @function
+ *
++ * This function sets the rate limit for the incoming stream reset
++ * (RST_STREAM frame). It is server use only. It is a token-bucket
++ * based rate limiter. |burst| specifies the number of tokens that is
++ * initially available. The maximum number of tokens is capped to
++ * this value. |rate| specifies the number of tokens that are
++ * regenerated per second. An incoming RST_STREAM consumes one token.
++ * If there is no token available, GOAWAY is sent to tear down the
++ * connection. |burst| and |rate| default to 1000 and 33
++ * respectively.
++ */
++NGHTTP2_EXTERN void
++nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option,
++ uint64_t burst, uint64_t rate);
++
++/**
++ * @function
++ *
+ * Like `nghttp2_session_server_new()`, but with additional options
+ * specified in the |option|.
+ *
+--- a/lib/nghttp2_option.c
++++ b/lib/nghttp2_option.c
+@@ -126,3 +126,10 @@ void nghttp2_option_set_max_settings(ngh
+ option->opt_set_mask |= NGHTTP2_OPT_MAX_SETTINGS;
+ option->max_settings = val;
+ }
++
++void nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option,
++ uint64_t burst, uint64_t rate) {
++ option->opt_set_mask |= NGHTTP2_OPT_STREAM_RESET_RATE_LIMIT;
++ option->stream_reset_burst = burst;
++ option->stream_reset_rate = rate;
++}
+--- /dev/null
++++ b/lib/nghttp2_ratelim.c
+@@ -0,0 +1,75 @@
++/*
++ * nghttp2 - HTTP/2 C Library
++ *
++ * Copyright (c) 2023 nghttp2 contributors
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "nghttp2_ratelim.h"
++#include "nghttp2_helper.h"
++
++void nghttp2_ratelim_init(nghttp2_ratelim *rl, uint64_t burst, uint64_t rate) {
++ rl->val = rl->burst = burst;
++ rl->rate = rate;
++ rl->tstamp = 0;
++}
++
++void nghttp2_ratelim_update(nghttp2_ratelim *rl, uint64_t tstamp) {
++ uint64_t d, gain;
++
++ if (tstamp == rl->tstamp) {
++ return;
++ }
++
++ if (tstamp > rl->tstamp) {
++ d = tstamp - rl->tstamp;
++ } else {
++ d = 1;
++ }
++
++ rl->tstamp = tstamp;
++
++ if (UINT64_MAX / d < rl->rate) {
++ rl->val = rl->burst;
++
++ return;
++ }
++
++ gain = rl->rate * d;
++
++ if (UINT64_MAX - gain < rl->val) {
++ rl->val = rl->burst;
++
++ return;
++ }
++
++ rl->val += gain;
++ rl->val = nghttp2_min(rl->val, rl->burst);
++}
++
++int nghttp2_ratelim_drain(nghttp2_ratelim *rl, uint64_t n) {
++ if (rl->val < n) {
++ return -1;
++ }
++
++ rl->val -= n;
++
++ return 0;
++}
+--- /dev/null
++++ b/lib/nghttp2_ratelim.h
+@@ -0,0 +1,57 @@
++/*
++ * nghttp2 - HTTP/2 C Library
++ *
++ * Copyright (c) 2023 nghttp2 contributors
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef NGHTTP2_RATELIM_H
++#define NGHTTP2_RATELIM_H
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif /* HAVE_CONFIG_H */
++
++#include <nghttp2/nghttp2.h>
++
++typedef struct nghttp2_ratelim {
++ /* burst is the maximum value of val. */
++ uint64_t burst;
++ /* rate is the amount of value that is regenerated per 1 tstamp. */
++ uint64_t rate;
++ /* val is the amount of value available to drain. */
++ uint64_t val;
++ /* tstamp is the last timestamp in second resolution that is known
++ to this object. */
++ uint64_t tstamp;
++} nghttp2_ratelim;
++
++/* nghttp2_ratelim_init initializes |rl| with the given parameters. */
++void nghttp2_ratelim_init(nghttp2_ratelim *rl, uint64_t burst, uint64_t rate);
++
++/* nghttp2_ratelim_update updates rl->val with the current |tstamp|
++ given in second resolution. */
++void nghttp2_ratelim_update(nghttp2_ratelim *rl, uint64_t tstamp);
++
++/* nghttp2_ratelim_drain drains |n| from rl->val. It returns 0 if it
++ succeeds, or -1. */
++int nghttp2_ratelim_drain(nghttp2_ratelim *rl, uint64_t n);
++
++#endif /* NGHTTP2_RATELIM_H */
+--- a/lib/nghttp2_session.c
++++ b/lib/nghttp2_session.c
+@@ -36,6 +36,7 @@
+ #include "nghttp2_option.h"
+ #include "nghttp2_http.h"
+ #include "nghttp2_pq.h"
++#include "nghttp2_time.h"
+ #include "nghttp2_debug.h"
+
+ /*
+@@ -443,6 +444,10 @@ static int session_new(nghttp2_session *
+ NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS;
+ (*session_ptr)->pending_enable_push = 1;
+
++ nghttp2_ratelim_init(&(*session_ptr)->stream_reset_ratelim,
++ NGHTTP2_DEFAULT_STREAM_RESET_BURST,
++ NGHTTP2_DEFAULT_STREAM_RESET_RATE);
++
+ if (server) {
+ (*session_ptr)->server = 1;
+ }
+@@ -527,6 +532,12 @@ static int session_new(nghttp2_session *
+ option->max_settings) {
+ (*session_ptr)->max_settings = option->max_settings;
+ }
++
++ if (option->opt_set_mask & NGHTTP2_OPT_STREAM_RESET_RATE_LIMIT) {
++ nghttp2_ratelim_init(&(*session_ptr)->stream_reset_ratelim,
++ option->stream_reset_burst,
++ option->stream_reset_rate);
++ }
+ }
+
+ rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater,
+@@ -4144,6 +4155,23 @@ static int session_process_priority_fram
+ return nghttp2_session_on_priority_received(session, frame);
+ }
+
++static int session_update_stream_reset_ratelim(nghttp2_session *session) {
++ if (!session->server || (session->goaway_flags & NGHTTP2_GOAWAY_SUBMITTED)) {
++ return 0;
++ }
++
++ nghttp2_ratelim_update(&session->stream_reset_ratelim,
++ nghttp2_time_now_sec());
++
++ if (nghttp2_ratelim_drain(&session->stream_reset_ratelim, 1) == 0) {
++ return 0;
++ }
++
++ return nghttp2_session_add_goaway(session, session->last_recv_stream_id,
++ NGHTTP2_INTERNAL_ERROR, NULL, 0,
++ NGHTTP2_GOAWAY_AUX_NONE);
++}
++
+ int nghttp2_session_on_rst_stream_received(nghttp2_session *session,
+ nghttp2_frame *frame) {
+ int rv;
+@@ -4173,7 +4201,8 @@ int nghttp2_session_on_rst_stream_receiv
+ if (nghttp2_is_fatal(rv)) {
+ return rv;
+ }
+- return 0;
++
++ return session_update_stream_reset_ratelim(session);
+ }
+
+ static int session_process_rst_stream_frame(nghttp2_session *session) {
+@@ -6965,6 +6994,9 @@ int nghttp2_session_add_goaway(nghttp2_s
+ nghttp2_mem_free(mem, item);
+ return rv;
+ }
++
++ session->goaway_flags |= NGHTTP2_GOAWAY_SUBMITTED;
++
+ return 0;
+ }
+
+--- a/lib/nghttp2_session.h
++++ b/lib/nghttp2_session.h
+@@ -39,6 +39,7 @@
+ #include "nghttp2_buf.h"
+ #include "nghttp2_callbacks.h"
+ #include "nghttp2_mem.h"
++#include "nghttp2_ratelim.h"
+
+ /* The global variable for tests where we want to disable strict
+ preface handling. */
+@@ -102,6 +103,10 @@ typedef struct {
+ /* The default value of maximum number of concurrent streams. */
+ #define NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS 0xffffffffu
+
++/* The default values for stream reset rate limiter. */
++#define NGHTTP2_DEFAULT_STREAM_RESET_BURST 1000
++#define NGHTTP2_DEFAULT_STREAM_RESET_RATE 33
++
+ /* Internal state when receiving incoming frame */
+ typedef enum {
+ /* Receiving frame header */
+@@ -176,7 +181,9 @@ typedef enum {
+ /* Flag means GOAWAY was sent */
+ NGHTTP2_GOAWAY_SENT = 0x4,
+ /* Flag means GOAWAY was received */
+- NGHTTP2_GOAWAY_RECV = 0x8
++ NGHTTP2_GOAWAY_RECV = 0x8,
++ /* Flag means GOAWAY has been submitted at least once */
++ NGHTTP2_GOAWAY_SUBMITTED = 0x10
+ } nghttp2_goaway_flag;
+
+ /* nghttp2_inflight_settings stores the SETTINGS entries which local
+@@ -230,6 +237,9 @@ struct nghttp2_session {
+ /* Queue of In-flight SETTINGS values. SETTINGS bearing ACK is not
+ considered as in-flight. */
+ nghttp2_inflight_settings *inflight_settings_head;
++ /* Stream reset rate limiter. If receiving excessive amount of
++ stream resets, GOAWAY will be sent. */
++ nghttp2_ratelim stream_reset_ratelim;
+ /* The number of outgoing streams. This will be capped by
+ remote_settings.max_concurrent_streams. */
+ size_t num_outgoing_streams;
+--- /dev/null
++++ b/lib/nghttp2_time.c
+@@ -0,0 +1,62 @@
++/*
++ * nghttp2 - HTTP/2 C Library
++ *
++ * Copyright (c) 2023 nghttp2 contributors
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "nghttp2_time.h"
++
++#ifdef HAVE_TIME_H
++# include <time.h>
++#endif /* HAVE_TIME_H */
++
++#ifdef HAVE_SYSINFOAPI_H
++# include <sysinfoapi.h>
++#endif /* HAVE_SYSINFOAPI_H */
++
++#ifndef HAVE_GETTICKCOUNT64
++static uint64_t time_now_sec(void) {
++ time_t t = time(NULL);
++
++ if (t == -1) {
++ return 0;
++ }
++
++ return (uint64_t)t;
++}
++#endif /* HAVE_GETTICKCOUNT64 */
++
++#ifdef HAVE_CLOCK_GETTIME
++uint64_t nghttp2_time_now_sec(void) {
++ struct timespec tp;
++ int rv = clock_gettime(CLOCK_MONOTONIC, &tp);
++
++ if (rv == -1) {
++ return time_now_sec();
++ }
++
++ return (uint64_t)tp.tv_sec;
++}
++#elif defined(HAVE_GETTICKCOUNT64)
++uint64_t nghttp2_time_now_sec(void) { return GetTickCount64() / 1000; }
++#else /* !HAVE_CLOCK_GETTIME && !HAVE_GETTICKCOUNT64 */
++uint64_t nghttp2_time_now_sec(void) { return time_now_sec(); }
++#endif /* !HAVE_CLOCK_GETTIME && !HAVE_GETTICKCOUNT64 */
+--- /dev/null
++++ b/lib/nghttp2_time.h
+@@ -0,0 +1,38 @@
++/*
++ * nghttp2 - HTTP/2 C Library
++ *
++ * Copyright (c) 2023 nghttp2 contributors
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef NGHTTP2_TIME_H
++#define NGHTTP2_TIME_H
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif /* HAVE_CONFIG_H */
++
++#include <nghttp2/nghttp2.h>
++
++/* nghttp2_time_now_sec returns seconds from implementation-specific
++ timepoint. If it is unable to get seconds, it returns 0. */
++uint64_t nghttp2_time_now_sec(void);
++
++#endif /* NGHTTP2_TIME_H */
+--- /dev/null
++++ b/tests/nghttp2_ratelim_test.c
+@@ -0,0 +1,101 @@
++/*
++ * nghttp2 - HTTP/2 C Library
++ *
++ * Copyright (c) 2023 nghttp2 contributors
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "nghttp2_ratelim_test.h"
++
++#include <stdio.h>
++
++#include <CUnit/CUnit.h>
++
++#include "nghttp2_ratelim.h"
++
++void test_nghttp2_ratelim_update(void) {
++ nghttp2_ratelim rl;
++
++ nghttp2_ratelim_init(&rl, 1000, 21);
++
++ CU_ASSERT(1000 == rl.val);
++ CU_ASSERT(1000 == rl.burst);
++ CU_ASSERT(21 == rl.rate);
++ CU_ASSERT(0 == rl.tstamp);
++
++ nghttp2_ratelim_update(&rl, 999);
++
++ CU_ASSERT(1000 == rl.val);
++ CU_ASSERT(999 == rl.tstamp);
++
++ nghttp2_ratelim_drain(&rl, 100);
++
++ CU_ASSERT(900 == rl.val);
++
++ nghttp2_ratelim_update(&rl, 1000);
++
++ CU_ASSERT(921 == rl.val);
++
++ nghttp2_ratelim_update(&rl, 1002);
++
++ CU_ASSERT(963 == rl.val);
++
++ nghttp2_ratelim_update(&rl, 1004);
++
++ CU_ASSERT(1000 == rl.val);
++ CU_ASSERT(1004 == rl.tstamp);
++
++ /* timer skew */
++ nghttp2_ratelim_init(&rl, 1000, 21);
++ nghttp2_ratelim_update(&rl, 1);
++
++ CU_ASSERT(1000 == rl.val);
++
++ nghttp2_ratelim_update(&rl, 0);
++
++ CU_ASSERT(1000 == rl.val);
++
++ /* rate * duration overflow */
++ nghttp2_ratelim_init(&rl, 1000, 100);
++ nghttp2_ratelim_drain(&rl, 999);
++
++ CU_ASSERT(1 == rl.val);
++
++ nghttp2_ratelim_update(&rl, UINT64_MAX);
++
++ CU_ASSERT(1000 == rl.val);
++
++ /* val + rate * duration overflow */
++ nghttp2_ratelim_init(&rl, UINT64_MAX - 1, 2);
++ nghttp2_ratelim_update(&rl, 1);
++
++ CU_ASSERT(UINT64_MAX - 1 == rl.val);
++}
++
++void test_nghttp2_ratelim_drain(void) {
++ nghttp2_ratelim rl;
++
++ nghttp2_ratelim_init(&rl, 100, 7);
++
++ CU_ASSERT(-1 == nghttp2_ratelim_drain(&rl, 101));
++ CU_ASSERT(0 == nghttp2_ratelim_drain(&rl, 51));
++ CU_ASSERT(0 == nghttp2_ratelim_drain(&rl, 49));
++ CU_ASSERT(-1 == nghttp2_ratelim_drain(&rl, 1));
++}
+--- /dev/null
++++ b/tests/nghttp2_ratelim_test.h
+@@ -0,0 +1,35 @@
++/*
++ * nghttp2 - HTTP/2 C Library
++ *
++ * Copyright (c) 2023 nghttp2 contributors
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef NGHTTP2_RATELIM_TEST_H
++#define NGHTTP2_RATELIM_TEST_H
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif /* HAVE_CONFIG_H */
++
++void test_nghttp2_ratelim_update(void);
++void test_nghttp2_ratelim_drain(void);
++
++#endif /* NGHTTP2_RATELIM_TEST_H */
+--- a/tests/nghttp2_session_test.c
++++ b/tests/nghttp2_session_test.c
+@@ -10813,6 +10813,109 @@ void test_nghttp2_session_set_stream_use
+ nghttp2_session_del(session);
+ }
+
++void test_nghttp2_session_stream_reset_ratelim(void) {
++ nghttp2_session *session;
++ nghttp2_session_callbacks callbacks;
++ nghttp2_frame frame;
++ ssize_t rv;
++ nghttp2_bufs bufs;
++ nghttp2_buf *buf;
++ nghttp2_mem *mem;
++ size_t i;
++ nghttp2_hd_deflater deflater;
++ size_t nvlen;
++ nghttp2_nv *nva;
++ int32_t stream_id;
++ nghttp2_outbound_item *item;
++ nghttp2_option *option;
++
++ mem = nghttp2_mem_default();
++ frame_pack_bufs_init(&bufs);
++
++ memset(&callbacks, 0, sizeof(nghttp2_session_callbacks));
++ callbacks.send_callback = null_send_callback;
++
++ nghttp2_option_new(&option);
++ nghttp2_option_set_stream_reset_rate_limit(
++ option, NGHTTP2_DEFAULT_STREAM_RESET_BURST, 0);
++
++ nghttp2_session_server_new2(&session, &callbacks, NULL, option);
++
++ nghttp2_frame_settings_init(&frame.settings, NGHTTP2_FLAG_NONE, NULL, 0);
++ rv = nghttp2_frame_pack_settings(&bufs, &frame.settings);
++
++ CU_ASSERT(0 == rv);
++
++ nghttp2_frame_settings_free(&frame.settings, mem);
++
++ buf = &bufs.head->buf;
++ rv = nghttp2_session_mem_recv(session, buf->pos, nghttp2_buf_len(buf));
++
++ CU_ASSERT((ssize_t)nghttp2_buf_len(buf) == rv);
++
++ /* Send SETTINGS ACK */
++ rv = nghttp2_session_send(session);
++
++ CU_ASSERT(0 == rv);
++
++ nghttp2_hd_deflate_init(&deflater, mem);
++
++ for (i = 0; i < NGHTTP2_DEFAULT_STREAM_RESET_BURST + 2; ++i) {
++ stream_id = (int32_t)(i * 2 + 1);
++
++ nghttp2_bufs_reset(&bufs);
++
++ /* HEADERS */
++ nvlen = ARRLEN(reqnv);
++ nghttp2_nv_array_copy(&nva, reqnv, nvlen, mem);
++ nghttp2_frame_headers_init(&frame.headers, NGHTTP2_FLAG_END_HEADERS,
++ stream_id, NGHTTP2_HCAT_HEADERS, NULL, nva,
++ nvlen);
++ rv = nghttp2_frame_pack_headers(&bufs, &frame.headers, &deflater);
++
++ CU_ASSERT(0 == rv);
++
++ nghttp2_frame_headers_free(&frame.headers, mem);
++
++ buf = &bufs.head->buf;
++ rv = nghttp2_session_mem_recv(session, buf->pos, nghttp2_buf_len(buf));
++
++ CU_ASSERT((ssize_t)nghttp2_buf_len(buf) == rv);
++
++ nghttp2_bufs_reset(&bufs);
++
++ /* RST_STREAM */
++ nghttp2_frame_rst_stream_init(&frame.rst_stream, stream_id,
++ NGHTTP2_NO_ERROR);
++ nghttp2_frame_pack_rst_stream(&bufs, &frame.rst_stream);
++ nghttp2_frame_rst_stream_free(&frame.rst_stream);
++
++ buf = &bufs.head->buf;
++ rv = nghttp2_session_mem_recv(session, buf->pos, nghttp2_buf_len(buf));
++
++ CU_ASSERT((ssize_t)nghttp2_buf_len(buf) == rv);
++
++ if (i < NGHTTP2_DEFAULT_STREAM_RESET_BURST) {
++ CU_ASSERT(0 == nghttp2_outbound_queue_size(&session->ob_reg));
++
++ continue;
++ }
++
++ CU_ASSERT(1 == nghttp2_outbound_queue_size(&session->ob_reg));
++
++ item = nghttp2_session_get_next_ob_item(session);
++
++ CU_ASSERT(NGHTTP2_GOAWAY == item->frame.hd.type);
++ CU_ASSERT(NGHTTP2_DEFAULT_STREAM_RESET_BURST * 2 + 1 ==
++ item->frame.goaway.last_stream_id);
++ }
++
++ nghttp2_hd_deflate_free(&deflater);
++ nghttp2_session_del(session);
++ nghttp2_bufs_free(&bufs);
++ nghttp2_option_del(option);
++}
++
+ static void check_nghttp2_http_recv_headers_fail(
+ nghttp2_session *session, nghttp2_hd_deflater *deflater, int32_t stream_id,
+ int stream_state, const nghttp2_nv *nva, size_t nvlen) {
+--- a/tests/nghttp2_session_test.h
++++ b/tests/nghttp2_session_test.h
+@@ -160,6 +160,7 @@ void test_nghttp2_session_removed_closed
+ void test_nghttp2_session_pause_data(void);
+ void test_nghttp2_session_no_closed_streams(void);
+ void test_nghttp2_session_set_stream_user_data(void);
++void test_nghttp2_session_stream_reset_ratelim(void);
+ void test_nghttp2_http_mandatory_headers(void);
+ void test_nghttp2_http_content_length(void);
+ void test_nghttp2_http_content_length_mismatch(void);
+--- a/tests/CMakeLists.txt
++++ b/tests/CMakeLists.txt
+@@ -21,6 +21,7 @@ if(HAVE_CUNIT)
+ nghttp2_npn_test.c
+ nghttp2_helper_test.c
+ nghttp2_buf_test.c
++ nghttp2_ratelim_test.c
+ )
+
+ add_executable(main EXCLUDE_FROM_ALL
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -40,14 +40,16 @@ OBJECTS = main.c nghttp2_pq_test.c nghtt
+ nghttp2_hd_test.c \
+ nghttp2_npn_test.c \
+ nghttp2_helper_test.c \
+- nghttp2_buf_test.c
++ nghttp2_buf_test.c \
++ nghttp2_ratelim_test.c
+
+ HFILES = nghttp2_pq_test.h nghttp2_map_test.h nghttp2_queue_test.h \
+ nghttp2_session_test.h \
+ nghttp2_frame_test.h nghttp2_stream_test.h nghttp2_hd_test.h \
+ nghttp2_npn_test.h nghttp2_helper_test.h \
+ nghttp2_test_helper.h \
+- nghttp2_buf_test.h
++ nghttp2_buf_test.h \
++ nghttp2_ratelim_test.h
+
+ main_SOURCES = $(HFILES) $(OBJECTS)
+
+--- a/lib/nghttp2_option.h
++++ b/lib/nghttp2_option.h
+@@ -68,6 +68,7 @@ typedef enum {
+ NGHTTP2_OPT_NO_CLOSED_STREAMS = 1 << 10,
+ NGHTTP2_OPT_MAX_OUTBOUND_ACK = 1 << 11,
+ NGHTTP2_OPT_MAX_SETTINGS = 1 << 12,
++ NGHTTP2_OPT_STREAM_RESET_RATE_LIMIT = 1 << 15,
+ } nghttp2_option_flag;
+
+ /**
+@@ -75,6 +76,11 @@ typedef enum {
+ */
+ struct nghttp2_option {
+ /**
++ * NGHTTP2_OPT_STREAM_RESET_RATE_LIMIT
++ */
++ uint64_t stream_reset_burst;
++ uint64_t stream_reset_rate;
++ /**
+ * NGHTTP2_OPT_MAX_SEND_HEADER_BLOCK_LENGTH
+ */
+ size_t max_send_header_block_length;
+--- a/tests/main.c
++++ b/tests/main.c
+@@ -40,6 +40,7 @@
+ #include "nghttp2_npn_test.h"
+ #include "nghttp2_helper_test.h"
+ #include "nghttp2_buf_test.h"
++#include "nghttp2_ratelim_test.h"
+
+ extern int nghttp2_enable_strict_preface;
+
+@@ -323,6 +324,8 @@ int main() {
+ test_nghttp2_session_no_closed_streams) ||
+ !CU_add_test(pSuite, "session_set_stream_user_data",
+ test_nghttp2_session_set_stream_user_data) ||
++ !CU_add_test(pSuite, "session_stream_reset_ratelim",
++ test_nghttp2_session_stream_reset_ratelim) ||
+ !CU_add_test(pSuite, "http_mandatory_headers",
+ test_nghttp2_http_mandatory_headers) ||
+ !CU_add_test(pSuite, "http_content_length",
+@@ -418,7 +421,9 @@ int main() {
+ !CU_add_test(pSuite, "bufs_advance", test_nghttp2_bufs_advance) ||
+ !CU_add_test(pSuite, "bufs_next_present",
+ test_nghttp2_bufs_next_present) ||
+- !CU_add_test(pSuite, "bufs_realloc", test_nghttp2_bufs_realloc)) {
++ !CU_add_test(pSuite, "bufs_realloc", test_nghttp2_bufs_realloc) ||
++ !CU_add_test(pSuite, "ratelim_update", test_nghttp2_ratelim_update) ||
++ !CU_add_test(pSuite, "ratelim_drain", test_nghttp2_ratelim_drain)) {
+ CU_cleanup_registry();
+ return (int)CU_get_error();
+ }
diff --git a/meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0001.patch b/meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0001.patch
new file mode 100644
index 0000000000..e1d909b0d1
--- /dev/null
+++ b/meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0001.patch
@@ -0,0 +1,110 @@
+From 00201ecd8f982da3b67d4f6868af72a1b03b14e0 Mon Sep 17 00:00:00 2001
+From: Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
+Date: Sat, 9 Mar 2024 16:26:42 +0900
+Subject: [PATCH] Limit CONTINUATION frames following an incoming HEADER frame
+
+CVE: CVE-2024-28182
+
+Upstream-Status: Backport [https://github.com/nghttp2/nghttp2/commit/00201ecd8f982da3b67d4f6868af72a1b03b14e0]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ lib/includes/nghttp2/nghttp2.h | 7 ++++++-
+ lib/nghttp2_helper.c | 2 ++
+ lib/nghttp2_session.c | 7 +++++++
+ lib/nghttp2_session.h | 10 ++++++++++
+ 4 files changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/lib/includes/nghttp2/nghttp2.h b/lib/includes/nghttp2/nghttp2.h
+index 2bd35f4..6cc8c0c 100644
+--- a/lib/includes/nghttp2/nghttp2.h
++++ b/lib/includes/nghttp2/nghttp2.h
+@@ -440,7 +440,12 @@ typedef enum {
+ * exhaustion on server side to send these frames forever and does
+ * not read network.
+ */
+- NGHTTP2_ERR_FLOODED = -904
++ NGHTTP2_ERR_FLOODED = -904,
++ /**
++ * When a local endpoint receives too many CONTINUATION frames
++ * following a HEADER frame.
++ */
++ NGHTTP2_ERR_TOO_MANY_CONTINUATIONS = -905,
+ } nghttp2_error;
+
+ /**
+diff --git a/lib/nghttp2_helper.c b/lib/nghttp2_helper.c
+index 588e269..98989f6 100644
+--- a/lib/nghttp2_helper.c
++++ b/lib/nghttp2_helper.c
+@@ -336,6 +336,8 @@ const char *nghttp2_strerror(int error_code) {
+ "closed";
+ case NGHTTP2_ERR_TOO_MANY_SETTINGS:
+ return "SETTINGS frame contained more than the maximum allowed entries";
++ case NGHTTP2_ERR_TOO_MANY_CONTINUATIONS:
++ return "Too many CONTINUATION frames following a HEADER frame";
+ default:
+ return "Unknown error code";
+ }
+diff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c
+index 5c834fa..537127c 100644
+--- a/lib/nghttp2_session.c
++++ b/lib/nghttp2_session.c
+@@ -464,6 +464,7 @@ static int session_new(nghttp2_session **session_ptr,
+ (*session_ptr)->max_send_header_block_length = NGHTTP2_MAX_HEADERSLEN;
+ (*session_ptr)->max_outbound_ack = NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM;
+ (*session_ptr)->max_settings = NGHTTP2_DEFAULT_MAX_SETTINGS;
++ (*session_ptr)->max_continuations = NGHTTP2_DEFAULT_MAX_CONTINUATIONS;
+
+ if (option) {
+ if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE) &&
+@@ -6307,6 +6308,8 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
+ }
+ }
+ session_inbound_frame_reset(session);
++
++ session->num_continuations = 0;
+ }
+ break;
+ }
+@@ -6428,6 +6431,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
+ }
+ #endif /* DEBUGBUILD */
+
++ if (++session->num_continuations > session->max_continuations) {
++ return NGHTTP2_ERR_TOO_MANY_CONTINUATIONS;
++ }
++
+ readlen = inbound_frame_buf_read(iframe, in, last);
+ in += readlen;
+
+diff --git a/lib/nghttp2_session.h b/lib/nghttp2_session.h
+index 5f71a16..9a00b0e 100644
+--- a/lib/nghttp2_session.h
++++ b/lib/nghttp2_session.h
+@@ -107,6 +107,10 @@ typedef struct {
+ #define NGHTTP2_DEFAULT_STREAM_RESET_BURST 1000
+ #define NGHTTP2_DEFAULT_STREAM_RESET_RATE 33
+
++/* The default max number of CONTINUATION frames following an incoming
++ HEADER frame. */
++#define NGHTTP2_DEFAULT_MAX_CONTINUATIONS 8
++
+ /* Internal state when receiving incoming frame */
+ typedef enum {
+ /* Receiving frame header */
+@@ -279,6 +283,12 @@ struct nghttp2_session {
+ size_t max_send_header_block_length;
+ /* The maximum number of settings accepted per SETTINGS frame. */
+ size_t max_settings;
++ /* The maximum number of CONTINUATION frames following an incoming
++ HEADER frame. */
++ size_t max_continuations;
++ /* The number of CONTINUATION frames following an incoming HEADER
++ frame. This variable is reset when END_HEADERS flag is seen. */
++ size_t num_continuations;
+ /* Next Stream ID. Made unsigned int to detect >= (1 << 31). */
+ uint32_t next_stream_id;
+ /* The last stream ID this session initiated. For client session,
+--
+2.40.0
diff --git a/meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0002.patch b/meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0002.patch
new file mode 100644
index 0000000000..fee19465d5
--- /dev/null
+++ b/meta/recipes-support/nghttp2/nghttp2/CVE-2024-28182-0002.patch
@@ -0,0 +1,105 @@
+From d71a4668c6bead55805d18810d633fbb98315af9 Mon Sep 17 00:00:00 2001
+From: Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
+Date: Sat, 9 Mar 2024 16:48:10 +0900
+Subject: [PATCH] Add nghttp2_option_set_max_continuations
+
+CVE: CVE-2024-28182
+
+Upstream-Status: Backport [https://github.com/nghttp2/nghttp2/commit/d71a4668c6bead55805d18810d633fbb98315af9]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ doc/Makefile.am | 1 +
+ lib/includes/nghttp2/nghttp2.h | 11 +++++++++++
+ lib/nghttp2_option.c | 5 +++++
+ lib/nghttp2_option.h | 5 +++++
+ lib/nghttp2_session.c | 4 ++++
+ 5 files changed, 26 insertions(+)
+
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index b9d5a2d..83cfdfd 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -70,6 +70,7 @@ APIDOCS= \
+ nghttp2_option_set_no_recv_client_magic.rst \
+ nghttp2_option_set_peer_max_concurrent_streams.rst \
+ nghttp2_option_set_user_recv_extension_type.rst \
++ nghttp2_option_set_max_continuations.rst \
+ nghttp2_option_set_max_outbound_ack.rst \
+ nghttp2_option_set_max_settings.rst \
+ nghttp2_option_set_stream_reset_rate_limit.rst \
+diff --git a/lib/includes/nghttp2/nghttp2.h b/lib/includes/nghttp2/nghttp2.h
+index 6cc8c0c..c77cca9 100644
+--- a/lib/includes/nghttp2/nghttp2.h
++++ b/lib/includes/nghttp2/nghttp2.h
+@@ -2724,6 +2724,17 @@ NGHTTP2_EXTERN void nghttp2_option_set_max_outbound_ack(nghttp2_option *option,
+ NGHTTP2_EXTERN void nghttp2_option_set_max_settings(nghttp2_option *option,
+ size_t val);
+
++/**
++ * @function
++ *
++ * This function sets the maximum number of CONTINUATION frames
++ * following an incoming HEADER frame. If more than those frames are
++ * received, the remote endpoint is considered to be misbehaving and
++ * session will be closed. The default value is 8.
++ */
++NGHTTP2_EXTERN void nghttp2_option_set_max_continuations(nghttp2_option *option,
++ size_t val);
++
+ /**
+ * @function
+ *
+diff --git a/lib/nghttp2_option.c b/lib/nghttp2_option.c
+index 0d9a404..f3659c1 100644
+--- a/lib/nghttp2_option.c
++++ b/lib/nghttp2_option.c
+@@ -133,3 +133,8 @@ void nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option,
+ option->stream_reset_burst = burst;
+ option->stream_reset_rate = rate;
+ }
++
++void nghttp2_option_set_max_continuations(nghttp2_option *option, size_t val) {
++ option->opt_set_mask |= NGHTTP2_OPT_MAX_CONTINUATIONS;
++ option->max_continuations = val;
++}
+diff --git a/lib/nghttp2_option.h b/lib/nghttp2_option.h
+index e6ba910..c1b48c7 100644
+--- a/lib/nghttp2_option.h
++++ b/lib/nghttp2_option.h
+@@ -69,6 +69,7 @@ typedef enum {
+ NGHTTP2_OPT_MAX_OUTBOUND_ACK = 1 << 11,
+ NGHTTP2_OPT_MAX_SETTINGS = 1 << 12,
+ NGHTTP2_OPT_STREAM_RESET_RATE_LIMIT = 1 << 15,
++ NGHTTP2_OPT_MAX_CONTINUATIONS = 1 << 16,
+ } nghttp2_option_flag;
+
+ /**
+@@ -96,6 +97,10 @@ struct nghttp2_option {
+ * NGHTTP2_OPT_MAX_SETTINGS
+ */
+ size_t max_settings;
++ /**
++ * NGHTTP2_OPT_MAX_CONTINUATIONS
++ */
++ size_t max_continuations;
+ /**
+ * Bitwise OR of nghttp2_option_flag to determine that which fields
+ * are specified.
+diff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c
+index 537127c..b390cd5 100644
+--- a/lib/nghttp2_session.c
++++ b/lib/nghttp2_session.c
+@@ -539,6 +539,10 @@ static int session_new(nghttp2_session **session_ptr,
+ option->stream_reset_burst,
+ option->stream_reset_rate);
+ }
++
++ if (option->opt_set_mask & NGHTTP2_OPT_MAX_CONTINUATIONS) {
++ (*session_ptr)->max_continuations = option->max_continuations;
++ }
+ }
+
+ rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater,
+--
+2.40.0
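
Purely illustrative, not from the backport: capping CONTINUATION frames below the new default of 8 via the option added in this patch. The helper name and the value 4 are assumptions for the sketch.

```c
/* Sketch: harden a server session against CONTINUATION floods. */
#include <nghttp2/nghttp2.h>

int make_hardened_server(nghttp2_session **session,
                         const nghttp2_session_callbacks *callbacks,
                         void *user_data) {
  nghttp2_option *option;
  int rv = nghttp2_option_new(&option);
  if (rv != 0)
    return rv;

  /* If a peer sends more than this many CONTINUATION frames after a HEADERS
   * frame, nghttp2_session_mem_recv() returns
   * NGHTTP2_ERR_TOO_MANY_CONTINUATIONS and the session should be closed. */
  nghttp2_option_set_max_continuations(option, 4);

  rv = nghttp2_session_server_new2(session, callbacks, user_data, option);
  nghttp2_option_del(option);
  return rv;
}
```
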
diff --git a/meta/recipes-support/nghttp2/nghttp2_1.47.0.bb b/meta/recipes-support/nghttp2/nghttp2_1.47.0.bb
index 58ce08084d..79b1cf95c5 100644
--- a/meta/recipes-support/nghttp2/nghttp2_1.47.0.bb
+++ b/meta/recipes-support/nghttp2/nghttp2_1.47.0.bb
@@ -9,6 +9,10 @@ UPSTREAM_CHECK_URI = "https://github.com/nghttp2/nghttp2/releases"
SRC_URI = "\
https://github.com/nghttp2/nghttp2/releases/download/v${PV}/nghttp2-${PV}.tar.xz \
file://0001-fetch-ocsp-response-use-python3.patch \
+ file://CVE-2023-35945.patch \
+ file://CVE-2023-44487.patch \
+ file://CVE-2024-28182-0001.patch \
+ file://CVE-2024-28182-0002.patch \
"
SRC_URI[sha256sum] = "68271951324554c34501b85190f22f2221056db69f493afc3bbac8e7be21e7cc"
@@ -19,17 +23,19 @@ PACKAGECONFIG[manpages] = ""
# first place
EXTRA_OECMAKE = "-DENABLE_EXAMPLES=OFF -DENABLE_APP=OFF -DENABLE_HPACK_TOOLS=OFF"
-PACKAGES =+ "lib${BPN} ${PN}-client ${PN}-proxy ${PN}-server"
+# Do not let configure try to decide this.
+#
+EXTRA_OECMAKE += "-DENABLE_PYTHON_BINDINGS=OFF"
-RDEPENDS:${PN} = "${PN}-client (>= ${PV}) ${PN}-proxy (>= ${PV}) ${PN}-server (>= ${PV})"
+PACKAGES =+ "lib${BPN} ${PN}-proxy "
+
+RDEPENDS:${PN} = "${PN}-proxy (>= ${PV})"
RDEPENDS:${PN}:class-native = ""
RDEPENDS:${PN}-proxy = "openssl python3-core python3-io python3-shell"
ALLOW_EMPTY:${PN} = "1"
FILES:${PN} = ""
FILES:lib${BPN} = "${libdir}/*${SOLIBS}"
-FILES:${PN}-client = "${bindir}/h2load ${bindir}/nghttp"
FILES:${PN}-proxy = "${bindir}/nghttpx ${datadir}/${BPN}/fetch-ocsp-response"
-FILES:${PN}-server = "${bindir}/nghttpd"
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/numactl/numactl/Fix-the-test-output-format.patch b/meta/recipes-support/numactl/numactl/Fix-the-test-output-format.patch
index 9812ecc8b3..a7bc8d322e 100644
--- a/meta/recipes-support/numactl/numactl/Fix-the-test-output-format.patch
+++ b/meta/recipes-support/numactl/numactl/Fix-the-test-output-format.patch
@@ -7,6 +7,7 @@ Upstream-Status: Pending
Signed-off-by: Roy Li <rongqing.li@windriver.com>
Signed-off-by: Li Xin <lixin.fnst@cn.fujitsu.com>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
---
test/regress | 6 +++---
test/regress2 | 11 +++++------
@@ -20,7 +21,7 @@ index 2ce1705..d086a47 100755
if [ $numnodes -lt 2 ] ; then
echo "need at least two nodes with at least $NEEDPAGES each of"
echo "free memory for mempolicy regression tests"
-+ echo "FAIL: numa regress"
++ echo "SKIP: numa regress"
exit 77 # Skip test
fi
}
diff --git a/meta/recipes-support/numactl/numactl/run-ptest b/meta/recipes-support/numactl/numactl/run-ptest
index bf269da755..e019b0d364 100755
--- a/meta/recipes-support/numactl/numactl/run-ptest
+++ b/meta/recipes-support/numactl/numactl/run-ptest
@@ -8,7 +8,11 @@ if ! numactl -s | grep -q "No NUMA support available on this system."; then
if numademo -t -e 10M; then
echo "PASS: numademo"
else
- echo "FAIL: numademo"
+ if [ "$?" = 77 ] ; then
+ echo "SKIP: numademo"
+ else
+ echo "FAIL: numademo"
+ fi
fi
else
echo "SKIP: ./../test/bind_range"
diff --git a/meta/recipes-support/numactl/numactl_git.bb b/meta/recipes-support/numactl/numactl_git.bb
index 93547ea239..23be0a3b4f 100644
--- a/meta/recipes-support/numactl/numactl_git.bb
+++ b/meta/recipes-support/numactl/numactl_git.bb
@@ -8,10 +8,10 @@ SECTION = "apps"
inherit autotools-brokensep ptest
-LIC_FILES_CHKSUM = "file://README.md;beginline=19;endline=32;md5=f8ff2391624f28e481299f3f677b21bb"
+LIC_FILES_CHKSUM = "file://README.md;beginline=19;endline=32;md5=9f34c3af4ed6f3f5df0da5f3c0835a43"
-SRCREV = "dd6de072c92c892a86e18c0fd0dfa1ba57a9a05d"
-PV = "2.0.14"
+SRCREV = "10285f1a1bad49306839b2c463936460b604e3ea"
+PV = "2.0.16"
SRC_URI = "git://github.com/numactl/numactl;branch=master;protocol=https \
file://Fix-the-test-output-format.patch \
diff --git a/meta/recipes-support/p11-kit/p11-kit_0.24.1.bb b/meta/recipes-support/p11-kit/p11-kit_0.24.1.bb
index 59cbb67961..72b446204a 100644
--- a/meta/recipes-support/p11-kit/p11-kit_0.24.1.bb
+++ b/meta/recipes-support/p11-kit/p11-kit_0.24.1.bb
@@ -29,4 +29,4 @@ FILES:${PN} += " \
# PN contains p11-kit-proxy.so, a symlink to a loadable module
INSANE_SKIP:${PN} = "dev-so"
-BBCLASSEXTEND = "nativesdk"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/pinentry/pinentry_1.2.0.bb b/meta/recipes-support/pinentry/pinentry_1.2.0.bb
index 169cac8965..e6cc71a547 100644
--- a/meta/recipes-support/pinentry/pinentry_1.2.0.bb
+++ b/meta/recipes-support/pinentry/pinentry_1.2.0.bb
@@ -32,5 +32,8 @@ PACKAGECONFIG[secret] = "--enable-libsecret, --disable-libsecret, libsecret"
EXTRA_OECONF = " \
--disable-rpath \
"
+EXTRA_OECONF:append:libc-musl = " \
+ ac_cv_should_define__xopen_source=yes \
+"
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/serf/serf/0001-Fix-syntax-of-a-print-in-the-scons-file-to-unbreak-b.patch b/meta/recipes-support/serf/serf/0001-Fix-syntax-of-a-print-in-the-scons-file-to-unbreak-b.patch
deleted file mode 100644
index 4a5832ac1a..0000000000
--- a/meta/recipes-support/serf/serf/0001-Fix-syntax-of-a-print-in-the-scons-file-to-unbreak-b.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 99f6e1b0d68281b63218d6adfe68cd9e331ac5be Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Mon, 3 Sep 2018 10:50:08 -0700
-Subject: [PATCH] Fix syntax of a print() in the scons file to unbreak building
- with most recent scons version.
-
-* SConstruct Use Python 3.0 valid syntax to make Scons 3.0.0 happy on both python
- 3.0 and 2.7.
-
-Upstream-Status: Backport
-[https://svn.apache.org/viewvc/serf/trunk/SConstruct?r1=1809132&r2=1811083&diff_format=h]
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- SConstruct | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/SConstruct b/SConstruct
-index 1670459..18a45fa 100644
---- a/SConstruct
-+++ b/SConstruct
-@@ -184,7 +184,7 @@ CALLOUT_OKAY = not (env.GetOption('clean') or env.GetOption('help'))
-
- unknown = opts.UnknownVariables()
- if unknown:
-- print 'Warning: Used unknown variables:', ', '.join(unknown.keys())
-+ print('Warning: Used unknown variables:', ', '.join(unknown.keys()))
-
- apr = str(env['APR'])
- apu = str(env['APU'])
diff --git a/meta/recipes-support/serf/serf/0001-buckets-ssl_buckets.c-do-not-use-ERR_GET_FUNC.patch b/meta/recipes-support/serf/serf/0001-buckets-ssl_buckets.c-do-not-use-ERR_GET_FUNC.patch
deleted file mode 100644
index 91ccc8a474..0000000000
--- a/meta/recipes-support/serf/serf/0001-buckets-ssl_buckets.c-do-not-use-ERR_GET_FUNC.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 2f45711a66ff99886b6e4a5708e2db01a63e5af4 Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex@linutronix.de>
-Date: Fri, 10 Sep 2021 11:05:10 +0200
-Subject: [PATCH] buckets/ssl_buckets.c: do not use ERR_GET_FUNC
-
-Upstream removed it in
-https://github.com/openssl/openssl/pull/16004
-
-Upstream-Status: Inactive-Upstream [lastrelease: 2015, lastcommit: 2019]
-Signed-off-by: Alexander Kanavin <alex@linutronix.de>
----
- buckets/ssl_buckets.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/buckets/ssl_buckets.c b/buckets/ssl_buckets.c
-index b01e535..9801f87 100644
---- a/buckets/ssl_buckets.c
-+++ b/buckets/ssl_buckets.c
-@@ -1325,8 +1325,7 @@ static int ssl_need_client_cert(SSL *ssl, X509 **cert, EVP_PKEY **pkey)
- return 0;
- }
- else {
-- printf("OpenSSL cert error: %d %d %d\n", ERR_GET_LIB(err),
-- ERR_GET_FUNC(err),
-+ printf("OpenSSL cert error: %d %d\n", ERR_GET_LIB(err),
- ERR_GET_REASON(err));
- PKCS12_free(p12);
- bio_meth_free(biom);
diff --git a/meta/recipes-support/serf/serf/0004-Follow-up-to-r1811083-fix-building-with-scons-3.0.0-.patch b/meta/recipes-support/serf/serf/0004-Follow-up-to-r1811083-fix-building-with-scons-3.0.0-.patch
deleted file mode 100644
index 02fa9e3a06..0000000000
--- a/meta/recipes-support/serf/serf/0004-Follow-up-to-r1811083-fix-building-with-scons-3.0.0-.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 565211fd082ef653ca9c44a345350fc1451f5a0f Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Mon, 3 Sep 2018 11:12:38 -0700
-Subject: [PATCH] Follow-up to r1811083 fix building with scons 3.0.0 and
- Python3
-
-* SConstruct: Append decode('utf-8) to FILE.get_contents() to avoid
- TypeError: cannot use a string pattern on a bytes-like object
-
-Upstream-Status: Backport
-[https://svn.apache.org/viewvc/serf/trunk/SConstruct?r1=1811088&r2=1814604]
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- SConstruct | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/SConstruct b/SConstruct
-index 877731e..7678bb1 100644
---- a/SConstruct
-+++ b/SConstruct
-@@ -169,7 +169,7 @@ env.Append(BUILDERS = {
- match = re.search('SERF_MAJOR_VERSION ([0-9]+).*'
- 'SERF_MINOR_VERSION ([0-9]+).*'
- 'SERF_PATCH_VERSION ([0-9]+)',
-- env.File('serf.h').get_contents(),
-+ env.File('serf.h').get_contents().decode('utf-8'),
- re.DOTALL)
- MAJOR, MINOR, PATCH = [int(x) for x in match.groups()]
- env.Append(MAJOR=str(MAJOR))
diff --git a/meta/recipes-support/serf/serf/SConstruct.stop.creating.directories.without.sandbox-install.prefix.patch b/meta/recipes-support/serf/serf/SConstruct.stop.creating.directories.without.sandbox-install.prefix.patch
index 4105868a7e..91640d6044 100644
--- a/meta/recipes-support/serf/serf/SConstruct.stop.creating.directories.without.sandbox-install.prefix.patch
+++ b/meta/recipes-support/serf/serf/SConstruct.stop.creating.directories.without.sandbox-install.prefix.patch
@@ -31,7 +31,7 @@ ERROR: scons install execution failed.
and the installed paths (including the paths inside libserf*.pc)
look correct
-Upstream-Status: Inactive-Upstream [lastrelease: 2015, lastcommit: 2019]
+Upstream-Status: Pending
Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
diff --git a/meta/recipes-support/serf/serf_1.3.9.bb b/meta/recipes-support/serf/serf_1.3.10.bb
index 669f42b8e7..c6b51452aa 100644
--- a/meta/recipes-support/serf/serf_1.3.9.bb
+++ b/meta/recipes-support/serf/serf_1.3.10.bb
@@ -7,16 +7,12 @@ HOMEPAGE = "http://serf.apache.org/"
SRC_URI = "${APACHE_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://norpath.patch \
file://env.patch \
- file://0001-Fix-syntax-of-a-print-in-the-scons-file-to-unbreak-b.patch \
file://0002-SConstruct-Fix-path-quoting-for-.def-generator.patch \
file://0003-gen_def.patch \
- file://0004-Follow-up-to-r1811083-fix-building-with-scons-3.0.0-.patch \
file://SConstruct.stop.creating.directories.without.sandbox-install.prefix.patch \
- file://0001-buckets-ssl_buckets.c-do-not-use-ERR_GET_FUNC.patch \
"
-SRC_URI[md5sum] = "370a6340ff20366ab088012cd13f2b57"
-SRC_URI[sha256sum] = "549c2d21c577a8a9c0450facb5cca809f26591f048e466552240947bdf7a87cc"
+SRC_URI[sha256sum] = "be81ef08baa2516ecda76a77adf7def7bc3227eeb578b9a33b45f7b41dc064e6"
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE;md5=86d3f3a95c324c9479bd8986968f4327"
diff --git a/meta/recipes-support/sqlite/files/0001-sqlite-Increased-the-size-of-loop-variables-in-the-printf-implementation.patch b/meta/recipes-support/sqlite/files/0001-sqlite-Increased-the-size-of-loop-variables-in-the-printf-implementation.patch
new file mode 100644
index 0000000000..9e8f039ef6
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/0001-sqlite-Increased-the-size-of-loop-variables-in-the-printf-implementation.patch
@@ -0,0 +1,26 @@
+From ec75530b8d8268cb07d8e476d79e1b0e59492fa2 Mon Sep 17 00:00:00 2001
+From: drh
+Date: Thu, 18 Aug 2022 15:10:46 +0200
+Subject: [PATCH] sqlite: Increase the size of loop variables in the printf() implementation
+
+Increase the size of loop variables in the printf() implementation to avoid integer overflow on multi-gigabyte string arguments. CVE-2022-35737.
+
+This bug fix addresses CVE-2022-35737 and is a backport of a fix added in SQLite 3.39.2 (2022-07-21).
+
+Signed-off-by: Ghassane Ben El Aattar <ghassaneb.aattar@huawei.com>
+
+CVE: CVE-2022-35737
+
+Upstream-Status: Backport [https://www.sqlite.org/src/info/aab790a16e1bdff7]
+---
+ sqlite3.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index f867d62..490199a 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -30234,1 +30234,2 @@ static int vxprintf(
+- int i, j, k, n, isnull;
++ i64 i, j, k, n;
++ int isnull;
diff --git a/meta/recipes-support/sqlite/files/CVE-2022-46908.patch b/meta/recipes-support/sqlite/files/CVE-2022-46908.patch
new file mode 100644
index 0000000000..38bd544838
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2022-46908.patch
@@ -0,0 +1,39 @@
+From 1b779afa3ed2f35a110e460fc6ed13cba744db85 2022-12-05 02:52:37 UTC
+From: larrybr <larrybr@sqlite.org>
+Date: 2022-12-05 02:52:37 UTC
+Subject: [PATCH] Fix safe mode authorizer callback to reject disallowed UDFs
+
+Fix safe mode authorizer callback to reject disallowed UDFs. Reported at Forum post 07beac8056151b2f.
+
+Upstream-Status: Backport [https://sqlite.org/src/info/cefc032473ac5ad2]
+CVE-2022-46908
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ shell.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/shell.c b/shell.c
+index d104768..0200c0a 100644
+--- a/shell.c
++++ b/shell.c
+@@ -12894,7 +12894,7 @@ static int safeModeAuth(
+ "zipfile",
+ "zipfile_cds",
+ };
+- UNUSED_PARAMETER(zA2);
++ UNUSED_PARAMETER(zA1);
+ UNUSED_PARAMETER(zA3);
+ UNUSED_PARAMETER(zA4);
+ switch( op ){
+@@ -12905,7 +12905,7 @@ static int safeModeAuth(
+ case SQLITE_FUNCTION: {
+ int i;
+ for(i=0; i<ArraySize(azProhibitedFunctions); i++){
+- if( sqlite3_stricmp(zA1, azProhibitedFunctions[i])==0 ){
++ if( sqlite3_stricmp(zA2, azProhibitedFunctions[i])==0 ){
+ failIfSafeMode(p, "cannot use the %s() function in safe mode",
+ azProhibitedFunctions[i]);
+ }
+--
+2.30.2
+
diff --git a/meta/recipes-support/sqlite/files/CVE-2023-36191.patch b/meta/recipes-support/sqlite/files/CVE-2023-36191.patch
new file mode 100644
index 0000000000..aca79c334a
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2023-36191.patch
@@ -0,0 +1,37 @@
+From 4e8a0eb4e773b808d9e9697af94319599777169a Mon Sep 17 00:00:00 2001
+From: larrybr <larrybr@noemail.net>
+Date: Fri, 2 Jun 2023 12:56:32 +0000
+Subject: [PATCH] Fix CLI fault on missing -nonce reported by [forum:/info/f8c14a1134|forum post f8c14a1134].
+
+FossilOrigin-Name: cd24178bbaad4a1dafc3848e7d74240f90030160b5c43c93e1e0e11b073c2df5
+
+Upstream-Status: Backport [https://sqlite.org/src/info/cd24178bbaad4a1d
+Upstream commit https://github.com/sqlite/sqlite/commit/4e8a0eb4e773b808d9e9697af94319599777169a]
+CVE: CVE-2023-36191
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ shell.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/shell.c b/shell.c
+index 0200c0a..fa45d40 100644
+--- a/shell.c
++++ b/shell.c
+@@ -23163,8 +23163,12 @@ int SQLITE_CDECL wmain(int argc, wchar_t **wargv){
+ }else if( strcmp(z,"-bail")==0 ){
+ bail_on_error = 1;
+ }else if( strcmp(z,"-nonce")==0 ){
+- free(data.zNonce);
+- data.zNonce = strdup(argv[++i]);
++ if( data.zNonce ) free(data.zNonce);
++ if( i+1 < argc ) data.zNonce = strdup(argv[++i]);
++ else{
++ data.zNonce = 0;
++ break;
++ }
+ }else if( strcmp(z,"-safe")==0 ){
+ /* no-op - catch this on the second pass */
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-support/sqlite/files/CVE-2023-7104.patch b/meta/recipes-support/sqlite/files/CVE-2023-7104.patch
new file mode 100644
index 0000000000..25c6ba017c
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2023-7104.patch
@@ -0,0 +1,44 @@
+From 09f1652f36c5c4e8a6a640ce887f9ea0f48a7958 Mon Sep 17 00:00:00 2001
+From: dan <Dan Kennedy>
+Date: Thu, 7 Sep 2023 13:53:09 +0000
+Subject: [PATCH] Fix a buffer overread in the sessions extension that could
+ occur when processing a corrupt changeset.
+
+Upstream-Status: Backport [https://sqlite.org/src/info/0e4e7a05c4204b47]
+CVE: CVE-2023-7104
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ sqlite3.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/ext/session/sqlite3session.c b/ext/session/sqlite3session.c
+index 9f862f2465..0491549231 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -213482,15 +213482,19 @@ static int sessionReadRecord(
+ }
+ }
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+- sqlite3_int64 v = sessionGetI64(aVal);
+- if( eType==SQLITE_INTEGER ){
+- sqlite3VdbeMemSetInt64(apOut[i], v);
++ if( (pIn->nData-pIn->iNext)<8 ){
++ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+- double d;
+- memcpy(&d, &v, 8);
+- sqlite3VdbeMemSetDouble(apOut[i], d);
++ sqlite3_int64 v = sessionGetI64(aVal);
++ if( eType==SQLITE_INTEGER ){
++ sqlite3VdbeMemSetInt64(apOut[i], v);
++ }else{
++ double d;
++ memcpy(&d, &v, 8);
++ sqlite3VdbeMemSetDouble(apOut[i], d);
++ }
++ pIn->iNext += 8;
+ }
+- pIn->iNext += 8;
+ }
+ }
+ }
diff --git a/meta/recipes-support/sqlite/sqlite3_3.38.5.bb b/meta/recipes-support/sqlite/sqlite3_3.38.5.bb
index d56a3a0209..cece207eae 100644
--- a/meta/recipes-support/sqlite/sqlite3_3.38.5.bb
+++ b/meta/recipes-support/sqlite/sqlite3_3.38.5.bb
@@ -3,7 +3,12 @@ require sqlite3.inc
LICENSE = "PD"
LIC_FILES_CHKSUM = "file://sqlite3.h;endline=11;md5=786d3dc581eff03f4fd9e4a77ed00c66"
-SRC_URI = "http://www.sqlite.org/2022/sqlite-autoconf-${SQLITE_PV}.tar.gz"
+SRC_URI = "http://www.sqlite.org/2022/sqlite-autoconf-${SQLITE_PV}.tar.gz \
+            lines_after.append('# The configs might not be correct, since PACKAGECONFIG does not support expressions as may be used in requires.txt - they are just replaced by text.')
+ file://CVE-2022-46908.patch \
+ file://CVE-2023-36191.patch \
+ file://CVE-2023-7104.patch \
+"
SRC_URI[sha256sum] = "5af07de982ba658fd91a03170c945f99c971f6955bc79df3266544373e39869c"
# -19242 is only an issue in specific development branch commits
diff --git a/meta/recipes-support/vim/files/racefix.patch b/meta/recipes-support/vim/files/racefix.patch
deleted file mode 100644
index 1cb8fb442f..0000000000
--- a/meta/recipes-support/vim/files/racefix.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-The creation of the LINGUAS file is duplicated for each desktop file
-which can lead the commands to race against each other. Rework
-the makefile to avoid this as the expense of leaving the file on disk.
-
-Upstream-Status: Pending
-RP 2021/2/15
-
-Index: git/src/po/Makefile
-===================================================================
---- git.orig/src/po/Makefile
-+++ git/src/po/Makefile
-@@ -207,17 +207,16 @@ $(PACKAGE).pot: $(PO_INPUTLIST) $(PO_VIM
- # Delete the temporary files
- rm *.js
-
--vim.desktop: vim.desktop.in $(POFILES)
-+LINGUAS:
- echo $(LANGUAGES) | tr " " "\n" |sed -e '/\./d' | sort > LINGUAS
-+
-+vim.desktop: vim.desktop.in $(POFILES) LINGUAS
- $(MSGFMT) --desktop -d . --template vim.desktop.in -o tmp_vim.desktop
-- rm -f LINGUAS
- if command -v desktop-file-validate; then desktop-file-validate tmp_vim.desktop; fi
- mv tmp_vim.desktop vim.desktop
-
--gvim.desktop: gvim.desktop.in $(POFILES)
-- echo $(LANGUAGES) | tr " " "\n" |sed -e '/\./d' | sort > LINGUAS
-+gvim.desktop: gvim.desktop.in $(POFILES) LINGUAS
- $(MSGFMT) --desktop -d . --template gvim.desktop.in -o tmp_gvim.desktop
-- rm -f LINGUAS
- if command -v desktop-file-validate; then desktop-file-validate tmp_gvim.desktop; fi
- mv tmp_gvim.desktop gvim.desktop
-
diff --git a/meta/recipes-support/vim/vim-tiny_8.2.bb b/meta/recipes-support/vim/vim-tiny_9.0.bb
index e4c26d23f6..e4c26d23f6 100644
--- a/meta/recipes-support/vim/vim-tiny_8.2.bb
+++ b/meta/recipes-support/vim/vim-tiny_9.0.bb
diff --git a/meta/recipes-support/vim/vim.inc b/meta/recipes-support/vim/vim.inc
index 06707dbe11..906aa53a16 100644
--- a/meta/recipes-support/vim/vim.inc
+++ b/meta/recipes-support/vim/vim.inc
@@ -10,31 +10,28 @@ DEPENDS = "ncurses gettext-native"
RSUGGESTS:${PN} = "diffutils"
LICENSE = "Vim"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=6b30ea4fa660c483b619924bc709ef99 \
- file://runtime/doc/uganda.txt;md5=daf48235bb824c77fe8ae88d5f575f74"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d1a651ab770b45d41c0f8cb5a8ca930e"
SRC_URI = "git://github.com/vim/vim.git;branch=master;protocol=https \
file://disable_acl_header_check.patch \
file://vim-add-knob-whether-elf.h-are-checked.patch \
file://0001-src-Makefile-improve-reproducibility.patch \
file://no-path-adjust.patch \
- file://racefix.patch \
"
-PV .= ".5083"
-SRCREV = "db77c49401145d76441fbb3d22a1d7d987681c13"
-
-# Remove when 8.3 is out
-UPSTREAM_VERSION_UNKNOWN = "1"
+PV .= ".2190"
+SRCREV = "6a950da86d7a6eb09d5ebeab17657986420d07ac"
# Do not consider .z in x.y.z, as that is updated with every commit
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+\.\d+)\.0"
+# Ignore that the upstream version .z in x.y.z is always newer
+UPSTREAM_VERSION_UNKNOWN = "1"
S = "${WORKDIR}/git"
VIMDIR = "vim${@d.getVar('PV').split('.')[0]}${@d.getVar('PV').split('.')[1]}"
-inherit autotools-brokensep update-alternatives mime-xdg
+inherit autotools-brokensep update-alternatives mime-xdg pkgconfig
CLEANBROKEN = "1"
@@ -43,22 +40,18 @@ do_configure () {
cd src
rm -f auto/*
touch auto/config.mk
+ # git timestamps aren't reliable, so touch the shipped .po files so they aren't regenerated
+ touch -c po/cs.cp1250.po po/ja.euc-jp.po po/ja.sjis.po po/ko.po po/pl.UTF-8.po po/pl.cp1250.po po/ru.cp1251.po po/sk.cp1250.po po/uk.cp1251.po po/zh_CN.po po/zh_CN.cp936.po po/zh_TW.po
+ # ru.cp1251.po uses CP1251 rather than cp1251, fix that
+ sed -i -e s/CP1251/cp1251/ po/ru.cp1251.po
aclocal
autoconf
cd ..
oe_runconf
touch src/auto/configure
touch src/auto/config.mk src/auto/config.h
-}
-
-do_compile() {
- # We do not support fully / correctly the following locales. Attempting
- # to use these with msgfmt in order to update the ".desktop" files exposes
- # this problem and leads to the compile failing.
- for LOCALE in cs fr ko pl sk zh_CN zh_TW;do
- echo -n > src/po/${LOCALE}.po
- done
- autotools_do_compile
+ # need a native tool, not a target one
+ ${BUILD_CC} src/po/sjiscorr.c -o src/po/sjiscorr
}
PACKAGECONFIG ??= "\
@@ -83,6 +76,7 @@ EXTRA_OECONF = " \
--disable-netbeans \
--disable-desktop-database-update \
--with-tlib=ncurses \
+ --with-modified-by='${MAINTAINER}' \
ac_cv_small_wchar_t=no \
ac_cv_path_GLIB_COMPILE_RESOURCES=no \
vim_cv_getcwd_broken=no \
@@ -95,6 +89,10 @@ EXTRA_OECONF = " \
STRIP=/bin/true \
"
+# Some host distros don't have it, disable consistently
+EXTRA_OECONF:append:class-native = " vim_cv_timer_create=no"
+EXTRA_OECONF:append:class-target = " vim_cv_timer_create=yes"
+
do_install() {
autotools_do_install
diff --git a/meta/recipes-support/vim/vim_8.2.bb b/meta/recipes-support/vim/vim_9.0.bb
index f358e61132..f358e61132 100644
--- a/meta/recipes-support/vim/vim_8.2.bb
+++ b/meta/recipes-support/vim/vim_9.0.bb
diff --git a/scripts/combo-layer b/scripts/combo-layer
index 045de65642..19ad32660d 100755
--- a/scripts/combo-layer
+++ b/scripts/combo-layer
@@ -21,7 +21,6 @@ import re
import copy
import pipes
import shutil
-from collections import OrderedDict
from string import Template
from functools import reduce
@@ -192,6 +191,23 @@ def runcmd(cmd,destdir=None,printerr=True,out=None,env=None):
logger.debug("output: %s" % output.replace(chr(0), '\\0'))
return output
+def action_sync_revs(conf, args):
+ """
+ Update the last_revision config option for each repo with the latest
+ revision in the remote's branch. Useful if multiple people are using
+ combo-layer.
+ """
+ repos = get_repos(conf, args[1:])
+
+ for name in repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ branch = repo.get('branch', "master")
+ runcmd("git fetch", ldir)
+ lastrev = runcmd('git rev-parse origin/%s' % branch, ldir).strip()
+ print("Updating %s to %s" % (name, lastrev))
+ conf.update(name, "last_revision", lastrev)
+
def action_init(conf, args):
"""
Clone component repositories
@@ -467,7 +483,7 @@ def check_repo_clean(repodir):
exit if repo is dirty
"""
output=runcmd("git status --porcelain", repodir)
- r = re.compile('\?\? patch-.*/')
+ r = re.compile(r'\?\? patch-.*/')
dirtyout = [item for item in output.splitlines() if not r.match(item)]
if dirtyout:
logger.error("git repo %s is dirty, please fix it first", repodir)
@@ -508,7 +524,7 @@ def check_patch(patchfile):
f.close()
if of:
of.close()
- bb.utils.rename(patchfile + '.tmp', patchfile)
+ os.rename(of.name, patchfile)
def drop_to_shell(workdir=None):
if not sys.stdin.isatty():
@@ -1302,6 +1318,7 @@ actions = {
"update": action_update,
"pull": action_pull,
"splitpatch": action_splitpatch,
+ "sync-revs": action_sync_revs,
}
def main():
@@ -1312,10 +1329,11 @@ def main():
Create and update a combination layer repository from multiple component repositories.
Action:
- init initialise the combo layer repo
- update [components] get patches from component repos and apply them to the combo repo
- pull [components] just pull component repos only
- splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
+ init initialise the combo layer repo
+ update [components] get patches from component repos and apply them to the combo repo
+ pull [components] just pull component repos only
+ sync-revs [components] update the config file's last_revision for each repository
+ splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).",
action = "store", dest = "conffile", default = "conf/combo-layer.conf")
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py
index 090133600b..a9cdf082ab 100755
--- a/scripts/contrib/bbvars.py
+++ b/scripts/contrib/bbvars.py
@@ -36,8 +36,8 @@ def bbvar_is_documented(var, documented_vars):
def collect_documented_vars(docfiles):
''' Walk the docfiles and collect the documented variables '''
documented_vars = []
- prog = re.compile(".*($|[^A-Z_])<glossentry id=\'var-")
- var_prog = re.compile('<glossentry id=\'var-(.*)\'>')
+ prog = re.compile(r".*($|[^A-Z_])<glossentry id=\'var-")
+ var_prog = re.compile(r'<glossentry id=\'var-(.*)\'>')
for d in docfiles:
with open(d) as f:
documented_vars += var_prog.findall(f.read())
@@ -45,7 +45,7 @@ def collect_documented_vars(docfiles):
return documented_vars
def bbvar_doctag(var, docconf):
- prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
+ prog = re.compile(r'^%s\[doc\] *= *"(.*)"' % (var))
if docconf == "":
return "?"
diff --git a/scripts/contrib/convert-overrides.py b/scripts/contrib/convert-overrides.py
index 4d41a4c475..c69acb4095 100755
--- a/scripts/contrib/convert-overrides.py
+++ b/scripts/contrib/convert-overrides.py
@@ -22,66 +22,78 @@ import sys
import tempfile
import shutil
import mimetypes
+import argparse
-if len(sys.argv) < 2:
- print("Please specify a directory to run the conversion script against.")
- sys.exit(1)
+parser = argparse.ArgumentParser(description="Convert override syntax")
+parser.add_argument("--override", "-o", action="append", default=[], help="Add additional strings to consider as an override (e.g. custom machines/distros)")
+parser.add_argument("--skip", "-s", action="append", default=[], help="Add additional strings to skip and not consider as overrides")
+parser.add_argument("--skip-ext", "-e", action="append", default=[], help="Additional file suffixes to skip when processing (e.g. '.foo')")
+parser.add_argument("--package-vars", action="append", default=[], help="Additional variables to treat as package variables")
+parser.add_argument("--image-vars", action="append", default=[], help="Additional variables to treat as image variables")
+parser.add_argument("--short-override", action="append", default=[], help="Additional strings to treat as short overrides")
+parser.add_argument("path", nargs="+", help="Paths to convert")
+
+args = parser.parse_args()
# List of strings to treat as overrides
-vars = ["append", "prepend", "remove"]
-vars = vars + ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"]
-vars = vars + ["genericx86", "edgerouter", "beaglebone-yocto"]
-vars = vars + ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"]
-vars = vars + ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"]
-vars = vars + ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"]
-vars = vars + ["tune-", "pn-", "forcevariable"]
-vars = vars + ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"]
-vars = vars + ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"]
-vars = vars + ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"]
-vars = vars + ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"]
-vars = vars + ["linux-gnueabi", "eabi"]
-vars = vars + ["virtclass-multilib", "virtclass-mcextend"]
+vars = args.override
+vars += ["append", "prepend", "remove"]
+vars += ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"]
+vars += ["genericx86", "edgerouter", "beaglebone-yocto"]
+vars += ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"]
+vars += ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"]
+vars += ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"]
+vars += ["tune-", "pn-", "forcevariable"]
+vars += ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"]
+vars += ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"]
+vars += ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"]
+vars += ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"]
+vars += ["linux-gnueabi", "eabi"]
+vars += ["virtclass-multilib", "virtclass-mcextend"]
# List of strings to treat as overrides but only with whitespace following or another override (more restricted matching).
# Handles issues with arc matching arch.
-shortvars = ["arc", "mips", "mipsel", "sh4"]
+shortvars = ["arc", "mips", "mipsel", "sh4"] + args.short_override
# Variables which take packagenames as an override
packagevars = ["FILES", "RDEPENDS", "RRECOMMENDS", "SUMMARY", "DESCRIPTION", "RSUGGESTS", "RPROVIDES", "RCONFLICTS", "PKG", "ALLOW_EMPTY",
"pkg_postrm", "pkg_postinst_ontarget", "pkg_postinst", "INITSCRIPT_NAME", "INITSCRIPT_PARAMS", "DEBIAN_NOAUTONAME", "ALTERNATIVE",
"PKGE", "PKGV", "PKGR", "USERADD_PARAM", "GROUPADD_PARAM", "CONFFILES", "SYSTEMD_SERVICE", "LICENSE", "SECTION", "pkg_preinst",
"pkg_prerm", "RREPLACES", "GROUPMEMS_PARAM", "SYSTEMD_AUTO_ENABLE", "SKIP_FILEDEPS", "PRIVATE_LIBS", "PACKAGE_ADD_METADATA",
- "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"]
+ "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"] + args.package_vars
# Expressions to skip if encountered, these are not overrides
-skips = ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"]
-skips = skips + ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"]
-skips = skips + ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"]
-skips = skips + ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"]
-skips = skips + ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"]
-skips = skips + ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"]
-skips = skips + ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"]
-skips = skips + ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"]
-skips = skips + ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"]
-
-imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"]
-packagevars = packagevars + imagevars
+skips = args.skip
+skips += ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"]
+skips += ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"]
+skips += ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"]
+skips += ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"]
+skips += ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"]
+skips += ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"]
+skips += ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"]
+skips += ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"]
+skips += ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"]
+
+imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"] + args.image_vars
+packagevars += imagevars
+
+skip_ext = [".html", ".patch", ".m4", ".diff"] + args.skip_ext
vars_re = {}
for exp in vars:
- vars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp)
+ vars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp)
shortvars_re = {}
for exp in shortvars:
- shortvars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + '([\(\'"\s:])'), r"\1:" + exp + r"\3")
+ shortvars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + r'([\(\'"\s:])'), r"\1:" + exp + r"\3")
package_re = {}
for exp in packagevars:
- package_re[exp] = (re.compile('(^|[#\'"\s\-\+]+)' + exp + '_' + '([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2")
+ package_re[exp] = (re.compile(r'(^|[#\'"\s\-\+]+)' + exp + r'_' + r'([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2")
# Other substitutions to make
subs = {
- 'r = re.compile("([^:]+):\s*(.*)")' : 'r = re.compile("(^.+?):\s+(.*)")',
+ 'r = re.compile(r"([^:]+):\s*(.*)")' : 'r = re.compile(r"(^.+?):\s+(.*)")',
"val = d.getVar('%s_%s' % (var, pkg))" : "val = d.getVar('%s:%s' % (var, pkg))",
"f.write('%s_%s: %s\\n' % (var, pkg, encode(val)))" : "f.write('%s:%s: %s\\n' % (var, pkg, encode(val)))",
"d.getVar('%s_%s' % (scriptlet_name, pkg))" : "d.getVar('%s:%s' % (scriptlet_name, pkg))",
@@ -124,21 +136,20 @@ def processfile(fn):
ourname = os.path.basename(sys.argv[0])
ourversion = "0.9.3"
-if os.path.isfile(sys.argv[1]):
- processfile(sys.argv[1])
- sys.exit(0)
-
-for targetdir in sys.argv[1:]:
- print("processing directory '%s'" % targetdir)
- for root, dirs, files in os.walk(targetdir):
- for name in files:
- if name == ourname:
- continue
- fn = os.path.join(root, name)
- if os.path.islink(fn):
- continue
- if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"):
- continue
- processfile(fn)
+for p in args.path:
+ if os.path.isfile(p):
+ processfile(p)
+ else:
+ print("processing directory '%s'" % p)
+ for root, dirs, files in os.walk(p):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or any(fn.endswith(ext) for ext in skip_ext):
+ continue
+ processfile(fn)
print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/image-manifest b/scripts/contrib/image-manifest
index 3c07a73a4e..4d65a99258 100755
--- a/scripts/contrib/image-manifest
+++ b/scripts/contrib/image-manifest
@@ -392,7 +392,7 @@ def export_manifest_info(args):
for key in rd.getVarFlags('PACKAGECONFIG').keys():
if key == 'doc':
continue
- rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key, True)
+ rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key)
if config['patches'] == 'yes':
patches = oe.recipeutils.get_recipe_patches(rd)
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index 8eefcf63a5..885105fab3 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -128,7 +128,7 @@ PROTO_RE="[a-z][a-z+]*://"
GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
REMOTE_URL=${REMOTE_URL%.git}
REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
-REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#https://\4/\5#")
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
@@ -149,13 +149,10 @@ fi
WEB_URL=""
case "$REMOTE_URL" in
*git.yoctoproject.org*)
- WEB_URL="http://git.yoctoproject.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
- ;;
- *git.pokylinux.org*)
- WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.yoctoproject.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*git.openembedded.org*)
- WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*github.com*)
WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
diff --git a/scripts/devtool b/scripts/devtool
index af4811b922..20d785c7f7 100755
--- a/scripts/devtool
+++ b/scripts/devtool
@@ -104,6 +104,7 @@ def read_workspace():
for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')):
with open(fn, 'r') as f:
pnvalues = {}
+ pn = None
for line in f:
res = externalsrc_re.match(line.rstrip())
if res:
@@ -123,6 +124,9 @@ def read_workspace():
elif line.startswith('# srctreebase: '):
pnvalues['srctreebase'] = line.split(':', 1)[1].strip()
if pnvalues:
+ if not pn:
+ raise DevtoolError("Found *.bbappend in %s, but could not determine EXTERNALSRC:pn-*. "
+ "Maybe still using old syntax?" % config.workspace_path)
if not pnvalues.get('srctreebase', None):
pnvalues['srctreebase'] = pnvalues['srctree']
logger.debug('Found recipe %s' % pnvalues)
@@ -314,10 +318,10 @@ def main():
args = parser.parse_args(unparsed_args, namespace=global_args)
- if not getattr(args, 'no_workspace', False):
- read_workspace()
-
try:
+ if not getattr(args, 'no_workspace', False):
+ read_workspace()
+
ret = args.func(args, config, basepath, workspace)
except DevtoolError as err:
if str(err):
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
index c69b5bf4d7..6db60d5bcf 100644
--- a/scripts/lib/buildstats.py
+++ b/scripts/lib/buildstats.py
@@ -8,7 +8,7 @@ import json
import logging
import os
import re
-from collections import namedtuple,OrderedDict
+from collections import namedtuple
from statistics import mean
@@ -79,8 +79,8 @@ class BSTask(dict):
return self['rusage']['ru_oublock']
@classmethod
- def from_file(cls, buildstat_file):
- """Read buildstat text file"""
+ def from_file(cls, buildstat_file, fallback_end=0):
+ """Read buildstat text file. fallback_end is an optional end time for tasks that are not recorded as finishing."""
bs_task = cls()
log.debug("Reading task buildstats from %s", buildstat_file)
end_time = None
@@ -108,7 +108,10 @@ class BSTask(dict):
bs_task[ru_type][ru_key] = val
elif key == 'Status':
bs_task['status'] = val
- if end_time is not None and start_time is not None:
+ # If the task didn't finish, fill in the fallback end time if specified
+ if start_time and not end_time and fallback_end:
+ end_time = fallback_end
+ if start_time and end_time:
bs_task['elapsed_time'] = end_time - start_time
else:
raise BSError("{} looks like a invalid buildstats file".format(buildstat_file))
@@ -226,25 +229,44 @@ class BuildStats(dict):
epoch = match.group('epoch')
return name, epoch, version, revision
+ @staticmethod
+ def parse_top_build_stats(path):
+ """
+ Parse the top-level build_stats file for build-wide start and duration.
+ """
+ start = elapsed = 0
+ with open(path) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Build Started':
+ start = float(val)
+ elif key == "Elapsed time":
+ elapsed = float(val.split()[0])
+ return start, elapsed
+
@classmethod
def from_dir(cls, path):
"""Load buildstats from a buildstats directory"""
- if not os.path.isfile(os.path.join(path, 'build_stats')):
+ top_stats = os.path.join(path, 'build_stats')
+ if not os.path.isfile(top_stats):
raise BSError("{} does not look like a buildstats directory".format(path))
log.debug("Reading buildstats directory %s", path)
-
buildstats = cls()
+ build_started, build_elapsed = buildstats.parse_top_build_stats(top_stats)
+ build_end = build_started + build_elapsed
+
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
- if not os.path.isdir(recipe_dir):
+ if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
for task in os.listdir(recipe_dir):
bsrecipe.tasks[task] = BSTask.from_file(
- os.path.join(recipe_dir, task))
+ os.path.join(recipe_dir, task), build_end)
if name in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(name))
diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py
index aa946f3036..53f99dce1e 100644
--- a/scripts/lib/checklayer/__init__.py
+++ b/scripts/lib/checklayer/__init__.py
@@ -16,6 +16,7 @@ class LayerType(Enum):
BSP = 0
DISTRO = 1
SOFTWARE = 2
+ CORE = 3
ERROR_NO_LAYER_CONF = 98
ERROR_BSP_DISTRO = 99
@@ -106,7 +107,13 @@ def _detect_layer(layer_path):
if distros:
is_distro = True
- if is_bsp and is_distro:
+ layer['collections'] = _get_layer_collections(layer['path'])
+
+ if layer_name == "meta" and "core" in layer['collections']:
+ layer['type'] = LayerType.CORE
+ layer['conf']['machines'] = machines
+ layer['conf']['distros'] = distros
+ elif is_bsp and is_distro:
layer['type'] = LayerType.ERROR_BSP_DISTRO
elif is_bsp:
layer['type'] = LayerType.BSP
@@ -117,8 +124,6 @@ def _detect_layer(layer_path):
else:
layer['type'] = LayerType.SOFTWARE
- layer['collections'] = _get_layer_collections(layer['path'])
-
return layer
def detect_layers(layer_directories, no_auto):
@@ -319,8 +324,8 @@ def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
else:
raise
- sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$")
- tune_regex = re.compile("(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
+ sig_regex = re.compile(r"^(?P<task>.*:.*):(?P<hash>.*) .$")
+ tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
current_tune = None
with open(sigs_file, 'r') as f:
for line in f.readlines():
diff --git a/scripts/lib/checklayer/cases/bsp.py b/scripts/lib/checklayer/cases/bsp.py
index a80a5844da..b76163fb56 100644
--- a/scripts/lib/checklayer/cases/bsp.py
+++ b/scripts/lib/checklayer/cases/bsp.py
@@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class BSPCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.BSP:
+ if self.tc.layer['type'] not in (LayerType.BSP, LayerType.CORE):
raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." %\
self.tc.layer['name'])
diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py
index 491a13953c..722d3cf638 100644
--- a/scripts/lib/checklayer/cases/common.py
+++ b/scripts/lib/checklayer/cases/common.py
@@ -12,6 +12,9 @@ from checklayer.case import OECheckLayerTestCase
class CommonCheckLayer(OECheckLayerTestCase):
def test_readme(self):
+ if self.tc.layer['type'] == LayerType.CORE:
+ raise unittest.SkipTest("Core layer's README is top level")
+
# The top-level README file may have a suffix (like README.rst or README.txt).
readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
self.assertTrue(len(readme_files) > 0,
diff --git a/scripts/lib/checklayer/cases/distro.py b/scripts/lib/checklayer/cases/distro.py
index f0bee5493c..a35332451c 100644
--- a/scripts/lib/checklayer/cases/distro.py
+++ b/scripts/lib/checklayer/cases/distro.py
@@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class DistroCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.DISTRO:
+ if self.tc.layer['type'] not in (LayerType.DISTRO, LayerType.CORE):
raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't Distro one." %\
self.tc.layer['name'])
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
index 95384c5333..ff9227035d 100644
--- a/scripts/lib/devtool/menuconfig.py
+++ b/scripts/lib/devtool/menuconfig.py
@@ -43,7 +43,7 @@ def menuconfig(args, config, basepath, workspace):
return 1
check_workspace_recipe(workspace, args.component)
- pn = rd.getVar('PN', True)
+ pn = rd.getVar('PN')
if not rd.getVarFlag('do_menuconfig','task'):
raise DevtoolError("This recipe does not support menuconfig option")
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index 4b50e3c63b..7a005c9010 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -520,7 +520,9 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
for event in history:
if not 'flag' in event:
if event['op'].startswith((':append[', ':prepend[')):
- extra_overrides.append(event['op'].split('[')[1].split(']')[0])
+ override = event['op'].split('[')[1].split(']')[0]
+ if not override.startswith('pn-'):
+ extra_overrides.append(override)
# We want to remove duplicate overrides. If a recipe had multiple
# SRC_URI_override += values it would cause mulitple instances of
# overrides. This doesn't play nicely with things like creating a
@@ -763,6 +765,16 @@ def get_staging_kbranch(srcdir):
staging_kbranch = "".join(branch.split('\n')[0])
return staging_kbranch
+def get_real_srctree(srctree, s, workdir):
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(s)
+ workdir = os.path.abspath(workdir)
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree = os.path.join(srctree, srcsubdir)
+ return srctree
+
def modify(args, config, basepath, workspace):
"""Entry point for the devtool 'modify' subcommand"""
import bb
@@ -921,14 +933,7 @@ def modify(args, config, basepath, workspace):
# Need to grab this here in case the source is within a subdirectory
srctreebase = srctree
-
- # Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S'))
- workdir = os.path.abspath(rd.getVar('WORKDIR'))
- if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
- srctree = os.path.join(srctree, srcsubdir)
+ srctree = get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
bb.utils.mkdirhier(os.path.dirname(appendfile))
with open(appendfile, 'w') as f:
@@ -1404,6 +1409,18 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
updated = OrderedDict()
added = OrderedDict()
removed = OrderedDict()
+
+ # Get current branch and return early with empty lists
+ # if on one of the override branches
+ # (local files are provided only for the main branch and processing
+ # them against lists from recipe overrides will result in mismatches
+ # and broken modifications to recipes).
+ stdout, _ = bb.process.run('git rev-parse --abbrev-ref HEAD',
+ cwd=srctree)
+ branchname = stdout.rstrip()
+ if branchname.startswith(override_branch_prefix):
+ return (updated, added, removed)
+
local_files_dir = os.path.join(srctreebase, 'oe-local-files')
git_files = _git_ls_tree(srctree)
if 'oe-local-files' in git_files:
@@ -1604,6 +1621,19 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
if not os.path.exists(append):
raise DevtoolError('unable to find workspace bbappend for recipe %s' %
recipename)
+ srctreebase = workspace[recipename]['srctreebase']
+ relpatchdir = os.path.relpath(srctreebase, srctree)
+ if relpatchdir == '.':
+ patchdir_params = {}
+ else:
+ patchdir_params = {'patchdir': relpatchdir}
+
+ def srcuri_entry(basepath):
+ if patchdir_params:
+ paramstr = ';' + ';'.join('%s=%s' % (k,v) for k,v in patchdir_params.items())
+ else:
+ paramstr = ''
+ return 'file://%s%s' % (basepath, paramstr)
initial_rev, update_rev, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
if not initial_rev:
@@ -1620,32 +1650,25 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
tempdir = tempfile.mkdtemp(prefix='devtool')
try:
local_files_dir = tempfile.mkdtemp(dir=tempdir)
- if filter_patches:
- upd_f = {}
- new_f = {}
- del_f = {}
- else:
- srctreebase = workspace[recipename]['srctreebase']
- upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
-
- remove_files = []
- if not no_remove:
- # Get all patches from source tree and check if any should be removed
- all_patches_dir = tempfile.mkdtemp(dir=tempdir)
- _, _, del_p = _export_patches(srctree, rd, initial_rev,
- all_patches_dir)
- # Remove deleted local files and patches
- remove_files = list(del_f.values()) + list(del_p.values())
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
# Get updated patches from source tree
patches_dir = tempfile.mkdtemp(dir=tempdir)
upd_p, new_p, _ = _export_patches(srctree, rd, update_rev,
patches_dir, changed_revs)
+ # Get all patches from source tree and check if any should be removed
+ all_patches_dir = tempfile.mkdtemp(dir=tempdir)
+ _, _, del_p = _export_patches(srctree, rd, initial_rev,
+ all_patches_dir)
logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
if filter_patches:
new_p = OrderedDict()
upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches)
- remove_files = [f for f in remove_files if f in filter_patches]
+ del_p = OrderedDict((k,v) for k,v in del_p.items() if k in filter_patches)
+ remove_files = []
+ if not no_remove:
+ # Remove deleted local files and patches
+ remove_files = list(del_f.values()) + list(del_p.values())
updatefiles = False
updaterecipe = False
destpath = None
@@ -1661,14 +1684,15 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
removedentries, remaining = _remove_file_entries(
srcuri, remove_files)
if removedentries or remaining:
- remaining = ['file://' + os.path.basename(item) for
+ remaining = [srcuri_entry(os.path.basename(item)) for
item in remaining]
removevalues = {'SRC_URI': removedentries + remaining}
appendfile, destpath = oe.recipeutils.bbappend_recipe(
rd, appendlayerdir, files,
wildcardver=wildcard_version,
removevalues=removevalues,
- redirect_output=dry_run_outdir)
+ redirect_output=dry_run_outdir,
+ params=[patchdir_params] * len(files))
else:
logger.info('No patches or local source files needed updating')
else:
@@ -1692,7 +1716,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
# replace the entry in SRC_URI with our local version
logger.info('Replacing remote patch %s with updated local version' % basepath)
path = os.path.join(files_dir, basepath)
- _replace_srcuri_entry(srcuri, basepath, 'file://%s' % basepath)
+ _replace_srcuri_entry(srcuri, basepath, srcuri_entry(basepath))
updaterecipe = True
else:
logger.info('Updating patch %s%s' % (basepath, dry_run_suffix))
@@ -1706,7 +1730,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append('file://%s' % basepath)
+ srcuri.append(srcuri_entry(basepath))
updaterecipe = True
for basepath, path in new_p.items():
logger.info('Adding new patch %s%s' % (basepath, dry_run_suffix))
@@ -1714,7 +1738,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append('file://%s' % basepath)
+ srcuri.append(srcuri_entry(basepath))
updaterecipe = True
# Update recipe, if needed
if _remove_file_entries(srcuri, remove_files)[0]:
diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py
index 0357ec07bf..6c4a62b558 100644
--- a/scripts/lib/devtool/upgrade.py
+++ b/scripts/lib/devtool/upgrade.py
@@ -88,7 +88,7 @@ def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
_rename_recipe_dirs(oldpv, newpv, path)
return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
-def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
+def _write_append(rc, srctreebase, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
"""Writes an append file"""
if not os.path.exists(rc):
raise DevtoolError("bbappend not created because %s does not exist" % rc)
@@ -104,6 +104,11 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
af = os.path.join(appendpath, '%s.bbappend' % brf)
with open(af, 'w') as f:
f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n\n')
+ # Local files can be modified/tracked in separate subdir under srctree
+ # Mostly useful for packages with S != WORKDIR
+ f.write('FILESPATH:prepend := "%s:"\n' %
+ os.path.join(srctreebase, 'oe-local-files'))
+ f.write('# srctreebase: %s\n' % srctreebase)
f.write('inherit externalsrc\n')
f.write(('# NOTE: We use pn- overrides here to avoid affecting'
'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
@@ -119,20 +124,16 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
f.write('# original_files: %s\n' % ' '.join(copied))
return af
-def _cleanup_on_error(rf, srctree):
- rfp = os.path.split(rf)[0] # recipe folder
- rfpp = os.path.split(rfp)[0] # recipes folder
- if os.path.exists(rfp):
- shutil.rmtree(rfp)
- if not len(os.listdir(rfpp)):
- os.rmdir(rfpp)
+def _cleanup_on_error(rd, srctree):
+ if os.path.exists(rd):
+ shutil.rmtree(rd)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
-def _upgrade_error(e, rf, srctree, keep_failure=False, extramsg=None):
- if rf and not keep_failure:
- _cleanup_on_error(rf, srctree)
+def _upgrade_error(e, rd, srctree, keep_failure=False, extramsg=None):
+ if not keep_failure:
+ _cleanup_on_error(rd, srctree)
logger.error(e)
if extramsg:
logger.error(extramsg)
@@ -337,7 +338,10 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
replacing = True
new_src_uri = []
for entry in src_uri:
- scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ try:
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ except bb.fetch2.MalformedUrl as e:
+ raise DevtoolError("Could not decode SRC_URI: {}".format(e))
if replacing and scheme in ['git', 'gitsm']:
branch = params.get('branch', 'master')
if rd.expand(branch) != srcbranch:
@@ -426,7 +430,7 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
try:
rd = tinfoil.parse_recipe_file(fullpath, False)
except bb.tinfoil.TinfoilCommandFailed as e:
- _upgrade_error(e, fullpath, srctree, keep_failure, 'Parsing of upgraded recipe failed')
+ _upgrade_error(e, os.path.dirname(fullpath), srctree, keep_failure, 'Parsing of upgraded recipe failed')
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
return fullpath, copied
@@ -522,14 +526,7 @@ def upgrade(args, config, basepath, workspace):
else:
srctree = standard.get_default_srctree(config, pn)
- # Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S'))
- workdir = os.path.abspath(rd.getVar('WORKDIR'))
- srctree_s = srctree
- if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
- srctree_s = os.path.join(srctree, srcsubdir)
+ srctree_s = standard.get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
# try to automatically discover latest version and revision if not provided on command line
if not args.version and not args.srcrev:
@@ -568,13 +565,12 @@ def upgrade(args, config, basepath, workspace):
new_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
license_diff = _generate_license_diff(old_licenses, new_licenses)
rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
- except bb.process.CmdError as e:
- _upgrade_error(e, rf, srctree, args.keep_failure)
- except DevtoolError as e:
- _upgrade_error(e, rf, srctree, args.keep_failure)
+ except (bb.process.CmdError, DevtoolError) as e:
+ recipedir = os.path.join(config.workspace_path, 'recipes', rd.getVar('BPN'))
+ _upgrade_error(e, recipedir, srctree, args.keep_failure)
standard._add_md5(config, pn, os.path.dirname(rf))
- af = _write_append(rf, srctree_s, args.same_dir, args.no_same_dir, rev2,
+ af = _write_append(rf, srctree, srctree_s, args.same_dir, args.no_same_dir, rev2,
copied, config.workspace_path, rd)
standard._add_md5(config, pn, af)
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index 220465ed2f..7b4c501456 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -745,6 +745,10 @@ def create_recipe(args):
for handler in handlers:
handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+ # native and nativesdk classes are special and must be inherited last
+ # If present, put them at the end of the classes list
+ classes.sort(key=lambda c: c in ("native", "nativesdk"))
+
extrafiles = extravalues.pop('extrafiles', {})
extra_pn = extravalues.pop('PN', None)
extra_pv = extravalues.pop('PV', None)
@@ -1069,12 +1073,12 @@ def crunch_license(licfile):
# Note: these are carefully constructed!
license_title_re = re.compile(r'^#*\(? *(This is )?([Tt]he )?.{0,15} ?[Ll]icen[sc]e( \(.{1,10}\))?\)?[:\.]? ?#*$')
license_statement_re = re.compile(r'^((This (project|software)|.{1,10}) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
- copyright_re = re.compile('^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$')
- disclaimer_re = re.compile('^ *\*? ?All [Rr]ights [Rr]eserved\.$')
- email_re = re.compile('^.*<[\w\.-]*@[\w\.\-]*>$')
- header_re = re.compile('^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$')
- tag_re = re.compile('^ *@?\(?([Ll]icense|MIT)\)?$')
- url_re = re.compile('^ *[#\*]* *https?:\/\/[\w\.\/\-]+$')
+ copyright_re = re.compile(r'^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$')
+ disclaimer_re = re.compile(r'^ *\*? ?All [Rr]ights [Rr]eserved\.$')
+ email_re = re.compile(r'^.*<[\w\.-]*@[\w\.\-]*>$')
+ header_re = re.compile(r'^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$')
+ tag_re = re.compile(r'^ *@?\(?([Ll]icense|MIT)\)?$')
+ url_re = re.compile(r'^ *[#\*]* *https?:\/\/[\w\.\/\-]+$')
crunched_md5sums = {}
diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py
index 5015634476..bc4fb14a20 100644
--- a/scripts/lib/recipetool/create_buildsys.py
+++ b/scripts/lib/recipetool/create_buildsys.py
@@ -137,15 +137,15 @@ class CmakeRecipeHandler(RecipeHandler):
deps = []
unmappedpkgs = []
- proj_re = re.compile('project\s*\(([^)]*)\)', re.IGNORECASE)
- pkgcm_re = re.compile('pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
- pkgsm_re = re.compile('pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
- findpackage_re = re.compile('find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
- findlibrary_re = re.compile('find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
- checklib_re = re.compile('check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
- include_re = re.compile('include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
- subdir_re = re.compile('add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
- dep_re = re.compile('([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
+ proj_re = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE)
+ pkgcm_re = re.compile(r'pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
+ pkgsm_re = re.compile(r'pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
+ findpackage_re = re.compile(r'find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
+ findlibrary_re = re.compile(r'find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
+ checklib_re = re.compile(r'check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
+ include_re = re.compile(r'include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ subdir_re = re.compile(r'add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ dep_re = re.compile(r'([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
def find_cmake_package(pkg):
RecipeHandler.load_devel_filemap(tinfoil.config_data)
@@ -423,16 +423,16 @@ class AutotoolsRecipeHandler(RecipeHandler):
'makeinfo': 'texinfo',
}
- pkg_re = re.compile('PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- pkgce_re = re.compile('PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
- lib_re = re.compile('AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
- libx_re = re.compile('AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
- progs_re = re.compile('_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- dep_re = re.compile('([^ ><=]+)( [<>=]+ [^ ><=]+)?')
- ac_init_re = re.compile('AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
- am_init_re = re.compile('AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
- define_re = re.compile('\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
- version_re = re.compile('([0-9.]+)')
+ pkg_re = re.compile(r'PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ pkgce_re = re.compile(r'PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
+ lib_re = re.compile(r'AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
+ libx_re = re.compile(r'AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
+ progs_re = re.compile(r'_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ dep_re = re.compile(r'([^ ><=]+)( [<>=]+ [^ ><=]+)?')
+ ac_init_re = re.compile(r'AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
+ am_init_re = re.compile(r'AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
+ define_re = re.compile(r'\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
+ version_re = re.compile(r'([0-9.]+)')
defines = {}
def subst_defines(value):
diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py
index f4f51c88b4..a7eed3256f 100644
--- a/scripts/lib/recipetool/create_buildsys_python.py
+++ b/scripts/lib/recipetool/create_buildsys_python.py
@@ -10,7 +10,7 @@ import codecs
import collections
import setuptools.command.build_py
import email
-import imp
+import importlib
import glob
import itertools
import logging
@@ -209,6 +209,18 @@ class PythonRecipeHandler(RecipeHandler):
continue
if line.startswith('['):
+ # PACKAGECONFIG must not contain expressions or whitespace
+ line = line.replace(" ", "")
+ line = line.replace(':', "")
+ line = line.replace('.', "-dot-")
+ line = line.replace('"', "")
+ line = line.replace('<', "-smaller-")
+ line = line.replace('>', "-bigger-")
+ line = line.replace('_', "-")
+ line = line.replace('(', "")
+ line = line.replace(')', "")
+ line = line.replace('!', "-not-")
+ line = line.replace('=', "-equals-")
current_feature = line[1:-1]
elif current_feature:
extras_req[current_feature].append(line)
@@ -297,6 +309,7 @@ class PythonRecipeHandler(RecipeHandler):
lines_after.append('# The following configs & dependencies are from setuptools extras_require.')
lines_after.append('# These dependencies are optional, hence can be controlled via PACKAGECONFIG.')
lines_after.append('# The upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('# The configs are might not correct, since PACKAGECONFIG does not support expressions as may used in requires.txt - they are just replaced by text.')
lines_after.append('#')
lines_after.append('# Uncomment this line to enable all the optional features.')
lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req)))
@@ -548,7 +561,6 @@ class PythonRecipeHandler(RecipeHandler):
return deps
def parse_pkgdata_for_python_packages(self):
- suffixes = [t[0] for t in imp.get_suffixes()]
pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
ldata = tinfoil.config_data.createCopy()
@@ -572,7 +584,7 @@ class PythonRecipeHandler(RecipeHandler):
continue
for fn in files_info:
- for suffix in suffixes:
+ for suffix in importlib.machinery.all_suffixes():
if fn.endswith(suffix):
break
else:
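For reference, the importlib replacement returns plain suffix strings where the removed imp API returned (suffix, mode, type) tuples, so the loop can test file names directly (output shown is indicative only):

    import importlib.machinery

    suffixes = importlib.machinery.all_suffixes()
    print(suffixes)   # e.g. ['.py', '.pyc', '.cpython-311-x86_64-linux-gnu.so', '.abi3.so', '.so']
    print(any("foo.cpython-311-x86_64-linux-gnu.so".endswith(s) for s in suffixes))   # True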
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index f0ca50ebe2..a349510ab8 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -176,7 +176,10 @@ class ResultsTextReport(object):
vals['sort'] = line['testseries'] + "_" + line['result_id']
vals['failed_testcases'] = line['failed_testcases']
for k in cols:
- vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ if total_tested:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ else:
+ vals[k] = "0 (0%)"
for k in maxlen:
if k in vals and len(vals[k]) > maxlen[k]:
maxlen[k] = len(vals[k])
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index 8917022d36..c5521d81bd 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -58,7 +58,11 @@ def append_resultsdata(results, f, configmap=store_map, configvars=extra_configv
testseries = posixpath.basename(posixpath.dirname(url.path))
else:
with open(f, "r") as filedata:
- data = json.load(filedata)
+ try:
+ data = json.load(filedata)
+ except json.decoder.JSONDecodeError:
+ print("Cannot decode {}. Possible corruption. Skipping.".format(f))
+ data = ""
testseries = os.path.basename(os.path.dirname(f))
else:
data = f
@@ -142,7 +146,7 @@ def generic_get_log(sectionname, results, section):
return decode_log(ptest['log'])
def ptestresult_get_log(results, section):
- return generic_get_log('ptestresuls.sections', results, section)
+ return generic_get_log('ptestresult.sections', results, section)
def generic_get_rawlogs(sectname, results):
if sectname not in results:
diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py
index 3e11822996..2b90821b30 100644
--- a/scripts/lib/wic/misc.py
+++ b/scripts/lib/wic/misc.py
@@ -36,6 +36,7 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools",
"mkdosfs": "dosfstools",
"mkisofs": "cdrtools",
"mkfs.btrfs": "btrfs-tools",
+ "mkfs.erofs": "erofs-utils",
"mkfs.ext2": "e2fsprogs",
"mkfs.ext3": "e2fsprogs",
"mkfs.ext4": "e2fsprogs",
@@ -140,11 +141,12 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
cmd_and_args = pseudo + cmd_and_args
hosttools_dir = get_bitbake_var("HOSTTOOLS_DIR")
+ target_sys = get_bitbake_var("TARGET_SYS")
- native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin:%s" % \
+ native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/usr/bin/%s:%s/bin:%s" % \
(native_sysroot, native_sysroot,
- native_sysroot, native_sysroot,
- hosttools_dir)
+ native_sysroot, native_sysroot, target_sys,
+ native_sysroot, hosttools_dir)
native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
(native_paths, cmd_and_args)
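Roughly what the extended search path evaluates to, assuming illustrative values for the three BitBake variables; the new usr/bin/<TARGET_SYS> entry is where cross-prefixed tools live in the native sysroot:

    native_sysroot = "recipe-sysroot-native"   # assumed value
    target_sys = "x86_64-poky-linux"           # assumed TARGET_SYS
    hosttools_dir = "hosttools"                # assumed HOSTTOOLS_DIR
    native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/usr/bin/%s:%s/bin:%s" % (
        native_sysroot, native_sysroot, native_sysroot,
        native_sysroot, target_sys, native_sysroot, hosttools_dir)
    print(native_paths)
    # recipe-sysroot-native/sbin:...:recipe-sysroot-native/usr/bin/x86_64-poky-linux:...:hosttools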
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
index 09e491dd49..5275da6ed3 100644
--- a/scripts/lib/wic/partition.py
+++ b/scripts/lib/wic/partition.py
@@ -132,6 +132,8 @@ class Partition():
self.update_fstab_in_rootfs = True
if not self.source:
+ if self.fstype == "none" or self.no_table:
+ return
if not self.size and not self.fixed_size:
raise WicError("The %s partition has a size of zero. Please "
"specify a non-zero --size/--fixed-size for that "
@@ -299,6 +301,30 @@ class Partition():
mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ sde_time = hex(int(os.getenv('SOURCE_DATE_EPOCH')))
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ files = []
+ for root, dirs, others in os.walk(rootfs_dir):
+ base = root.replace(rootfs_dir, "").rstrip(os.sep)
+ files += [ "/" if base == "" else base ]
+ files += [ base + "/" + n for n in dirs + others ]
+ with open(debugfs_script_path, "w") as f:
+ f.write("set_current_time %s\n" % (sde_time))
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ f.write("set_inode_field /etc/fstab mtime %s\n" % (sde_time))
+ f.write("set_inode_field /etc/fstab mtime_extra 0\n")
+ for file in set(files):
+ for time in ["atime", "ctime", "crtime"]:
+ f.write("set_inode_field \"%s\" %s %s\n" % (file, time, sde_time))
+ f.write("set_inode_field \"%s\" %s_extra 0\n" % (file, time))
+ for time in ["wtime", "mkfs_time", "lastcheck"]:
+ f.write("set_super_value %s %s\n" % (time, sde_time))
+ for time in ["mtime", "first_error_time", "last_error_time"]:
+ f.write("set_super_value %s 0\n" % (time))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
self.check_for_Y2038_problem(rootfs, native_sysroot)
def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
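A minimal sketch of the timestamp-clamping script the new code writes, assuming SOURCE_DATE_EPOCH is exported by the build (the value below is made up); the generated script is then fed to debugfs -w -f against the ext image:

    import os

    os.environ.setdefault('SOURCE_DATE_EPOCH', '1672531200')   # assumed value
    sde_time = hex(int(os.environ['SOURCE_DATE_EPOCH']))       # debugfs accepts hex times
    script = ["set_current_time %s" % sde_time]
    for path in ["/", "/etc", "/etc/fstab"]:                   # normally every path in the rootfs
        for t in ["atime", "ctime", "crtime"]:
            script.append('set_inode_field "%s" %s %s' % (path, t, sde_time))
            script.append('set_inode_field "%s" %s_extra 0' % (path, t))
    print("\n".join(script))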
@@ -352,7 +378,7 @@ class Partition():
exec_native_cmd(mcopy_cmd, native_sysroot)
if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
- mcopy_cmd = "mcopy -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
+ mcopy_cmd = "mcopy -m -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
exec_native_cmd(mcopy_cmd, native_sysroot)
chmod_cmd = "chmod 644 %s" % rootfs
@@ -380,6 +406,9 @@ class Partition():
(extraopts, self.fsuuid, rootfs, rootfs_dir)
exec_native_cmd(erofs_cmd, native_sysroot, pseudo=pseudo)
+ def prepare_empty_partition_none(self, rootfs, oe_builddir, native_sysroot):
+ pass
+
def prepare_empty_partition_ext(self, rootfs, oe_builddir,
native_sysroot):
"""
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index 4d0b836ef6..165fc2979f 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -117,7 +117,7 @@ class DirectPlugin(ImagerPlugin):
updated = False
for part in self.parts:
if not part.realnum or not part.mountpoint \
- or part.mountpoint == "/" or not part.mountpoint.startswith('/'):
+ or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"):
continue
if part.use_uuid:
@@ -148,6 +148,9 @@ class DirectPlugin(ImagerPlugin):
self.updated_fstab_path = os.path.join(self.workdir, "fstab")
with open(self.updated_fstab_path, "w") as f:
f.writelines(fstab_lines)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ fstab_time = int(os.getenv('SOURCE_DATE_EPOCH'))
+ os.utime(self.updated_fstab_path, (fstab_time, fstab_time))
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index 0391aebdc8..a2b9f4c893 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -326,21 +326,22 @@ class BootimgEFIPlugin(SourcePlugin):
exec_cmd(install_cmd)
staging_dir_host = get_bitbake_var("STAGING_DIR_HOST")
+ target_sys = get_bitbake_var("TARGET_SYS")
# https://www.freedesktop.org/software/systemd/man/systemd-stub.html
- objcopy_cmd = "objcopy \
- --add-section .osrel=%s --change-section-vma .osrel=0x20000 \
- --add-section .cmdline=%s --change-section-vma .cmdline=0x30000 \
- --add-section .linux=%s --change-section-vma .linux=0x2000000 \
- --add-section .initrd=%s --change-section-vma .initrd=0x3000000 \
- %s %s" % \
- ("%s/usr/lib/os-release" % staging_dir_host,
- cmdline.name,
- "%s/%s" % (staging_kernel_dir, kernel),
- initrd.name,
- efi_stub,
- "%s/EFI/Linux/linux.efi" % hdddir)
- exec_cmd(objcopy_cmd)
+ objcopy_cmd = "%s-objcopy" % target_sys
+ objcopy_cmd += " --enable-deterministic-archives"
+ objcopy_cmd += " --preserve-dates"
+ objcopy_cmd += " --add-section .osrel=%s/usr/lib/os-release" % staging_dir_host
+ objcopy_cmd += " --change-section-vma .osrel=0x20000"
+ objcopy_cmd += " --add-section .cmdline=%s" % cmdline.name
+ objcopy_cmd += " --change-section-vma .cmdline=0x30000"
+ objcopy_cmd += " --add-section .linux=%s/%s" % (staging_kernel_dir, kernel)
+ objcopy_cmd += " --change-section-vma .linux=0x2000000"
+ objcopy_cmd += " --add-section .initrd=%s" % initrd.name
+ objcopy_cmd += " --change-section-vma .initrd=0x3000000"
+ objcopy_cmd += " %s %s/EFI/Linux/linux.efi" % (efi_stub, hdddir)
+ exec_native_cmd(objcopy_cmd, native_sysroot)
else:
install_cmd = "install -m 0644 %s/%s %s/%s" % \
(staging_kernel_dir, kernel, hdddir, kernel)
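Printed out, the command assembled above is a single cross-objcopy invocation along these lines (paths and TARGET_SYS are assumed); it is run through exec_native_cmd() so the binutils from the native sysroot are used:

    target_sys = "x86_64-poky-linux"           # assumed TARGET_SYS
    cmd = "%s-objcopy --enable-deterministic-archives --preserve-dates" % target_sys
    cmd += " --add-section .osrel=recipe-sysroot/usr/lib/os-release --change-section-vma .osrel=0x20000"
    cmd += " --add-section .cmdline=cmdline --change-section-vma .cmdline=0x30000"
    cmd += " --add-section .linux=bzImage --change-section-vma .linux=0x2000000"
    cmd += " --add-section .initrd=initrd --change-section-vma .initrd=0x3000000"
    cmd += " linuxx64.efi.stub hddimg/EFI/Linux/linux.efi"
    print(cmd)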
@@ -391,6 +392,13 @@ class BootimgEFIPlugin(SourcePlugin):
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, part.mountpoint, blocks)
+ # required for compatibility with certain devices expecting file system
+ # block count to be equal to partition block count
+ if blocks < part.fixed_size:
+ blocks = part.fixed_size
+ logger.debug("Overriding %s to %d total blocks for compatibility",
+ part.mountpoint, blocks)
+
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
index 25bb41dd70..e29f3a4c2f 100644
--- a/scripts/lib/wic/plugins/source/rootfs.py
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -35,7 +35,7 @@ class RootfsPlugin(SourcePlugin):
@staticmethod
def __validate_path(cmd, rootfs_dir, path):
if os.path.isabs(path):
- logger.error("%s: Must be relative: %s" % (cmd, orig_path))
+ logger.error("%s: Must be relative: %s" % (cmd, path))
sys.exit(1)
# Disallow climbing outside of parent directory using '..',
@@ -224,7 +224,7 @@ class RootfsPlugin(SourcePlugin):
if part.update_fstab_in_rootfs and part.has_fstab and not part.no_fstab_update:
fstab_path = os.path.join(new_rootfs, "etc/fstab")
# Assume that fstab should always be owned by root with fixed permissions
- install_cmd = "install -m 0644 %s %s" % (part.updated_fstab_path, fstab_path)
+ install_cmd = "install -m 0644 -p %s %s" % (part.updated_fstab_path, fstab_path)
if new_pseudo:
pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
else:
diff --git a/scripts/oe-check-sstate b/scripts/oe-check-sstate
index f4cc5869de..0d171c4463 100755
--- a/scripts/oe-check-sstate
+++ b/scripts/oe-check-sstate
@@ -18,7 +18,6 @@ import re
scripts_path = os.path.dirname(os.path.realpath(__file__))
lib_path = scripts_path + '/lib'
sys.path = sys.path + [lib_path]
-import scriptutils
import scriptpath
scriptpath.add_bitbake_lib_path()
import argparse_oe
@@ -51,13 +50,10 @@ def check(args):
env['TMPDIR:forcevariable'] = tmpdir
try:
- output = subprocess.check_output(
- 'bitbake -n %s' % ' '.join(args.target),
- stderr=subprocess.STDOUT,
- env=env,
- shell=True)
+ cmd = ['bitbake', '--dry-run', '--runall=build'] + args.target
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
- task_re = re.compile('NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
+ task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
tasks = []
for line in output.decode('utf-8').splitlines():
res = task_re.match(line)
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
index 5eb3e12769..1c2d51c6ec 100755
--- a/scripts/oe-depends-dot
+++ b/scripts/oe-depends-dot
@@ -15,7 +15,7 @@ class Dot(object):
def __init__(self):
parser = argparse.ArgumentParser(
description="Analyse recipe-depends.dot generated by bitbake -g",
- epilog="Use %(prog)s --help to get help")
+ formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("dotfile",
help = "Specify the dotfile", nargs = 1, action='store', default='')
parser.add_argument("-k", "--key",
@@ -32,6 +32,21 @@ class Dot(object):
" For example, A->B, B->C, A->C, then A->C can be removed.",
action="store_true", default=False)
+ parser.epilog = """
+Examples:
+First generate the .dot file:
+ bitbake -g core-image-minimal
+
+To find out why a package is being built:
+ %(prog)s -k <package> -w ./task-depends.dot
+
+To find out what a package depends on:
+ %(prog)s -k <package> -d ./task-depends.dot
+
+Reduce the .dot file to packages only, no tasks:
+ %(prog)s -r ./task-depends.dot
+"""
+
self.args = parser.parse_args()
if len(sys.argv) != 3 and len(sys.argv) < 5:
@@ -99,6 +114,10 @@ class Dot(object):
if key == "meta-world-pkgdata":
continue
dep = m.group(2)
+ key = key.split('.')[0]
+ dep = dep.split('.')[0]
+ if key == dep:
+ continue
if key in depends:
if not key in depends[key]:
depends[key].add(dep)
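An illustration of the new key/dep handling: edges in task-depends.dot carry task suffixes, which are stripped so the graph collapses to recipe level and self-edges are dropped (the edge pattern below is a simplified stand-in for the script's real one):

    import re

    line = '"busybox.do_compile" -> "busybox.do_prepare_recipe_sysroot"'
    m = re.match(r'"?([^" ]+)"? *-> *"?([^" ]+)"?', line)   # simplified, assumed pattern
    key = m.group(1).split('.')[0]
    dep = m.group(2).split('.')[0]
    print(key, dep)     # busybox busybox
    print(key == dep)   # True, so this self-edge is skipped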
diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util
index 7412cc1f47..44ae40549a 100755
--- a/scripts/oe-pkgdata-util
+++ b/scripts/oe-pkgdata-util
@@ -296,7 +296,7 @@ def package_info(args):
extra = ''
for line in f:
for var in vars:
- m = re.match(var + '(?::\S+)?:\s*(.+?)\s*$', line)
+ m = re.match(var + r'(?::\S+)?:\s*(.+?)\s*$', line)
if m:
vals[var] = m.group(1)
pkg_version = vals['PKGV'] or ''
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
index 54048e62ec..5d644168cb 100755
--- a/scripts/oe-setup-builddir
+++ b/scripts/oe-setup-builddir
@@ -74,9 +74,10 @@ fi
if [ ! -r "$BUILDDIR/conf/local.conf" ]; then
cat <<EOM
You had no conf/local.conf file. This configuration file has therefore been
-created for you with some default values. You may wish to edit it to, for
-example, select a different MACHINE (target hardware). See conf/local.conf
-for more information as common configuration options are commented.
+created for you from $OECORELOCALCONF
+You may wish to edit it to, for example, select a different MACHINE (target
+hardware). See conf/local.conf for more information as common configuration
+options are commented.
EOM
cp -f "$OECORELOCALCONF" "$BUILDDIR/conf/local.conf"
@@ -89,8 +90,9 @@ fi
if [ ! -r "$BUILDDIR/conf/bblayers.conf" ]; then
cat <<EOM
You had no conf/bblayers.conf file. This configuration file has therefore been
-created for you with some default values. To add additional metadata layers
-into your configuration please add entries to conf/bblayers.conf.
+created for you from $OECORELAYERCONF
+To add additional metadata layers into your configuration please add entries
+to conf/bblayers.conf.
EOM
diff --git a/scripts/opkg-query-helper.py b/scripts/opkg-query-helper.py
index bc3ab43823..084d9ef684 100755
--- a/scripts/opkg-query-helper.py
+++ b/scripts/opkg-query-helper.py
@@ -29,7 +29,7 @@ for arg in sys.argv[1:]:
args.append(arg)
# Regex for removing version specs after dependency items
-verregex = re.compile(' \([=<>]* [^ )]*\)')
+verregex = re.compile(r' \([=<>]* [^ )]*\)')
pkg = ""
ver = ""
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py
index fc708b55c3..ce3af74e2b 100644
--- a/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -558,6 +558,11 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, proc_h)
+
+ # Show elapsed time for each task
+ elapsed_time = f"{trace.processes[process][1] - start}s"
+ draw_text(ctx, elapsed_time, PROC_TEXT_COLOR, x + w + 4, y + proc_h - 4)
+
y = y + proc_h
return curr_y
@@ -698,7 +703,7 @@ def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
cmdString = proc.cmd
else:
cmdString = ''
- if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0:
+ if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
cmdString = cmdString + " [" + str(ipid // 1000) + "]"
if OPTIONS.show_all:
if proc.args:
@@ -796,7 +801,7 @@ class CumlSample:
if self.color is None:
i = self.next() % HSV_MAX_MOD
h = 0.0
- if i is not 0:
+ if i != 0:
h = (1.0 * i) / HSV_MAX_MOD
s = 0.5
v = 1.0
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index b42dac6b88..9d6787ec5a 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -128,7 +128,7 @@ class Trace:
def compile(self, writer):
def find_parent_id_for(pid):
- if pid is 0:
+ if pid == 0:
return 0
ppid = self.parent_map.get(pid)
if ppid:
diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py
index 4ed8bfc0d1..8a728720ba 100755
--- a/scripts/relocate_sdk.py
+++ b/scripts/relocate_sdk.py
@@ -104,11 +104,12 @@ def change_interpreter(elf_file_name):
if (len(new_dl_path) >= p_filesz):
print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
% (elf_file_name, p_memsz, len(new_dl_path) + 1))
- break
+ return False
dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path))
f.seek(p_offset)
f.write(dl_path)
break
+ return True
def change_dl_sysdirs(elf_file_name):
if arch == 32:
@@ -222,6 +223,7 @@ else:
executables_list = sys.argv[3:]
+errors = False
for e in executables_list:
perms = os.stat(e)[stat.ST_MODE]
if os.access(e, os.W_OK|os.R_OK):
@@ -247,7 +249,8 @@ for e in executables_list:
arch = get_arch()
if arch:
parse_elf_header()
- change_interpreter(e)
+ if not change_interpreter(e):
+ errors = True
change_dl_sysdirs(e)
""" change permissions back """
@@ -260,3 +263,6 @@ for e in executables_list:
print("New file size for %s is different. Looks like a relocation error!", e)
sys.exit(-1)
+if errors:
+ print("Relocation of one or more executables failed.")
+ sys.exit(-1)
diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh
index 7cd771bbe7..8199b43784 100755
--- a/scripts/rpm2cpio.sh
+++ b/scripts/rpm2cpio.sh
@@ -7,7 +7,7 @@ fatal() {
}
pkg="$1"
-[ -n "$pkg" -a -e "$pkg" ] ||
+[ -n "$pkg" ] && [ -e "$pkg" ] ||
fatal "No package supplied"
_dd() {
@@ -16,14 +16,23 @@ _dd() {
}
calcsize() {
+
+ case "$(_dd $1 bs=4 count=1 | tr -d '\0')" in
+ "$(printf '\216\255\350')"*) ;; # '\x8e\xad\xe8'
+ *) fatal "File doesn't look like rpm: $pkg" ;;
+ esac
+
offset=$(($1 + 8))
local i b b0 b1 b2 b3 b4 b5 b6 b7
i=0
while [ $i -lt 8 ]; do
- b=$(_dd $(($offset + $i)) bs=1 count=1; echo X)
- b=${b%X}
+ # add . so the trailing \n is not lost
+ # strip \0 as it gets dropped with a warning otherwise
+ b="$(_dd $(($offset + $i)) bs=1 count=1 | tr -d '\0' ; echo .)"
+ b=${b%.} # strip . again
+
[ -z "$b" ] &&
b="0" ||
b="$(exec printf '%u\n' "'$b")"
@@ -35,7 +44,7 @@ calcsize() {
offset=$(($offset + $rsize))
}
-case "$(_dd 0 bs=8 count=1)" in
+case "$(_dd 0 bs=4 count=1 | tr -d '\0')" in
"$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb'
*) fatal "File doesn't look like rpm: $pkg" ;;
esac
@@ -46,10 +55,11 @@ sigsize=$rsize
calcsize $(($offset + (8 - ($sigsize % 8)) % 8))
hdrsize=$rsize
-case "$(_dd $offset bs=3 count=1)" in
- "$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a'
- "$(printf '\037\213')"*) _dd $offset | gunzip ;; # '\x1f\x8b'
- "$(printf '\375\067')"*) _dd $offset | xzcat ;; # '\xfd\x37'
- "$(printf '\135\000')"*) _dd $offset | unlzma ;; # '\x5d\x00'
- *) fatal "Unrecognized rpm file: $pkg" ;;
+case "$(_dd $offset bs=2 count=1 | tr -d '\0')" in
+ "$(printf '\102\132')") _dd $offset | bunzip2 ;; # '\x42\x5a'
+ "$(printf '\037\213')") _dd $offset | gunzip ;; # '\x1f\x8b'
+ "$(printf '\375\067')") _dd $offset | xzcat ;; # '\xfd\x37'
+ "$(printf '\135')") _dd $offset | unlzma ;; # '\x5d\x00'
+ "$(printf '\050\265')") _dd $offset | unzstd ;; # '\x28\xb5'
+ *) fatal "Unrecognized payload compression format in rpm file: $pkg" ;;
esac
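The same payload-magic dispatch, sketched in Python for clarity (the zstd case is the one added here; lzma is matched on its first byte only because the shell code strips NUL bytes):

    MAGICS = {b'\x42\x5a': 'bunzip2', b'\x1f\x8b': 'gunzip', b'\xfd\x37': 'xzcat',
              b'\x5d': 'unlzma', b'\x28\xb5': 'unzstd'}

    def payload_tool(first_bytes):
        for magic, tool in MAGICS.items():
            if first_bytes.startswith(magic):
                return tool
        raise ValueError("Unrecognized payload compression format")

    print(payload_tool(b'\x28\xb5\x2f\xfd'))   # zstd frame magic -> unzstd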
diff --git a/scripts/runqemu b/scripts/runqemu
index 6e1f073ed2..ba7c1b2461 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -82,6 +82,7 @@ of the following environment variables (in any order):
kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
audio - enable audio
+ qmp=<path> - create a QMP socket (defaults to unix:qmp.sock if unspecified)
[*/]ovmf* - OVMF firmware file or base name for booting with UEFI
tcpserial=<port> - specify tcp serial port number
qemuparams=<xyz> - specify custom parameters to QEMU
@@ -210,11 +211,13 @@ class BaseConfig(object):
self.mac_tap = "52:54:00:12:34:"
self.mac_slirp = "52:54:00:12:35:"
# pid of the actual qemu process
- self.qemupid = None
+ self.qemu_environ = os.environ.copy()
+ self.qemuprocess = None
# avoid cleanup twice
self.cleaned = False
# Files to cleanup after run
self.cleanup_files = []
+ self.qmp = None
def acquire_taplock(self, error=True):
logger.debug("Acquiring lockfile %s..." % self.taplock)
@@ -361,7 +364,7 @@ class BaseConfig(object):
if p.endswith('.qemuboot.conf'):
self.qemuboot = p
self.qbconfload = True
- elif re.search('\.bin$', p) or re.search('bzImage', p) or \
+ elif re.search('\\.bin$', p) or re.search('bzImage', p) or \
re.search('zImage', p) or re.search('vmlinux', p) or \
re.search('fitImage', p) or re.search('uImage', p):
self.kernel = p
@@ -375,13 +378,13 @@ class BaseConfig(object):
fst = t
break
if not fst:
- m = re.search('.*\.(.*)$', self.rootfs)
+ m = re.search('.*\\.(.*)$', self.rootfs)
if m:
fst = m.group(1)
if fst:
self.check_arg_fstype(fst)
- qb = re.sub('\.' + fst + "$", '', self.rootfs)
- qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf')
+ qb = re.sub('\\.' + fst + "$", '', self.rootfs)
+ qb = '%s%s' % (re.sub('\\.rootfs$', '', qb), '.qemuboot.conf')
if os.path.exists(qb):
self.qemuboot = qb
self.qbconfload = True
@@ -446,29 +449,16 @@ class BaseConfig(object):
self.set("MACHINE", arg)
def set_dri_path(self):
- # As runqemu can be run within bitbake (when using testimage, for example),
- # we need to ensure that we run host pkg-config, and that it does not
- # get mis-directed to native build paths set by bitbake.
- try:
- del os.environ['PKG_CONFIG_PATH']
- del os.environ['PKG_CONFIG_DIR']
- del os.environ['PKG_CONFIG_LIBDIR']
- del os.environ['PKG_CONFIG_SYSROOT_DIR']
- except KeyError:
- pass
- try:
- dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
- except subprocess.CalledProcessError as e:
- raise RunQemuError("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
- os.environ['LIBGL_DRIVERS_PATH'] = dripath.decode('utf-8').strip()
-
- # This preloads uninative libc pieces and therefore ensures that RPATH/RUNPATH
- # in host mesa drivers doesn't trick uninative into loading host libc.
- preload_items = ['libdl.so.2', 'librt.so.1', 'libpthread.so.0']
- uninative_path = os.path.dirname(self.get("UNINATIVE_LOADER"))
- if os.path.exists(uninative_path):
- preload_paths = [os.path.join(uninative_path, i) for i in preload_items]
- os.environ['LD_PRELOAD'] = " ".join(preload_paths)
+ drivers_path = os.path.join(self.bindir_native, '../lib/dri')
+ if not os.path.exists(drivers_path) or not os.listdir(drivers_path):
+ raise RunQemuError("""
+qemu has been built without opengl support, so accelerated graphics support is not available.
+To enable it, add:
+DISTRO_FEATURES_NATIVE:append = " opengl"
+DISTRO_FEATURES_NATIVESDK:append = " opengl"
+to your build configuration.
+""")
+ self.qemu_environ['LIBGL_DRIVERS_PATH'] = drivers_path
def check_args(self):
for debug in ("-d", "--debug"):
@@ -482,8 +472,8 @@ class BaseConfig(object):
sys.argv.remove(quiet)
if 'gl' not in sys.argv[1:] and 'gl-es' not in sys.argv[1:]:
- os.environ['SDL_RENDER_DRIVER'] = 'software'
- os.environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'
+ self.qemu_environ['SDL_RENDER_DRIVER'] = 'software'
+ self.qemu_environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'
unknown_arg = ""
for arg in sys.argv[1:]:
@@ -497,7 +487,7 @@ class BaseConfig(object):
self.gtk = True
elif arg == 'gl':
self.gl = True
- elif 'gl-es' in sys.argv[1:]:
+ elif arg == 'gl-es':
self.gl_es = True
elif arg == 'egl-headless':
self.egl_headless = True
@@ -524,6 +514,10 @@ class BaseConfig(object):
elif arg == 'publicvnc':
self.publicvnc = True
self.qemu_opt_script += ' -vnc :0'
+ elif arg == "qmp":
+ self.qmp = "unix:qmp.sock"
+ elif arg.startswith("qmp="):
+ self.qmp = arg[len('qmp='):]
elif arg.startswith('tcpserial='):
self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
elif arg.startswith('qemuparams='):
@@ -999,17 +993,14 @@ class BaseConfig(object):
else:
self.nfs_server = '192.168.7.1'
- # Figure out a new nfs_instance to allow multiple qemus running.
- ps = subprocess.check_output(("ps", "auxww")).decode('utf-8')
- pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
- all_instances = re.findall(pattern, ps, re.M)
- if all_instances:
- all_instances.sort(key=int)
- self.nfs_instance = int(all_instances.pop()) + 1
-
- nfsd_port = 3049 + 2 * self.nfs_instance
- mountd_port = 3048 + 2 * self.nfs_instance
+ nfsd_port = 3048 + self.nfs_instance
+ lockdir = "/tmp/qemu-port-locks"
+ self.make_lock_dir(lockdir)
+ while not self.check_free_port('localhost', nfsd_port, lockdir):
+ self.nfs_instance += 1
+ nfsd_port += 1
+ mountd_port = nfsd_port
# Export vars for runqemu-export-rootfs
export_dict = {
'NFS_INSTANCE': self.nfs_instance,
@@ -1060,6 +1051,17 @@ class BaseConfig(object):
self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+ def make_lock_dir(self, lockdir):
+ if not os.path.exists(lockdir):
+ # There might be a race issue when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+ return
+
def setup_slirp(self):
"""Setup user networking"""
@@ -1078,14 +1080,7 @@ class BaseConfig(object):
mac = 2
lockdir = "/tmp/qemu-port-locks"
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
# Find a free port to avoid conflicts
for p in ports[:]:
@@ -1125,14 +1120,7 @@ class BaseConfig(object):
logger.error("ip: %s" % ip)
raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
cmd = (ip, 'link')
logger.debug('Running %s...' % str(cmd))
@@ -1350,6 +1338,10 @@ class BaseConfig(object):
raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
self.qemu_system = qemu_system
+ def setup_qmp(self):
+ if self.qmp:
+ self.qemu_opt += " -qmp %s,server,nowait" % self.qmp
+
def setup_vga(self):
if self.nographic == True:
if self.sdl == True:
@@ -1369,13 +1361,13 @@ class BaseConfig(object):
# need our font setup and show-cursor below so we need to see what qemu --help says
# is supported so we can pass our correct config in.
if not self.nographic and not self.sdl and not self.gtk and not self.publicvnc and not self.egl_headless == True:
- output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True)
+ output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True, env=self.qemu_environ)
if "-display gtk" in output:
self.gtk = True
elif "-display sdl" in output:
self.sdl = True
else:
- self.qemu_opt += '-display none'
+ self.qemu_opt += ' -display none'
if self.sdl == True or self.gtk == True or self.egl_headless == True:
@@ -1393,7 +1385,7 @@ class BaseConfig(object):
if self.sdl == True:
self.qemu_opt += 'sdl,'
elif self.gtk == True:
- os.environ['FONTCONFIG_PATH'] = '/etc/fonts'
+ self.qemu_environ['FONTCONFIG_PATH'] = '/etc/fonts'
self.qemu_opt += 'gtk,'
if self.gl == True:
@@ -1480,6 +1472,7 @@ class BaseConfig(object):
if self.snapshot:
self.qemu_opt += " -snapshot"
+ self.setup_qmp()
self.setup_serial()
self.setup_vga()
@@ -1500,14 +1493,17 @@ class BaseConfig(object):
cmd = "%s %s" % (self.qemu_opt, kernel_opts)
cmds = shlex.split(cmd)
logger.info('Running %s\n' % cmd)
+ with open('/proc/uptime', 'r') as f:
+ uptime_seconds = f.readline().split()[0]
+ logger.info('Host uptime: %s\n' % uptime_seconds)
pass_fds = []
if self.taplock_descriptor:
pass_fds = [self.taplock_descriptor.fileno()]
if len(self.portlocks):
for descriptor in self.portlocks.values():
pass_fds.append(descriptor.fileno())
- process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds)
- self.qemupid = process.pid
+ process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds, env=self.qemu_environ)
+ self.qemuprocess = process
retcode = process.wait()
if retcode:
if retcode == -signal.SIGTERM:
@@ -1523,18 +1519,30 @@ class BaseConfig(object):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
logger.info("Cleaning up")
+
+ if self.qemuprocess:
+ try:
+ # give it some time to shut down, ignore return values and output
+ self.qemuprocess.send_signal(signal.SIGTERM)
+ self.qemuprocess.communicate(timeout=5)
+ except subprocess.TimeoutExpired:
+ self.qemuprocess.kill()
+
+ with open('/proc/uptime', 'r') as f:
+ uptime_seconds = f.readline().split()[0]
+ logger.info('Host uptime: %s\n' % uptime_seconds)
if self.cleantap:
cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
self.release_taplock()
- self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
+ self.release_portlock()
if self.saved_stty:
subprocess.check_call(("stty", self.saved_stty))
@@ -1547,6 +1555,9 @@ class BaseConfig(object):
else:
shutil.rmtree(ent)
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
+
self.cleaned = True
def run_bitbake_env(self, mach=None):
@@ -1623,12 +1634,8 @@ def main():
subprocess.check_call([renice, str(os.getpid())])
def sigterm_handler(signum, frame):
- logger.info("SIGTERM received")
- if config.qemupid:
- os.kill(config.qemupid, signal.SIGTERM)
+ logger.info("Received signal: %s" % (signum))
config.cleanup()
- # Deliberately ignore the return code of 'tput smam'.
- subprocess.call(["tput", "smam"])
signal.signal(signal.SIGTERM, sigterm_handler)
config.check_args()
@@ -1650,8 +1657,6 @@ def main():
return 1
finally:
config.cleanup()
- # Deliberately ignore the return code of 'tput smam'.
- subprocess.call(["tput", "smam"])
if __name__ == "__main__":
sys.exit(main())
diff --git a/scripts/runqemu.README b/scripts/runqemu.README
index da9abd7dfb..e5f4b4634c 100644
--- a/scripts/runqemu.README
+++ b/scripts/runqemu.README
@@ -1,12 +1,12 @@
Using OE images with QEMU
=========================
-OE-Core can generate qemu bootable kernels and images with can be used
+OE-Core can generate qemu bootable kernels and images which can be used
on a desktop system. The scripts currently support booting ARM, MIPS, PowerPC
-and x86 (32 and 64 bit) images. The scripts can be used within the OE build
-system or externaly.
+and x86 (32 and 64 bit) images. The scripts can be used within the OE build
+system or externally.
-The runqemu script is run as:
+The runqemu script is run as:
runqemu <machine> <zimage> <filesystem>
@@ -15,13 +15,13 @@ where:
<machine> is the machine/architecture to use (qemuarm/qemumips/qemuppc/qemux86/qemux86-64)
<zimage> is the path to a kernel (e.g. zimage-qemuarm.bin)
<filesystem> is the path to an ext2 image (e.g. filesystem-qemuarm.ext2) or an nfs directory
-
-If <machine> isn't specified, the script will try to detect the machine name
+
+If <machine> isn't specified, the script will try to detect the machine name
from the name of the <zimage> file.
If <filesystem> isn't specified, nfs booting will be assumed.
-When used within the build system, it will default to qemuarm, ext2 and the last kernel and
+When used within the build system, it will default to qemuarm, ext2 and the last kernel and
core-image-sato-sdk image built by the build system. If an sdk image isn't present it will look
for sato and minimal images.
@@ -31,7 +31,7 @@ Full usage instructions can be seen by running the command with no options speci
Notes
=====
- - The scripts run qemu using sudo. Change perms on /dev/net/tun to
+ - The scripts run qemu using sudo. Change perms on /dev/net/tun to
run as non root. The runqemu-gen-tapdevs script can also be used by
root to prepopulate the appropriate network devices.
- You can access the host computer at 192.168.7.1 within the image.
diff --git a/scripts/wic b/scripts/wic
index aee63a45aa..06e0b48db0 100755
--- a/scripts/wic
+++ b/scripts/wic
@@ -209,7 +209,7 @@ def wic_create_subcommand(options, usage_str):
logger.info(" (Please check that the build artifacts for the machine")
logger.info(" selected in local.conf actually exist and that they")
logger.info(" are the correct artifacts for the image (.wks file)).\n")
- raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir)
+ raise WicError("The artifact that couldn't be found was %s:\n %s" % (not_found, not_found_dir))
krootfs_dir = options.rootfs_dir
if krootfs_dir is None:
diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer
index 0e5b75b1f7..67cc71950f 100755
--- a/scripts/yocto-check-layer
+++ b/scripts/yocto-check-layer
@@ -168,14 +168,13 @@ def main():
layers_tested = 0
for layer in layers:
- if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \
- layer['type'] == LayerType.ERROR_BSP_DISTRO:
+ if layer['type'] in (LayerType.ERROR_NO_LAYER_CONF, LayerType.ERROR_BSP_DISTRO):
continue
# Reset to a clean backup copy for each run
shutil.copyfile(bblayersconf + '.backup', bblayersconf)
- if check_bblayers(bblayersconf, layer['path'], logger):
+ if layer['type'] not in (LayerType.CORE, ) and check_bblayers(bblayersconf, layer['path'], logger):
logger.info("%s already in %s. To capture initial signatures, layer under test should not present "
"in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name']))
results[layer['name']] = None