image: ghcr.io/siemens/kas/kas:3.2

variables:
  CPU_REQUEST: ""
  DEFAULT_TAG: ""
  # These are needed as the k8s executor doesn't respect the container entrypoint
  # by default. Quoted so YAML doesn't type them as integers; the runner's
  # boolean parser accepts "0"/"1".
  FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY: "0"
  FF_KUBERNETES_HONOR_ENTRYPOINT: "1"

stages:
  - prep
  - build

# Common job fragment to get a worker ready
.setup:
  tags:
    - $DEFAULT_TAG
  stage: build
  interruptible: true
  variables:
    KAS_WORK_DIR: $CI_PROJECT_DIR/work
    KAS_REPO_REF_DIR: $CI_BUILDS_DIR/persist/repos
    SSTATE_DIR: $CI_BUILDS_DIR/persist/sstate
    DL_DIR: $CI_BUILDS_DIR/persist/downloads
    BB_LOGCONFIG: $CI_PROJECT_DIR/ci/logging.yml
    TOOLCHAIN_DIR: $CI_BUILDS_DIR/persist/toolchains
    IMAGE_DIR: $CI_PROJECT_DIR/work/build/tmp/deploy/images
    TOOLCHAIN_LINK_DIR: $CI_PROJECT_DIR/work/build/toolchains
  before_script:
    - echo KAS_WORK_DIR = $KAS_WORK_DIR
    - echo SSTATE_DIR = $SSTATE_DIR
    - echo DL_DIR = $DL_DIR
    - rm -rf $KAS_WORK_DIR
    - mkdir --verbose --parents $KAS_WORK_DIR $KAS_REPO_REF_DIR $SSTATE_DIR $DL_DIR $TOOLCHAIN_DIR $TOOLCHAIN_LINK_DIR
    # Must do this here, as it's the only way to make sure the toolchain is installed on the same builder
    - ./ci/get-binary-toolchains $DL_DIR $TOOLCHAIN_DIR $TOOLCHAIN_LINK_DIR
    # This can be removed with Kas 3.2
    # NOTE(review): the image above is already kas:3.2 — confirm whether this
    # apt-get workaround can now be dropped.
    - sudo apt-get update && sudo apt-get install --yes python3-subunit

# Generalised fragment to do a Kas build
.build:
  extends: .setup
  variables:
    KUBERNETES_CPU_REQUEST: $CPU_REQUEST
  rules:
    # Don't run MR pipelines
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
      when: never
    # Don't run pipelines for tags
    - if: $CI_COMMIT_TAG
      when: never
    # Don't run if BUILD_ENABLE_REGEX is set, but the job doesn't match the regex
    - if: '$BUILD_ENABLE_REGEX != null && $CI_JOB_NAME !~ $BUILD_ENABLE_REGEX'
      when: never
    # Allow the dev kernels to fail and not fail the overall build
    - if: '$KERNEL == "linux-yocto-dev"'
      allow_failure: true
    # Catch all for everything else
    - if: '$KERNEL != "linux-yocto-dev"'
  script:
    - KASFILES=$(./ci/jobs-to-kas "$CI_JOB_NAME")
    - kas shell --update --force-checkout $KASFILES -c 'cat conf/*.conf'
    - kas build $KASFILES
    - ./ci/check-warnings $KAS_WORK_DIR/build/warnings.log
  artifacts:
    name: "logs"
    when: on_failure
    paths:
      - $CI_PROJECT_DIR/work/build/tmp/work*/**/temp/log.do_*.*

#
# Prep stage, update repositories once
#

update-repos:
  extends: .setup
  stage: prep
  script:
    - flock --verbose --timeout 60 $KAS_REPO_REF_DIR ./ci/update-repos

#
# Build stage, the actual build jobs
#

# Available options for building are
# TOOLCHAINS: [gcc, clang, armgcc, external-gccarm]
# TCLIBC: [glibc, musl]
# FIRMWARE: [uboot, edk2]
# VIRT: [none, xen]
# TESTING: testimage

corstone500:
  extends: .build
  parallel:
    matrix:
      - TESTING: testimage
  tags:
    - x86_64

corstone1000-fvp:
  extends: .build
  parallel:
    matrix:
      - TESTING: [testimage, tftf]
  tags:
    - x86_64

corstone1000-mps3:
  extends: .build

fvp-base:
  extends: .build
  parallel:
    matrix:
      - TESTING: testimage
  tags:
    - x86_64

fvp-base-arm32:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, external-gccarm]
        TESTING: testimage
  tags:
    - x86_64

fvp-baser-aemv8r64:
  extends: .build
  parallel:
    matrix:
      - TESTING: testimage
  tags:
    - x86_64

fvps:
  extends: .build

gem5-arm64:
  extends: .build
  parallel:
    matrix:
      - VIRT: [none, xen]

gem5-atp-arm64:
  extends: .build

generic-arm64:
  extends: .build

juno:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, clang]
        FIRMWARE: [uboot, edk2]

musca-b1:
  extends: .build

musca-s1:
  extends: .build

n1sdp:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, armgcc]

qemu-generic-arm64:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, clang]
        TESTING: testimage

qemuarm64-secureboot:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, clang]
        TCLIBC: [glibc, musl]
        TS: [none, trusted-services]
        TESTING: testimage

qemuarm64:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, clang]
        EFI: [uboot, edk2]
        TESTING: testimage
      - VIRT: xen

qemuarm-secureboot:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, clang]
        TESTING: testimage

qemuarm:
  extends: .build
  parallel:
    matrix:
      - TOOLCHAINS: [gcc, clang]
        EFI: [uboot, edk2]
        TESTING: testimage
      - VIRT: xen

qemuarmv5:
  extends: .build
  parallel:
    matrix:
      - TESTING: testimage

sgi575:
  extends: .build

tc1:
  extends: .build
  tags:
    - x86_64

toolchains:
  extends: .build

selftest:
  extends: .setup
  script:
    - KASFILES=./ci/qemuarm64.yml:./ci/selftest.yml
    - kas shell --update --force-checkout $KASFILES -c 'oe-selftest --num-processes 1 --run-tests runfvp'

# Validate layers are Yocto Project Compatible
check-layers:
  extends: .setup
  script:
    - kas shell --update --force-checkout ci/base.yml:ci/meta-openembedded.yml --command \
      "yocto-check-layer-wrapper $CI_PROJECT_DIR/$LAYER --dependency $CI_PROJECT_DIR/meta-* $KAS_WORK_DIR/meta-openembedded/meta-oe --no-auto-dependency"
  parallel:
    matrix:
      - LAYER: [meta-arm, meta-arm-bsp, meta-arm-toolchain, meta-gem5, meta-atp]

pending-updates:
  extends: .setup
  artifacts:
    paths:
      - update-report
  script:
    - rm -fr update-report
    # This configuration has all of the layers we need enabled
    - kas shell ci/gem5-arm64.yml --command \
      "$CI_PROJECT_DIR/scripts/machine-summary.py -t report -o $CI_PROJECT_DIR/update-report $($CI_PROJECT_DIR/ci/listmachines.py meta-arm meta-arm-bsp meta-gem5)"
  # Do this on x86 whilst the compilers are x86-only
  tags:
    - x86_64

# What percentage of machines in the layer do we build
machine-coverage:
  extends: .setup
  script:
    - ./ci/check-machine-coverage
  coverage: '/Coverage: \d+/'

metrics:
  extends: .setup
  artifacts:
    reports:
      metrics: metrics.txt
  script:
    - kas shell --update --force-checkout ci/base.yml --command \
      "$CI_PROJECT_DIR/ci/patchreview $CI_PROJECT_DIR/meta-* --verbose --metrics $CI_PROJECT_DIR/metrics.txt"

documentation:
  extends: .setup
  script:
    - |
      sudo pip3 install -r meta-arm-bsp/documentation/requirements.txt
      for CONF in meta-*/documentation/*/conf.py ; do
        SOURCE_DIR=$(dirname $CONF)
        MACHINE=$(basename $SOURCE_DIR)
        sphinx-build -vW $SOURCE_DIR build-docs/$MACHINE
      done
      test -d build-docs/
  artifacts:
    paths:
      - build-docs/